Dataset schema (five string columns):

  column     type    stats
  ---------  ------  ---------------------
  input      string  length 53 - 297k
  output     string  604 distinct values
  repo_name  string  376 distinct values
  test_path  string  583 distinct values
  code_path  string  length 7 - 116
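Each row pairs a source file (input, located at code_path) with the repository test file (output, located at test_path). As a minimal sketch of how such rows could be consumed, assuming this dump comes from a Hugging Face `datasets`-style table (the dataset id below is hypothetical; the dump does not name the dataset):

    from datasets import load_dataset

    # Hypothetical dataset id -- not named anywhere in this dump.
    ds = load_dataset("example-org/code-test-pairs", split="train")

    row = ds[0]
    # Five string columns, matching the schema above.
    print(row["repo_name"])   # e.g. "mhvk/numpy"
    print(row["code_path"])   # e.g. "numpy/typing/tests/data/pass/lib_version.py"
    print(row["test_path"])   # e.g. "numpy/lib/tests/test_shape_base.py"
    print(len(row["input"]), len(row["output"]))  # input length within 53..297k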
input:

from numpy.lib import NumpyVersion

version = NumpyVersion("1.8.0")

version.vstring
version.version
version.major
version.minor
version.bugfix
version.pre_release
version.is_devversion

version == version
version != version
version < "1.8.0"
version <= version
version > version
version >= "1.8.0"
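For context, a minimal sketch of what those attributes and comparisons evaluate to for the version string above, following NumpyVersion's documented parsing (run under a NumPy release that still ships numpy.lib.NumpyVersion):

    from numpy.lib import NumpyVersion

    v = NumpyVersion("1.8.0")
    assert v.vstring == "1.8.0"                    # the raw version string
    assert (v.major, v.minor, v.bugfix) == (1, 8, 0)
    assert v.pre_release == "final"                # no alpha/beta/rc suffix
    assert not v.is_devversion                     # no ".dev" suffix
    assert v >= "1.8.0" and not v < "1.8.0"        # comparisons accept plain strings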
output:

import numpy as np
import functools
import sys
import pytest

from numpy.lib.shape_base import (
    apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
    vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
    put_along_axis
    )
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
    )

IS_64BIT = sys.maxsize > 2**32


def _add_keepdims(func):
    """ hack in keepdims behavior into a function taking an axis """
    @functools.wraps(func)
    def wrapped(a, axis, **kwargs):
        res = func(a, axis=axis, **kwargs)
        if axis is None:
            axis = 0  # res is now a scalar, so we can insert this anywhere
        return np.expand_dims(res, axis=axis)
    return wrapped


class TestTakeAlongAxis:
    def test_argequivalent(self):
        """ Test it translates from arg<func> to <func> """
        from numpy.random import rand
        a = rand(3, 4, 5)

        funcs = [
            (np.sort, np.argsort, dict()),
            (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
            (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
            (np.partition, np.argpartition, dict(kth=2)),
        ]

        for func, argfunc, kwargs in funcs:
            for axis in list(range(a.ndim)) + [None]:
                a_func = func(a, axis=axis, **kwargs)
                ai_func = argfunc(a, axis=axis, **kwargs)
                assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))

    def test_invalid(self):
        """ Test it errors when indices has too few dimensions """
        a = np.ones((10, 10))
        ai = np.ones((10, 2), dtype=np.intp)

        # sanity check
        take_along_axis(a, ai, axis=1)

        # not enough indices
        assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
        # bool arrays not allowed
        assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
        # float arrays not allowed
        assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
        # invalid axis
        assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)

    def test_empty(self):
        """ Test everything is ok with empty results, even with inserted dims """
        a = np.ones((3, 4, 5))
        ai = np.ones((3, 0, 5), dtype=np.intp)

        actual = take_along_axis(a, ai, axis=1)
        assert_equal(actual.shape, ai.shape)

    def test_broadcast(self):
        """ Test that non-indexing dimensions are broadcast in both directions """
        a = np.ones((3, 4, 1))
        ai = np.ones((1, 2, 5), dtype=np.intp)
        actual = take_along_axis(a, ai, axis=1)
        assert_equal(actual.shape, (3, 2, 5))


class TestPutAlongAxis:
    def test_replace_max(self):
        a_base = np.array([[10, 30, 20], [60, 40, 50]])

        for axis in list(range(a_base.ndim)) + [None]:
            # we mutate this in the loop
            a = a_base.copy()

            # replace the max with a small value
            i_max = _add_keepdims(np.argmax)(a, axis=axis)
            put_along_axis(a, i_max, -99, axis=axis)

            # find the new minimum, which should be where the max was
            i_min = _add_keepdims(np.argmin)(a, axis=axis)

            assert_equal(i_min, i_max)

    def test_broadcast(self):
        """ Test that non-indexing dimensions are broadcast in both directions """
        a = np.ones((3, 4, 1))
        ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
        put_along_axis(a, ai, 20, axis=1)
        assert_equal(take_along_axis(a, ai, axis=1), 20)


class TestApplyAlongAxis:
    def test_simple(self):
        a = np.ones((20, 10), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_simple101(self):
        a = np.ones((10, 101), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_3d(self):
        a = np.arange(27).reshape((3, 3, 3))
        assert_array_equal(apply_along_axis(np.sum, 0, a),
                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])

    def test_preserve_subclass(self):
        def double(row):
            return row * 2

        class MyNDArray(np.ndarray):
            pass

        m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
        expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)

        result = apply_along_axis(double, 0, m)
        assert_(isinstance(result, MyNDArray))
        assert_array_equal(result, expected)

        result = apply_along_axis(double, 1, m)
        assert_(isinstance(result, MyNDArray))
        assert_array_equal(result, expected)

    def test_subclass(self):
        class MinimalSubclass(np.ndarray):
            data = 1

        def minimal_function(array):
            return array.data

        a = np.zeros((6, 3)).view(MinimalSubclass)

        assert_array_equal(
            apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
        )

    def test_scalar_array(self, cls=np.ndarray):
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(np.sum, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

    def test_0d_array(self, cls=np.ndarray):
        def sum_to_0d(x):
            """ Sum x, returning a 0d array of the same class """
            assert_equal(x.ndim, 1)
            return np.squeeze(np.sum(x, keepdims=True))
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(sum_to_0d, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

        res = apply_along_axis(sum_to_0d, 1, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))

    def test_axis_insertion(self, cls=np.ndarray):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            return (x[::-1] * x[1:, None]).view(cls)

        a2d = np.arange(6*3).reshape((6, 3))

        # 2d insertion along first axis
        actual = apply_along_axis(f1to2, 0, a2d)
        expected = np.stack([
            f1to2(a2d[:, i]) for i in range(a2d.shape[1])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 2d insertion along last axis
        actual = apply_along_axis(f1to2, 1, a2d)
        expected = np.stack([
            f1to2(a2d[i, :]) for i in range(a2d.shape[0])
        ], axis=0).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 3d insertion along middle axis
        a3d = np.arange(6*5*3).reshape((6, 5, 3))

        actual = apply_along_axis(f1to2, 1, a3d)
        expected = np.stack([
            np.stack([
                f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])
            ], axis=0)
            for j in range(a3d.shape[2])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

    def test_subclass_preservation(self):
        class MinimalSubclass(np.ndarray):
            pass
        self.test_scalar_array(MinimalSubclass)
        self.test_0d_array(MinimalSubclass)
        self.test_axis_insertion(MinimalSubclass)

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:, None]
            return np.ma.masked_where(res % 5 == 0, res)
        a = np.arange(6*3).reshape((6, 3))
        res = apply_along_axis(f1to2, 0, a)
        assert_(isinstance(res, np.ma.masked_array))
        assert_equal(res.ndim, 3)
        assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)
        assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)
        assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)

    def test_tuple_func1d(self):
        def sample_1d(x):
            return x[1], x[0]
        res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
        assert_array_equal(res, np.array([[2, 1], [4, 3]]))

    def test_empty(self):
        # can't apply_along_axis when there's no chance to call the function
        def never_call(x):
            assert_(False)  # should never be reached

        a = np.empty((0, 0))
        assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
        assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)

        # but it's sometimes ok with some non-zero dimensions
        def empty_to_1(x):
            assert_(len(x) == 0)
            return 1

        a = np.empty((10, 0))
        actual = np.apply_along_axis(empty_to_1, 1, a)
        assert_equal(actual, np.ones(10))
        assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)

    def test_with_iterable_object(self):
        # from issue 5248
        d = np.array([
            [{1, 11}, {2, 22}, {3, 33}],
            [{4, 44}, {5, 55}, {6, 66}]
        ])
        actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
        expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])

        assert_equal(actual, expected)

        # issue 8642 - assert_equal doesn't detect this!
        for i in np.ndindex(actual.shape):
            assert_equal(type(actual[i]), type(expected[i]))


class TestApplyOverAxes:
    def test_simple(self):
        a = np.arange(24).reshape(2, 3, 4)
        aoa_a = apply_over_axes(np.sum, a, [0, 2])
        assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))


class TestExpandDims:
    def test_functionality(self):
        s = (2, 3, 4, 5)
        a = np.empty(s)
        for axis in range(-5, 4):
            b = expand_dims(a, axis)
            assert_(b.shape[axis] == 1)
            assert_(np.squeeze(b).shape == s)

    def test_axis_tuple(self):
        a = np.empty((3, 3, 3))
        assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
        assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
        assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
        assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)

    def test_axis_out_of_range(self):
        s = (2, 3, 4, 5)
        a = np.empty(s)
        assert_raises(np.AxisError, expand_dims, a, -6)
        assert_raises(np.AxisError, expand_dims, a, 5)

        a = np.empty((3, 3, 3))
        assert_raises(np.AxisError, expand_dims, a, (0, -6))
        assert_raises(np.AxisError, expand_dims, a, (0, 5))

    def test_repeated_axis(self):
        a = np.empty((3, 3, 3))
        assert_raises(ValueError, expand_dims, a, axis=(1, 1))

    def test_subclasses(self):
        a = np.arange(10).reshape((2, 5))
        a = np.ma.array(a, mask=a % 3 == 0)

        expanded = np.expand_dims(a, axis=1)
        assert_(isinstance(expanded, np.ma.MaskedArray))
        assert_equal(expanded.shape, (2, 1, 5))
        assert_equal(expanded.mask.shape, (2, 1, 5))


class TestArraySplit:
    def test_integer_0_split(self):
        a = np.arange(10)
        assert_raises(ValueError, array_split, a, 0)

    def test_integer_split(self):
        a = np.arange(10)
        res = array_split(a, 1)
        desired = [np.arange(10)]
        compare_results(res, desired)

        res = array_split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

        res = array_split(a, 3)
        desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
        compare_results(res, desired)

        res = array_split(a, 4)
        desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
                   np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 5)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 6)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 7)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 8)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
                   np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 9)
        desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
                   np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 11)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)

    def test_integer_split_2D_rows(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=0)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

        # Same thing for manual splits:
        res = array_split(a, [0, 1, 2], axis=0)
        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
               np.array([np.arange(10)])]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)

    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis """
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # perhaps should check higher dimensions

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    def test_integer_split_2D_rows_greater_max_int32(self):
        a = np.broadcast_to([0], (1 << 32, 2))
        res = array_split(a, 4)
        chunk = np.broadcast_to([0], (1 << 30, 2))
        tgt = [chunk] * 4
        for i in range(len(tgt)):
            assert_equal(res[i].shape, tgt[i].shape)

    def test_index_split_simple(self):
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_low_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_high_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)


class TestSplit:
    # The split function is essentially the same as array_split,
    # except that it tests if splitting will result in an
    # equal split. Only test for this case.
    def test_equal_split(self):
        a = np.arange(10)
        res = split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

    def test_unequal_split(self):
        a = np.arange(10)
        assert_raises(ValueError, split, a, 3)


class TestColumnStack:
    def test_non_iterable(self):
        assert_raises(TypeError, column_stack, 1)

    def test_1D_arrays(self):
        # example from docstring
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        expected = np.array([[1, 2],
                             [2, 3],
                             [3, 4]])
        actual = np.column_stack((a, b))
        assert_equal(actual, expected)

    def test_2D_arrays(self):
        # same as hstack 2D docstring example
        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        expected = np.array([[1, 2],
                             [2, 3],
                             [3, 4]])
        actual = np.column_stack((a, b))
        assert_equal(actual, expected)

    def test_generator(self):
        with assert_warns(FutureWarning):
            column_stack((np.arange(3) for _ in range(2)))


class TestDstack:
    def test_non_iterable(self):
        assert_raises(TypeError, dstack, 1)

    def test_0D_array(self):
        a = np.array(1)
        b = np.array(2)
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = np.array([1])
        b = np.array([2])
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = np.array([[1], [2]])
        b = np.array([[1], [2]])
        res = dstack([a, b])
        desired = np.array([[[1, 1]], [[2, 2]]])
        assert_array_equal(res, desired)

    def test_2D_array2(self):
        a = np.array([1, 2])
        b = np.array([1, 2])
        res = dstack([a, b])
        desired = np.array([[[1, 1], [2, 2]]])
        assert_array_equal(res, desired)

    def test_generator(self):
        with assert_warns(FutureWarning):
            dstack((np.arange(3) for _ in range(2)))


# array_split has more comprehensive tests of splitting.
# only do simple test on hsplit, vsplit, and dsplit
class TestHsplit:
    """Only testing for integer splits."""

    def test_non_iterable(self):
        assert_raises(ValueError, hsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        try:
            hsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        res = hsplit(a, 2)
        desired = [np.array([1, 2]), np.array([3, 4])]
        compare_results(res, desired)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = hsplit(a, 2)
        desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
        compare_results(res, desired)


class TestVsplit:
    """Only testing for integer splits."""

    def test_non_iterable(self):
        assert_raises(ValueError, vsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, vsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        try:
            vsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = vsplit(a, 2)
        desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
        compare_results(res, desired)


class TestDsplit:
    # Only testing for integer splits.
    def test_non_iterable(self):
        assert_raises(ValueError, dsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, dsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        assert_raises(ValueError, dsplit, a, 2)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        try:
            dsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_3D_array(self):
        a = np.array([[[1, 2, 3, 4],
                       [1, 2, 3, 4]],
                      [[1, 2, 3, 4],
                       [1, 2, 3, 4]]])
        res = dsplit(a, 2)
        desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
                   np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
        compare_results(res, desired)


class TestSqueeze:
    def test_basic(self):
        from numpy.random import rand

        a = rand(20, 10, 10, 1, 1)
        b = rand(20, 1, 10, 1, 20)
        c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
        assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
        assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))

        # Squeezing to 0-dim should still give an ndarray
        a = [[[1.5]]]
        res = np.squeeze(a)
        assert_equal(res, 1.5)
        assert_equal(res.ndim, 0)
        assert_equal(type(res), np.ndarray)


class TestKron:
    def test_return_type(self):
        class myarray(np.ndarray):
            __array_priority__ = 0.0

        a = np.ones([2, 2])
        ma = myarray(a.shape, a.dtype, a.data)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(ma, ma)), myarray)
        assert_equal(type(kron(a, ma)), np.ndarray)
        assert_equal(type(kron(ma, a)), myarray)


class TestTile:
    def test_basic(self):
        a = np.array([0, 1, 2])
        b = [[1, 2], [3, 4]]
        assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
        assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
        assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
        assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
        assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(tile(b, (2, 2)),
                     [[1, 2, 1, 2], [3, 4, 3, 4],
                      [1, 2, 1, 2], [3, 4, 3, 4]])

    def test_tile_one_repetition_on_array_gh4679(self):
        a = np.arange(5)
        b = tile(a, 1)
        b += 2
        assert_equal(a, np.arange(5))

    def test_empty(self):
        a = np.array([[[]]])
        b = np.array([[], []])
        c = tile(b, 2).shape
        d = tile(a, (3, 2, 5)).shape
        assert_equal(c, (2, 0))
        assert_equal(d, (3, 2, 0))

    def test_kroncompare(self):
        from numpy.random import randint

        reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
        shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
        for s in shape:
            b = randint(0, 10, size=s)
            for r in reps:
                a = np.ones(r, b.dtype)
                large = tile(b, r)
                klarge = kron(a, b)
                assert_equal(large, klarge)


class TestMayShareMemory:
    def test_basic(self):
        d = np.ones((50, 60))
        d2 = np.ones((30, 60, 6))

        assert_(np.may_share_memory(d, d))
        assert_(np.may_share_memory(d, d[::-1]))
        assert_(np.may_share_memory(d, d[::2]))
        assert_(np.may_share_memory(d, d[1:, ::-1]))

        assert_(not np.may_share_memory(d[::-1], d2))
        assert_(not np.may_share_memory(d[::2], d2))
        assert_(not np.may_share_memory(d[1:, ::-1], d2))
        assert_(np.may_share_memory(d2[1:, ::-1], d2))


# Utility
def compare_results(res, desired):
    for i in range(len(desired)):
        assert_array_equal(res[i], desired[i])
repo_name: mhvk/numpy
test_path: numpy/lib/tests/test_shape_base.py
code_path: numpy/typing/tests/data/pass/lib_version.py
""" Shim for _umath_tests to allow a deprecation period for the new name. """ import warnings # 2018-04-04, numpy 1.15.0 warnings.warn(("numpy.core.umath_tests is an internal NumPy " "module and should not be imported. It will " "be removed in a future NumPy release."), category=DeprecationWarning, stacklevel=2) from ._umath_tests import *
output: identical to numpy/lib/tests/test_shape_base.py as reproduced in the previous row.
repo_name: mhvk/numpy
test_path: numpy/lib/tests/test_shape_base.py
code_path: numpy/core/umath_tests.py
input:

from typing import Any, List
import numpy as np
import numpy.typing as npt

# Can't directly import `np.float128` as it is not available on all platforms
f16: np.floating[npt._128Bit]

c16 = np.complex128()
f8 = np.float64()
i8 = np.int64()
u8 = np.uint64()

c8 = np.complex64()
f4 = np.float32()
i4 = np.int32()
u4 = np.uint32()

dt = np.datetime64(0, "D")
td = np.timedelta64(0, "D")

b_ = np.bool_()

b = bool()
c = complex()
f = float()
i = int()

AR_b: np.ndarray[Any, np.dtype[np.bool_]]
AR_u: np.ndarray[Any, np.dtype[np.uint32]]
AR_i: np.ndarray[Any, np.dtype[np.int64]]
AR_f: np.ndarray[Any, np.dtype[np.float64]]
AR_c: np.ndarray[Any, np.dtype[np.complex128]]
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
AR_O: np.ndarray[Any, np.dtype[np.object_]]

AR_LIKE_b: List[bool]
AR_LIKE_u: List[np.uint32]
AR_LIKE_i: List[int]
AR_LIKE_f: List[float]
AR_LIKE_c: List[complex]
AR_LIKE_m: List[np.timedelta64]
AR_LIKE_M: List[np.datetime64]
AR_LIKE_O: List[np.object_]

# Array subtraction

reveal_type(AR_b - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_b - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_b - AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_b - AR_LIKE_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_b - AR_LIKE_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_b - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_u - AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_LIKE_i - AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_f - AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_c - AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_m - AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_M - AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_LIKE_O - AR_b)  # E: Any

reveal_type(AR_u - AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_u - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_u - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_u - AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_u - AR_LIKE_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_u - AR_LIKE_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_u - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_LIKE_u - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_LIKE_i - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_f - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_c - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_m - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_M - AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_LIKE_O - AR_u)  # E: Any

reveal_type(AR_i - AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_i - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_i - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_i - AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_i - AR_LIKE_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_i - AR_LIKE_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_i - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_u - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_i - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_f - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_c - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_m - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_M - AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_LIKE_O - AR_i)  # E: Any

reveal_type(AR_f - AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f - AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f - AR_LIKE_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_f - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b - AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_u - AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_i - AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_f - AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_c - AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_O - AR_f)  # E: Any

reveal_type(AR_c - AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_c - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_c - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_c - AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_c - AR_LIKE_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_c - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b - AR_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_u - AR_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_i - AR_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_f - AR_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_c - AR_c)  # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
reveal_type(AR_LIKE_O - AR_c)  # E: Any

reveal_type(AR_m - AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m - AR_LIKE_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b - AR_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_u - AR_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_i - AR_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_m - AR_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_M - AR_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_LIKE_O - AR_m)  # E: Any

reveal_type(AR_M - AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_M - AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_M - AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_M - AR_LIKE_m)  # E: numpy.ndarray[Any, numpy.dtype[numpy.datetime64]]
reveal_type(AR_M - AR_LIKE_M)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_M - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_M - AR_M)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_O - AR_M)  # E: Any

reveal_type(AR_O - AR_LIKE_b)  # E: Any
reveal_type(AR_O - AR_LIKE_u)  # E: Any
reveal_type(AR_O - AR_LIKE_i)  # E: Any
reveal_type(AR_O - AR_LIKE_f)  # E: Any
reveal_type(AR_O - AR_LIKE_c)  # E: Any
reveal_type(AR_O - AR_LIKE_m)  # E: Any
reveal_type(AR_O - AR_LIKE_M)  # E: Any
reveal_type(AR_O - AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b - AR_O)  # E: Any
reveal_type(AR_LIKE_u - AR_O)  # E: Any
reveal_type(AR_LIKE_i - AR_O)  # E: Any
reveal_type(AR_LIKE_f - AR_O)  # E: Any
reveal_type(AR_LIKE_c - AR_O)  # E: Any
reveal_type(AR_LIKE_m - AR_O)  # E: Any
reveal_type(AR_LIKE_M - AR_O)  # E: Any
reveal_type(AR_LIKE_O - AR_O)  # E: Any

# Array floor division

reveal_type(AR_b // AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[{int8}]]
reveal_type(AR_b // AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_b // AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_b // AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_b // AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b // AR_b)  # E: numpy.ndarray[Any, numpy.dtype[{int8}]]
reveal_type(AR_LIKE_u // AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_LIKE_i // AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_f // AR_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_O // AR_b)  # E: Any

reveal_type(AR_u // AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_u // AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_u // AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_u // AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_u // AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b // AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_LIKE_u // AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.unsignedinteger[Any]]]
reveal_type(AR_LIKE_i // AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_f // AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_m // AR_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_O // AR_u)  # E: Any

reveal_type(AR_i // AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_i // AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_i // AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_i // AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_i // AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b // AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_u // AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_i // AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]
reveal_type(AR_LIKE_f // AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_m // AR_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_O // AR_i)  # E: Any

reveal_type(AR_f // AR_LIKE_b)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f // AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f // AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f // AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_f // AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b // AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_u // AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_i // AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_f // AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
reveal_type(AR_LIKE_m // AR_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_LIKE_O // AR_f)  # E: Any

reveal_type(AR_m // AR_LIKE_u)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m // AR_LIKE_i)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m // AR_LIKE_f)  # E: numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]
reveal_type(AR_m // AR_LIKE_m)  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(AR_m // AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_m // AR_m)  # E: numpy.ndarray[Any, numpy.dtype[{int64}]]
reveal_type(AR_LIKE_O // AR_m)  # E: Any

reveal_type(AR_O // AR_LIKE_b)  # E: Any
reveal_type(AR_O // AR_LIKE_u)  # E: Any
reveal_type(AR_O // AR_LIKE_i)  # E: Any
reveal_type(AR_O // AR_LIKE_f)  # E: Any
reveal_type(AR_O // AR_LIKE_m)  # E: Any
reveal_type(AR_O // AR_LIKE_M)  # E: Any
reveal_type(AR_O // AR_LIKE_O)  # E: Any

reveal_type(AR_LIKE_b // AR_O)  # E: Any
reveal_type(AR_LIKE_u // AR_O)  # E: Any
reveal_type(AR_LIKE_i // AR_O)  # E: Any
reveal_type(AR_LIKE_f // AR_O)  # E: Any
reveal_type(AR_LIKE_m // AR_O)  # E: Any
reveal_type(AR_LIKE_M // AR_O)  # E: Any
reveal_type(AR_LIKE_O // AR_O)  # E: Any

# unary ops

reveal_type(-f16)  # E: {float128}
reveal_type(-c16)  # E: {complex128}
reveal_type(-c8)  # E: {complex64}
reveal_type(-f8)  # E: {float64}
reveal_type(-f4)  # E: {float32}
reveal_type(-i8)  # E: {int64}
reveal_type(-i4)  # E: {int32}
reveal_type(-u8)  # E: {uint64}
reveal_type(-u4)  # E: {uint32}
reveal_type(-td)  # E: numpy.timedelta64
reveal_type(-AR_f)  # E: Any

reveal_type(+f16)  # E: {float128}
reveal_type(+c16)  # E: {complex128}
reveal_type(+c8)  # E: {complex64}
reveal_type(+f8)  # E: {float64}
reveal_type(+f4)  # E: {float32}
reveal_type(+i8)  # E: {int64}
reveal_type(+i4)  # E: {int32}
reveal_type(+u8)  # E: {uint64}
reveal_type(+u4)  # E: {uint32}
reveal_type(+td)  # E: numpy.timedelta64
reveal_type(+AR_f)  # E: Any

reveal_type(abs(f16))  # E: {float128}
reveal_type(abs(c16))  # E: {float64}
reveal_type(abs(c8))  # E: {float32}
reveal_type(abs(f8))  # E: {float64}
reveal_type(abs(f4))  # E: {float32}
reveal_type(abs(i8))  # E: {int64}
reveal_type(abs(i4))  # E: {int32}
reveal_type(abs(u8))  # E: {uint64}
reveal_type(abs(u4))  # E: {uint32}
reveal_type(abs(td))  # E: numpy.timedelta64
reveal_type(abs(b_))  # E: numpy.bool_
reveal_type(abs(AR_f))  # E: Any

# Time structures

reveal_type(dt + td)  # E: numpy.datetime64
reveal_type(dt + i)  # E: numpy.datetime64
reveal_type(dt + i4)  # E: numpy.datetime64
reveal_type(dt + i8)  # E: numpy.datetime64
reveal_type(dt - dt)  # E: numpy.timedelta64
reveal_type(dt - i)  # E: numpy.datetime64
reveal_type(dt - i4)  # E: numpy.datetime64
reveal_type(dt - i8)  # E: numpy.datetime64

reveal_type(td + td)  # E: numpy.timedelta64
reveal_type(td + i)  # E: numpy.timedelta64
reveal_type(td + i4)  # E: numpy.timedelta64
reveal_type(td + i8)  # E: numpy.timedelta64
reveal_type(td - td)  # E: numpy.timedelta64
reveal_type(td - i)  # E: numpy.timedelta64
reveal_type(td - i4)  # E: numpy.timedelta64
reveal_type(td - i8)  # E: numpy.timedelta64
reveal_type(td / f)  # E: numpy.timedelta64
reveal_type(td / f4)  # E: numpy.timedelta64
reveal_type(td / f8)  # E: numpy.timedelta64
reveal_type(td / td)  # E: {float64}
reveal_type(td // td)  # E: {int64}

# boolean

reveal_type(b_ / b)  # E: {float64}
reveal_type(b_ / b_)  # E: {float64}
reveal_type(b_ / i)  # E: {float64}
reveal_type(b_ / i8)  # E: {float64}
reveal_type(b_ / i4)  # E: {float64}
reveal_type(b_ / u8)  # E: {float64}
reveal_type(b_ / u4)  # E: {float64}
reveal_type(b_ / f)  # E: {float64}
reveal_type(b_ / f16)  # E: {float128}
reveal_type(b_ / f8)  # E: {float64}
reveal_type(b_ / f4)  # E: {float32}
reveal_type(b_ / c)  # E: {complex128}
reveal_type(b_ / c16)  # E: {complex128}
reveal_type(b_ / c8)  # E: {complex64}

reveal_type(b / b_)  # E: {float64}
reveal_type(b_ / b_)  # E: {float64}
reveal_type(i / b_)  # E: {float64}
reveal_type(i8 / b_)  # E: {float64}
reveal_type(i4 / b_)  # E: {float64}
reveal_type(u8 / b_)  # E: {float64}
reveal_type(u4 / b_)  # E: {float64}
reveal_type(f / b_)  # E: {float64}
reveal_type(f16 / b_)  # E: {float128}
reveal_type(f8 / b_)  # E: {float64}
reveal_type(f4 / b_)  # E: {float32}
reveal_type(c / b_)  # E: {complex128}
reveal_type(c16 / b_)  # E: {complex128}
reveal_type(c8 / b_)  # E: {complex64}

# Complex

reveal_type(c16 + f16)  # E: {complex256}
reveal_type(c16 + c16)  # E: {complex128}
reveal_type(c16 + f8)  # E: {complex128}
reveal_type(c16 + i8)  # E: {complex128}
reveal_type(c16 + c8)  # E: {complex128}
reveal_type(c16 + f4)  # E: {complex128}
reveal_type(c16 + i4)  # E: {complex128}
reveal_type(c16 + b_)  # E: {complex128}
reveal_type(c16 + b)  # E: {complex128}
reveal_type(c16 + c)  # E: {complex128}
reveal_type(c16 + f)  # E: {complex128}
reveal_type(c16 + i)  # E: {complex128}
reveal_type(c16 + AR_f)  # E: Any

reveal_type(f16 + c16)  # E: {complex256}
reveal_type(c16 + c16)  # E: {complex128}
reveal_type(f8 + c16)  # E: {complex128}
reveal_type(i8 + c16)  # E: {complex128}
reveal_type(c8 + c16)  # E: {complex128}
reveal_type(f4 + c16)  # E: {complex128}
reveal_type(i4 + c16)  # E: {complex128}
reveal_type(b_ + c16)  # E: {complex128}
reveal_type(b + c16)  # E: {complex128}
reveal_type(c + c16)  # E: {complex128}
reveal_type(f + c16)  # E: {complex128}
reveal_type(i + c16)  # E: {complex128}
reveal_type(AR_f + c16)  # E: Any

reveal_type(c8 + f16)  # E: {complex256}
reveal_type(c8 + c16)  # E: {complex128}
reveal_type(c8 + f8)  # E: {complex128}
reveal_type(c8 + i8)  # E: {complex128}
reveal_type(c8 + c8)  # E: {complex64}
reveal_type(c8 + f4)  # E: {complex64}
reveal_type(c8 + i4)  # E: {complex64}
reveal_type(c8 + b_)  # E: {complex64}
reveal_type(c8 + b)  # E: {complex64}
reveal_type(c8 + c)  # E: {complex128}
reveal_type(c8 + f)  # E: {complex128}
reveal_type(c8 + i)  # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}]
reveal_type(c8 + AR_f)  # E: Any

reveal_type(f16 + c8)  # E: {complex256}
reveal_type(c16 + c8)  # E: {complex128}
reveal_type(f8 + c8)  # E: {complex128}
reveal_type(i8 + c8)  # E: {complex128}
reveal_type(c8 + c8)  # E: {complex64}
reveal_type(f4 + c8)  # E: {complex64}
reveal_type(i4 + c8)  # E: {complex64}
reveal_type(b_ + c8)  # E: {complex64}
reveal_type(b + c8)  # E: {complex64}
reveal_type(c + c8)  # E: {complex128}
reveal_type(f + c8)  # E: {complex128}
reveal_type(i + c8)  # E: numpy.complexfloating[{_NBitInt}, {_NBitInt}]
reveal_type(AR_f + c8)  # E: Any

# Float

reveal_type(f8 + f16)  # E: {float128}
reveal_type(f8 + f8)  # E: {float64}
reveal_type(f8 + i8)  # E: {float64}
reveal_type(f8 + f4)  # E: {float64}
reveal_type(f8 + i4)  # E: {float64}
reveal_type(f8 + b_)  # E: {float64}
reveal_type(f8 + b)  # E: {float64}
reveal_type(f8 + c)  # E: {complex128}
reveal_type(f8 + f)  # E: {float64}
reveal_type(f8 + i)  # E: {float64}
reveal_type(f8 + AR_f)  # E: Any

reveal_type(f16 + f8)  # E: {float128}
reveal_type(f8 + f8)  # E: {float64}
reveal_type(i8 + f8)  # E: {float64}
reveal_type(f4 + f8)  # E: {float64}
reveal_type(i4 + f8)  # E: {float64}
reveal_type(b_ + f8)  # E: {float64}
reveal_type(b + f8)  # E: {float64}
reveal_type(c + f8)  # E: {complex128}
reveal_type(f + f8)  # E: {float64}
reveal_type(i + f8)  # E: {float64}
reveal_type(AR_f + f8)  # E: Any

reveal_type(f4 + f16)  # E: {float128}
reveal_type(f4 + f8)  # E: {float64}
reveal_type(f4 + i8)  # E: {float64}
reveal_type(f4 + f4)  # E: {float32}
reveal_type(f4 + i4)  # E: {float32}
reveal_type(f4 + b_)  # E: {float32}
reveal_type(f4 + b)  # E: {float32}
reveal_type(f4 + c)  # E: {complex128}
reveal_type(f4 + f)  # E: {float64}
reveal_type(f4 + i)  # E: numpy.floating[{_NBitInt}]
reveal_type(f4 + AR_f)  # E: Any

reveal_type(f16 + f4)  # E: {float128}
reveal_type(f8 + f4)  # E: {float64}
reveal_type(i8 + f4)  # E: {float64}
reveal_type(f4 + f4)  # E: {float32}
reveal_type(i4 + f4)  # E: {float32}
reveal_type(b_ + f4)  # E: {float32}
reveal_type(b + f4)  # E: {float32}
reveal_type(c + f4)  # E: {complex128}
reveal_type(f + f4)  # E: {float64}
reveal_type(i + f4)  # E: numpy.floating[{_NBitInt}]
reveal_type(AR_f + f4)  # E: Any

# Int

reveal_type(i8 + i8)  # E: {int64}
reveal_type(i8 + u8)  # E: Any
reveal_type(i8 + i4)  # E: {int64}
reveal_type(i8 + u4)  # E: Any
reveal_type(i8 + b_)  # E: {int64}
reveal_type(i8 + b)  # E: {int64}
reveal_type(i8 + c)  # E: {complex128}
reveal_type(i8 + f)  # E: {float64}
reveal_type(i8 + i)  # E: {int64}
reveal_type(i8 + AR_f)  # E: Any

reveal_type(u8 + u8)  # E: {uint64}
reveal_type(u8 + i4)  # E: Any
reveal_type(u8 + u4)  # E: {uint64}
reveal_type(u8 + b_)  # E: {uint64}
reveal_type(u8 + b)  # E: {uint64}
reveal_type(u8 + c)  # E: {complex128}
reveal_type(u8 + f)  # E: {float64}
reveal_type(u8 + i)  # E: Any
reveal_type(u8 + AR_f)  # E: Any

reveal_type(i8 + i8)  # E: {int64}
reveal_type(u8 + i8)  # E: Any
reveal_type(i4 + i8)  # E: {int64}
reveal_type(u4 + i8)  # E: Any
reveal_type(b_ + i8)  # E: {int64}
reveal_type(b + i8)  # E: {int64}
reveal_type(c + i8)  # E: {complex128}
reveal_type(f + i8)  # E: {float64}
reveal_type(i + i8)  # E: {int64}
reveal_type(AR_f + i8)  # E: Any

reveal_type(u8 + u8)  # E: {uint64}
reveal_type(i4 + u8)  # E: Any
reveal_type(u4 + u8)  # E: {uint64}
reveal_type(b_ + u8)  # E: {uint64}
reveal_type(b + u8)  # E: {uint64}
reveal_type(c + u8)  # E: {complex128}
reveal_type(f + u8)  # E: {float64}
reveal_type(i + u8)  # E: Any
reveal_type(AR_f + u8)  # E: Any

reveal_type(i4 + i8)  # E: {int64}
reveal_type(i4 + i4)  # E: {int32}
reveal_type(i4 + i)  # E: {int_}
reveal_type(i4 + b_)  # E: {int32}
reveal_type(i4 + b)  # E: {int32}
reveal_type(i4 + AR_f)  # E: Any

reveal_type(u4 + i8)  # E: Any
reveal_type(u4 + i4)  # E: Any
reveal_type(u4 + u8)  # E: {uint64}
reveal_type(u4 + u4)  # E: {uint32}
reveal_type(u4 + i)  # E: Any
reveal_type(u4 + b_)  # E: {uint32}
reveal_type(u4 + b)  # E: {uint32}
reveal_type(u4 + AR_f)  # E: Any

reveal_type(i8 + i4)  # E: {int64}
reveal_type(i4 + i4)  # E: {int32}
reveal_type(i + i4)  # E: {int_}
reveal_type(b_ + i4)  # E: {int32}
reveal_type(b + i4)  # E: {int32}
reveal_type(AR_f + i4)  # E: Any

reveal_type(i8 + u4)  # E: Any
reveal_type(i4 + u4)  # E: Any
reveal_type(u8 + u4)  # E: {uint64}
reveal_type(u4 + u4)  # E: {uint32}
reveal_type(b_ + u4)  # E: {uint32}
reveal_type(b + u4)  # E: {uint32}
reveal_type(i + u4)  # E: Any
reveal_type(AR_f + u4)  # E: Any
import numpy as np
import functools
import sys
import pytest

from numpy.lib.shape_base import (
    apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
    vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
    put_along_axis
    )
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
    )

IS_64BIT = sys.maxsize > 2**32


def _add_keepdims(func):
    """ hack in keepdims behavior into a function taking an axis """
    @functools.wraps(func)
    def wrapped(a, axis, **kwargs):
        res = func(a, axis=axis, **kwargs)
        if axis is None:
            axis = 0  # res is now a scalar, so we can insert this anywhere
        return np.expand_dims(res, axis=axis)
    return wrapped


class TestTakeAlongAxis:
    def test_argequivalent(self):
        """ Test it translates from arg<func> to <func> """
        from numpy.random import rand
        a = rand(3, 4, 5)

        funcs = [
            (np.sort, np.argsort, dict()),
            (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
            (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
            (np.partition, np.argpartition, dict(kth=2)),
        ]

        for func, argfunc, kwargs in funcs:
            for axis in list(range(a.ndim)) + [None]:
                a_func = func(a, axis=axis, **kwargs)
                ai_func = argfunc(a, axis=axis, **kwargs)
                assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))

    def test_invalid(self):
        """ Test it errors when indices has too few dimensions """
        a = np.ones((10, 10))
        ai = np.ones((10, 2), dtype=np.intp)

        # sanity check
        take_along_axis(a, ai, axis=1)

        # not enough indices
        assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
        # bool arrays not allowed
        assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
        # float arrays not allowed
        assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
        # invalid axis
        assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)

    def test_empty(self):
        """ Test everything is ok with empty results, even with inserted dims """
        a = np.ones((3, 4, 5))
        ai = np.ones((3, 0, 5), dtype=np.intp)

        actual = take_along_axis(a, ai, axis=1)
        assert_equal(actual.shape, ai.shape)

    def test_broadcast(self):
        """ Test that non-indexing dimensions are broadcast in both directions """
        a = np.ones((3, 4, 1))
        ai = np.ones((1, 2, 5), dtype=np.intp)
        actual = take_along_axis(a, ai, axis=1)
        assert_equal(actual.shape, (3, 2, 5))


class TestPutAlongAxis:
    def test_replace_max(self):
        a_base = np.array([[10, 30, 20], [60, 40, 50]])

        for axis in list(range(a_base.ndim)) + [None]:
            # we mutate this in the loop
            a = a_base.copy()

            # replace the max with a small value
            i_max = _add_keepdims(np.argmax)(a, axis=axis)
            put_along_axis(a, i_max, -99, axis=axis)

            # find the new minimum, which should max
            i_min = _add_keepdims(np.argmin)(a, axis=axis)

            assert_equal(i_min, i_max)

    def test_broadcast(self):
        """ Test that non-indexing dimensions are broadcast in both directions """
        a = np.ones((3, 4, 1))
        ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
        put_along_axis(a, ai, 20, axis=1)
        assert_equal(take_along_axis(a, ai, axis=1), 20)


class TestApplyAlongAxis:
    def test_simple(self):
        a = np.ones((20, 10), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_simple101(self):
        a = np.ones((10, 101), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_3d(self):
        a = np.arange(27).reshape((3, 3, 3))
        assert_array_equal(apply_along_axis(np.sum, 0, a),
                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])

    def test_preserve_subclass(self):
        def double(row):
            return row * 2

        class MyNDArray(np.ndarray):
            pass

        m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
        expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)

        result = apply_along_axis(double, 0, m)
        assert_(isinstance(result, MyNDArray))
        assert_array_equal(result, expected)

        result = apply_along_axis(double, 1, m)
        assert_(isinstance(result, MyNDArray))
        assert_array_equal(result, expected)

    def test_subclass(self):
        class MinimalSubclass(np.ndarray):
            data = 1

        def minimal_function(array):
            return array.data

        a = np.zeros((6, 3)).view(MinimalSubclass)

        assert_array_equal(
            apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
        )

    def test_scalar_array(self, cls=np.ndarray):
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(np.sum, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

    def test_0d_array(self, cls=np.ndarray):
        def sum_to_0d(x):
            """ Sum x, returning a 0d array of the same class """
            assert_equal(x.ndim, 1)
            return np.squeeze(np.sum(x, keepdims=True))
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(sum_to_0d, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

        res = apply_along_axis(sum_to_0d, 1, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))

    def test_axis_insertion(self, cls=np.ndarray):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            return (x[::-1] * x[1:,None]).view(cls)

        a2d = np.arange(6*3).reshape((6, 3))

        # 2d insertion along first axis
        actual = apply_along_axis(f1to2, 0, a2d)
        expected = np.stack([
            f1to2(a2d[:,i]) for i in range(a2d.shape[1])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 2d insertion along last axis
        actual = apply_along_axis(f1to2, 1, a2d)
        expected = np.stack([
            f1to2(a2d[i,:]) for i in range(a2d.shape[0])
        ], axis=0).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 3d insertion along middle axis
        a3d = np.arange(6*5*3).reshape((6, 5, 3))

        actual = apply_along_axis(f1to2, 1, a3d)
        expected = np.stack([
            np.stack([
                f1to2(a3d[i,:,j]) for i in range(a3d.shape[0])
            ], axis=0)
            for j in range(a3d.shape[2])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

    def test_subclass_preservation(self):
        class MinimalSubclass(np.ndarray):
            pass
        self.test_scalar_array(MinimalSubclass)
        self.test_0d_array(MinimalSubclass)
        self.test_axis_insertion(MinimalSubclass)

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:,None]
            return np.ma.masked_where(res%5==0, res)
        a = np.arange(6*3).reshape((6, 3))
        res = apply_along_axis(f1to2, 0, a)
        assert_(isinstance(res, np.ma.masked_array))
        assert_equal(res.ndim, 3)
        assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask)
        assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask)
        assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask)

    def test_tuple_func1d(self):
        def sample_1d(x):
            return x[1], x[0]
        res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
        assert_array_equal(res, np.array([[2, 1], [4, 3]]))

    def test_empty(self):
        # can't apply_along_axis when there's no chance to call the function
        def never_call(x):
            assert_(False)  # should never be reached
        a = np.empty((0, 0))
        assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
        assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)

        # but it's sometimes ok with some non-zero dimensions
        def empty_to_1(x):
            assert_(len(x) == 0)
            return 1
        a = np.empty((10, 0))
        actual = np.apply_along_axis(empty_to_1, 1, a)
        assert_equal(actual, np.ones(10))
        assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)

    def test_with_iterable_object(self):
        # from issue 5248
        d = np.array([
            [{1, 11}, {2, 22}, {3, 33}],
            [{4, 44}, {5, 55}, {6, 66}]
        ])
        actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
        expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])

        assert_equal(actual, expected)

        # issue 8642 - assert_equal doesn't detect this!
        for i in np.ndindex(actual.shape):
            assert_equal(type(actual[i]), type(expected[i]))


class TestApplyOverAxes:
    def test_simple(self):
        a = np.arange(24).reshape(2, 3, 4)
        aoa_a = apply_over_axes(np.sum, a, [0, 2])
        assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))


class TestExpandDims:
    def test_functionality(self):
        s = (2, 3, 4, 5)
        a = np.empty(s)
        for axis in range(-5, 4):
            b = expand_dims(a, axis)
            assert_(b.shape[axis] == 1)
            assert_(np.squeeze(b).shape == s)

    def test_axis_tuple(self):
        a = np.empty((3, 3, 3))
        assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
        assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
        assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
        assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)

    def test_axis_out_of_range(self):
        s = (2, 3, 4, 5)
        a = np.empty(s)
        assert_raises(np.AxisError, expand_dims, a, -6)
        assert_raises(np.AxisError, expand_dims, a, 5)

        a = np.empty((3, 3, 3))
        assert_raises(np.AxisError, expand_dims, a, (0, -6))
        assert_raises(np.AxisError, expand_dims, a, (0, 5))

    def test_repeated_axis(self):
        a = np.empty((3, 3, 3))
        assert_raises(ValueError, expand_dims, a, axis=(1, 1))

    def test_subclasses(self):
        a = np.arange(10).reshape((2, 5))
        a = np.ma.array(a, mask=a%3 == 0)

        expanded = np.expand_dims(a, axis=1)
        assert_(isinstance(expanded, np.ma.MaskedArray))
        assert_equal(expanded.shape, (2, 1, 5))
        assert_equal(expanded.mask.shape, (2, 1, 5))


class TestArraySplit:
    def test_integer_0_split(self):
        a = np.arange(10)
        assert_raises(ValueError, array_split, a, 0)

    def test_integer_split(self):
        a = np.arange(10)
        res = array_split(a, 1)
        desired = [np.arange(10)]
        compare_results(res, desired)

        res = array_split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

        res = array_split(a, 3)
        desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
        compare_results(res, desired)

        res = array_split(a, 4)
        desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
                   np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 5)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 6)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 7)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 8)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
                   np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 9)
        desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
                   np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 11)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)

    def test_integer_split_2D_rows(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=0)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

        # Same thing for manual splits:
        res = array_split(a, [0, 1, 2], axis=0)
        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
               np.array([np.arange(10)])]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)

    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis """
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # perhaps should check higher dimensions

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    def test_integer_split_2D_rows_greater_max_int32(self):
        a = np.broadcast_to([0], (1 << 32, 2))
        res = array_split(a, 4)
        chunk = np.broadcast_to([0], (1 << 30, 2))
        tgt = [chunk] * 4
        for i in range(len(tgt)):
            assert_equal(res[i].shape, tgt[i].shape)

    def test_index_split_simple(self):
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_low_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_high_bound(self):
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)


class TestSplit:
    # The split function is essentially the same as array_split,
    # except that it tests if splitting will result in an
    # equal split. Only test for this case.

    def test_equal_split(self):
        a = np.arange(10)
        res = split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

    def test_unequal_split(self):
        a = np.arange(10)
        assert_raises(ValueError, split, a, 3)


class TestColumnStack:
    def test_non_iterable(self):
        assert_raises(TypeError, column_stack, 1)

    def test_1D_arrays(self):
        # example from docstring
        a = np.array((1, 2, 3))
        b = np.array((2, 3, 4))
        expected = np.array([[1, 2],
                             [2, 3],
                             [3, 4]])
        actual = np.column_stack((a, b))
        assert_equal(actual, expected)

    def test_2D_arrays(self):
        # same as hstack 2D docstring example
        a = np.array([[1], [2], [3]])
        b = np.array([[2], [3], [4]])
        expected = np.array([[1, 2],
                             [2, 3],
                             [3, 4]])
        actual = np.column_stack((a, b))
        assert_equal(actual, expected)

    def test_generator(self):
        with assert_warns(FutureWarning):
            column_stack((np.arange(3) for _ in range(2)))


class TestDstack:
    def test_non_iterable(self):
        assert_raises(TypeError, dstack, 1)

    def test_0D_array(self):
        a = np.array(1)
        b = np.array(2)
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = np.array([1])
        b = np.array([2])
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = np.array([[1], [2]])
        b = np.array([[1], [2]])
        res = dstack([a, b])
        desired = np.array([[[1, 1]], [[2, 2, ]]])
        assert_array_equal(res, desired)

    def test_2D_array2(self):
        a = np.array([1, 2])
        b = np.array([1, 2])
        res = dstack([a, b])
        desired = np.array([[[1, 1], [2, 2]]])
        assert_array_equal(res, desired)

    def test_generator(self):
        with assert_warns(FutureWarning):
            dstack((np.arange(3) for _ in range(2)))


# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
class TestHsplit:
    """Only testing for integer splits."""
    def test_non_iterable(self):
        assert_raises(ValueError, hsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        try:
            hsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        res = hsplit(a, 2)
        desired = [np.array([1, 2]), np.array([3, 4])]
        compare_results(res, desired)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = hsplit(a, 2)
        desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
        compare_results(res, desired)


class TestVsplit:
    """Only testing for integer splits."""
    def test_non_iterable(self):
        assert_raises(ValueError, vsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, vsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        try:
            vsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        res = vsplit(a, 2)
        desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
        compare_results(res, desired)


class TestDsplit:
    # Only testing for integer splits.
    def test_non_iterable(self):
        assert_raises(ValueError, dsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, dsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        assert_raises(ValueError, dsplit, a, 2)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4],
                      [1, 2, 3, 4]])
        try:
            dsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_3D_array(self):
        a = np.array([[[1, 2, 3, 4],
                       [1, 2, 3, 4]],
                      [[1, 2, 3, 4],
                       [1, 2, 3, 4]]])
        res = dsplit(a, 2)
        desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
                   np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
        compare_results(res, desired)


class TestSqueeze:
    def test_basic(self):
        from numpy.random import rand

        a = rand(20, 10, 10, 1, 1)
        b = rand(20, 1, 10, 1, 20)
        c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
        assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
        assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))

        # Squeezing to 0-dim should still give an ndarray
        a = [[[1.5]]]
        res = np.squeeze(a)
        assert_equal(res, 1.5)
        assert_equal(res.ndim, 0)
        assert_equal(type(res), np.ndarray)


class TestKron:
    def test_return_type(self):
        class myarray(np.ndarray):
            __array_priority__ = 0.0

        a = np.ones([2, 2])
        ma = myarray(a.shape, a.dtype, a.data)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(ma, ma)), myarray)
        assert_equal(type(kron(a, ma)), np.ndarray)
        assert_equal(type(kron(ma, a)), myarray)


class TestTile:
    def test_basic(self):
        a = np.array([0, 1, 2])
        b = [[1, 2], [3, 4]]
        assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
        assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
        assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
        assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
        assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
                                       [1, 2, 1, 2], [3, 4, 3, 4]])

    def test_tile_one_repetition_on_array_gh4679(self):
        a = np.arange(5)
        b = tile(a, 1)
        b += 2
        assert_equal(a, np.arange(5))

    def test_empty(self):
        a = np.array([[[]]])
        b = np.array([[], []])
        c = tile(b, 2).shape
        d = tile(a, (3, 2, 5)).shape
        assert_equal(c, (2, 0))
        assert_equal(d, (3, 2, 0))

    def test_kroncompare(self):
        from numpy.random import randint

        reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
        shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
        for s in shape:
            b = randint(0, 10, size=s)
            for r in reps:
                a = np.ones(r, b.dtype)
                large = tile(b, r)
                klarge = kron(a, b)
                assert_equal(large, klarge)


class TestMayShareMemory:
    def test_basic(self):
        d = np.ones((50, 60))
        d2 = np.ones((30, 60, 6))
        assert_(np.may_share_memory(d, d))
        assert_(np.may_share_memory(d, d[::-1]))
        assert_(np.may_share_memory(d, d[::2]))
        assert_(np.may_share_memory(d, d[1:, ::-1]))

        assert_(not np.may_share_memory(d[::-1], d2))
        assert_(not np.may_share_memory(d[::2], d2))
        assert_(not np.may_share_memory(d[1:, ::-1], d2))
        assert_(np.may_share_memory(d2[1:, ::-1], d2))


# Utility
def compare_results(res, desired):
    for i in range(len(desired)):
        assert_array_equal(res[i], desired[i])
mhvk/numpy
numpy/lib/tests/test_shape_base.py
numpy/typing/tests/data/reveal/arithmetic.py
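The input file above is a static-typing test: each `# E:` comment records the type mypy's reveal_type is expected to report for that expression, with placeholders like {float64} substituted by the test runner. As an illustration only (not part of this dataset row), a few of those promotion rules can also be confirmed at runtime, since the stubs are meant to mirror NumPy's actual scalar promotion:

import numpy as np

# c16 + f8 is annotated {complex128}; runtime agrees:
assert (np.complex128(1j) + np.float64(1.0)).dtype == np.dtype(np.complex128)

# abs(c16) is annotated {float64}: the magnitude of a complex128 is a float64.
assert isinstance(abs(np.complex128(3 + 4j)), np.float64)

# td / td is annotated {float64}: dividing two timedelta64s yields a plain ratio.
assert isinstance(np.timedelta64(4, 's') / np.timedelta64(2, 's'), np.float64)

Cases annotated Any (e.g. i8 + u8) are ones where the stubs deliberately give up on a precise result type, so no runtime check is implied for them.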
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal


def buffer_length(arr):
    if isinstance(arr, str):
        if not arr:
            charmax = 0
        else:
            charmax = max([ord(c) for c in arr])
        if charmax < 256:
            size = 1
        elif charmax < 65536:
            size = 2
        else:
            size = 4
        return size * len(arr)
    v = memoryview(arr)
    if v.shape is None:
        return len(v) * v.itemsize
    else:
        return np.prod(v.shape) * v.itemsize


# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
ucs2_value = u'\u0900'
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
ucs4_value = u'\U00100900'


def test_string_cast():
    str_arr = np.array(["1234", "1234\0\0"], dtype='S')
    uni_arr1 = str_arr.astype('>U')
    uni_arr2 = str_arr.astype('<U')

    assert_(str_arr != uni_arr1)
    assert_(str_arr != uni_arr2)
    assert_array_equal(uni_arr1, uni_arr2)


############################################################
#                    Creation tests
############################################################

class CreateZeros:
    """Check the creation of zero-valued arrays"""

    def content_check(self, ua, ua_scalar, nbytes):
        # Check the length of the unicode base type
        assert_(int(ua.dtype.str[2:]) == self.ulen)
        # Check the length of the data buffer
        assert_(buffer_length(ua) == nbytes)
        # Small check that data in array element is ok
        assert_(ua_scalar == u'')
        # Encode to ascii and double check
        assert_(ua_scalar.encode('ascii') == b'')
        # Check buffer lengths for scalars
        assert_(buffer_length(ua_scalar) == 0)

    def test_zeros0D(self):
        # Check creation of 0-dimensional objects
        ua = np.zeros((), dtype='U%s' % self.ulen)
        self.content_check(ua, ua[()], 4*self.ulen)

    def test_zerosSD(self):
        # Check creation of single-dimensional objects
        ua = np.zeros((2,), dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0], 4*self.ulen*2)
        self.content_check(ua, ua[1], 4*self.ulen*2)

    def test_zerosMD(self):
        # Check creation of multi-dimensional objects
        ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
        self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)


class TestCreateZeros_1(CreateZeros):
    """Check the creation of zero-valued arrays (size 1)"""
    ulen = 1


class TestCreateZeros_2(CreateZeros):
    """Check the creation of zero-valued arrays (size 2)"""
    ulen = 2


class TestCreateZeros_1009(CreateZeros):
    """Check the creation of zero-valued arrays (size 1009)"""
    ulen = 1009


class CreateValues:
    """Check the creation of unicode arrays with values"""

    def content_check(self, ua, ua_scalar, nbytes):
        # Check the length of the unicode base type
        assert_(int(ua.dtype.str[2:]) == self.ulen)
        # Check the length of the data buffer
        assert_(buffer_length(ua) == nbytes)
        # Small check that data in array element is ok
        assert_(ua_scalar == self.ucs_value*self.ulen)
        # Encode to UTF-8 and double check
        assert_(ua_scalar.encode('utf-8') ==
                (self.ucs_value*self.ulen).encode('utf-8'))
        # Check buffer lengths for scalars
        if self.ucs_value == ucs4_value:
            # In UCS2, the \U0010FFFF will be represented using a
            # surrogate *pair*
            assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
        else:
            # In UCS2, the \uFFFF will be represented using a
            # regular 2-byte word
            assert_(buffer_length(ua_scalar) == 2*self.ulen)

    def test_values0D(self):
        # Check creation of 0-dimensional objects with values
        ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
        self.content_check(ua, ua[()], 4*self.ulen)

    def test_valuesSD(self):
        # Check creation of single-dimensional objects with values
        ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0], 4*self.ulen*2)
        self.content_check(ua, ua[1], 4*self.ulen*2)

    def test_valuesMD(self):
        # Check creation of multi-dimensional objects with values
        ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
                      dtype='U%s' % self.ulen)
        self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
        self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)


class TestCreateValues_1_UCS2(CreateValues):
    """Check the creation of valued arrays (size 1, UCS2 values)"""
    ulen = 1
    ucs_value = ucs2_value


class TestCreateValues_1_UCS4(CreateValues):
    """Check the creation of valued arrays (size 1, UCS4 values)"""
    ulen = 1
    ucs_value = ucs4_value


class TestCreateValues_2_UCS2(CreateValues):
    """Check the creation of valued arrays (size 2, UCS2 values)"""
    ulen = 2
    ucs_value = ucs2_value


class TestCreateValues_2_UCS4(CreateValues):
    """Check the creation of valued arrays (size 2, UCS4 values)"""
    ulen = 2
    ucs_value = ucs4_value


class TestCreateValues_1009_UCS2(CreateValues):
    """Check the creation of valued arrays (size 1009, UCS2 values)"""
    ulen = 1009
    ucs_value = ucs2_value


class TestCreateValues_1009_UCS4(CreateValues):
    """Check the creation of valued arrays (size 1009, UCS4 values)"""
    ulen = 1009
    ucs_value = ucs4_value


############################################################
#                    Assignment tests
############################################################

class AssignValues:
    """Check the assignment of unicode arrays with values"""

    def content_check(self, ua, ua_scalar, nbytes):
        # Check the length of the unicode base type
        assert_(int(ua.dtype.str[2:]) == self.ulen)
        # Check the length of the data buffer
        assert_(buffer_length(ua) == nbytes)
        # Small check that data in array element is ok
        assert_(ua_scalar == self.ucs_value*self.ulen)
        # Encode to UTF-8 and double check
        assert_(ua_scalar.encode('utf-8') ==
                (self.ucs_value*self.ulen).encode('utf-8'))
        # Check buffer lengths for scalars
        if self.ucs_value == ucs4_value:
            # In UCS2, the \U0010FFFF will be represented using a
            # surrogate *pair*
            assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
        else:
            # In UCS2, the \uFFFF will be represented using a
            # regular 2-byte word
            assert_(buffer_length(ua_scalar) == 2*self.ulen)

    def test_values0D(self):
        # Check assignment of 0-dimensional objects with values
        ua = np.zeros((), dtype='U%s' % self.ulen)
        ua[()] = self.ucs_value*self.ulen
        self.content_check(ua, ua[()], 4*self.ulen)

    def test_valuesSD(self):
        # Check assignment of single-dimensional objects with values
        ua = np.zeros((2,), dtype='U%s' % self.ulen)
        ua[0] = self.ucs_value*self.ulen
        self.content_check(ua, ua[0], 4*self.ulen*2)
        ua[1] = self.ucs_value*self.ulen
        self.content_check(ua, ua[1], 4*self.ulen*2)

    def test_valuesMD(self):
        # Check assignment of multi-dimensional objects with values
        ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
        ua[0, 0, 0] = self.ucs_value*self.ulen
        self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
        ua[-1, -1, -1] = self.ucs_value*self.ulen
        self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)


class TestAssignValues_1_UCS2(AssignValues):
    """Check the assignment of valued arrays (size 1, UCS2 values)"""
    ulen = 1
    ucs_value = ucs2_value


class TestAssignValues_1_UCS4(AssignValues):
    """Check the assignment of valued arrays (size 1, UCS4 values)"""
    ulen = 1
    ucs_value = ucs4_value


class TestAssignValues_2_UCS2(AssignValues):
    """Check the assignment of valued arrays (size 2, UCS2 values)"""
    ulen = 2
    ucs_value = ucs2_value


class TestAssignValues_2_UCS4(AssignValues):
    """Check the assignment of valued arrays (size 2, UCS4 values)"""
    ulen = 2
    ucs_value = ucs4_value


class TestAssignValues_1009_UCS2(AssignValues):
    """Check the assignment of valued arrays (size 1009, UCS2 values)"""
    ulen = 1009
    ucs_value = ucs2_value


class TestAssignValues_1009_UCS4(AssignValues):
    """Check the assignment of valued arrays (size 1009, UCS4 values)"""
    ulen = 1009
    ucs_value = ucs4_value


############################################################
#                    Byteorder tests
############################################################

class ByteorderValues:
    """Check the byteorder of unicode arrays in round-trip conversions"""

    def test_values0D(self):
        # Check byteorder of 0-dimensional objects
        ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
        ua2 = ua.newbyteorder()
        # This changes the interpretation of the data region (but not the
        # actual data), therefore the returned scalars are not
        # the same (they are byte-swapped versions of each other).
        assert_(ua[()] != ua2[()])
        ua3 = ua2.newbyteorder()
        # Arrays must be equal after the round-trip
        assert_equal(ua, ua3)

    def test_valuesSD(self):
        # Check byteorder of single-dimensional objects
        ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
        ua2 = ua.newbyteorder()
        assert_((ua != ua2).all())
        assert_(ua[-1] != ua2[-1])
        ua3 = ua2.newbyteorder()
        # Arrays must be equal after the round-trip
        assert_equal(ua, ua3)

    def test_valuesMD(self):
        # Check byteorder of multi-dimensional objects
        ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
                      dtype='U%s' % self.ulen)
        ua2 = ua.newbyteorder()
        assert_((ua != ua2).all())
        assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
        ua3 = ua2.newbyteorder()
        # Arrays must be equal after the round-trip
        assert_equal(ua, ua3)

    def test_values_cast(self):
        # Check byteorder of when casting the array for a strided and
        # contiguous array:
        test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
        test2 = np.repeat(test1, 2)[::2]
        for ua in (test1, test2):
            ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
            assert_((ua == ua2).all())
            assert_(ua[-1] == ua2[-1])
            ua3 = ua2.astype(dtype=ua.dtype)
            # Arrays must be equal after the round-trip
            assert_equal(ua, ua3)

    def test_values_updowncast(self):
        # Check byteorder of when casting the array to a longer and shorter
        # string length for strided and contiguous arrays
        test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
        test2 = np.repeat(test1, 2)[::2]
        for ua in (test1, test2):
            # Cast to a longer type with zero padding
            longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
            ua2 = ua.astype(dtype=longer_type)
            assert_((ua == ua2).all())
            assert_(ua[-1] == ua2[-1])
            # Cast back again with truncating:
            ua3 = ua2.astype(dtype=ua.dtype)
            # Arrays must be equal after the round-trip
            assert_equal(ua, ua3)


class TestByteorder_1_UCS2(ByteorderValues):
    """Check the byteorder in unicode (size 1, UCS2 values)"""
    ulen = 1
    ucs_value = ucs2_value


class TestByteorder_1_UCS4(ByteorderValues):
    """Check the byteorder in unicode (size 1, UCS4 values)"""
    ulen = 1
    ucs_value = ucs4_value


class TestByteorder_2_UCS2(ByteorderValues):
    """Check the byteorder in unicode (size 2, UCS2 values)"""
    ulen = 2
    ucs_value = ucs2_value


class TestByteorder_2_UCS4(ByteorderValues):
    """Check the byteorder in unicode (size 2, UCS4 values)"""
    ulen = 2
    ucs_value = ucs4_value


class TestByteorder_1009_UCS2(ByteorderValues):
    """Check the byteorder in unicode (size 1009, UCS2 values)"""
    ulen = 1009
    ucs_value = ucs2_value


class TestByteorder_1009_UCS4(ByteorderValues):
    """Check the byteorder in unicode (size 1009, UCS4 values)"""
    ulen = 1009
    ucs_value = ucs4_value
mhvk/numpy
numpy/lib/tests/test_shape_base.py
numpy/core/tests/test_unicode.py
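The unicode tests in the row above hinge on the UCS2-vs-UCS4 distinction: a BMP code point fits in one 2-byte UTF-16 unit, while a non-BMP code point needs a surrogate pair, and NumPy's 'U' dtype always stores UCS4 (4 bytes per character). A small standalone illustration of that bookkeeping (not part of the dataset) is:

import numpy as np

ucs2_value = '\u0900'      # BMP code point: one 2-byte unit in UTF-16
ucs4_value = '\U00100900'  # non-BMP: a surrogate *pair* (4 bytes) in UTF-16

assert len(ucs2_value.encode('utf-16-le')) == 2
assert len(ucs4_value.encode('utf-16-le')) == 4

# NumPy's 'U' dtype is always UCS4: 4 bytes per character, regardless of
# how the Python interpreter represents the str internally.
arr = np.array(ucs4_value, dtype='U1')
assert arr.dtype.itemsize == 4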
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ import collections from collections import OrderedDict, abc import functools from io import StringIO import itertools import sys import warnings from textwrap import dedent from typing import FrozenSet, List, Optional, Set, Type, Union import numpy as np import numpy.ma as ma from pandas._config import get_option from pandas._libs import lib, algos as libalgos from pandas.util._decorators import (Appender, Substitution, rewrite_axis_style_signature, deprecate_kwarg) from pandas.util._validators import (validate_bool_kwarg, validate_axis_style_args) from pandas.compat import PY36, raise_with_traceback from pandas.compat.numpy import function as nv from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.dtypes.cast import ( maybe_upcast, cast_scalar_to_array, infer_dtype_from_scalar, maybe_cast_to_datetime, maybe_infer_to_datetimelike, maybe_convert_platform, maybe_downcast_to_dtype, invalidate_string_dtypes, coerce_to_dtypes, maybe_upcast_putmask, find_common_type) from pandas.core.dtypes.common import ( is_dict_like, is_datetime64tz_dtype, is_object_dtype, is_extension_type, is_extension_array_dtype, is_datetime64_any_dtype, is_bool_dtype, is_integer_dtype, is_float_dtype, is_integer, is_scalar, is_dtype_equal, needs_i8_conversion, infer_dtype_from_object, ensure_float64, ensure_int64, ensure_platform_int, is_list_like, is_nested_list_like, is_iterator, is_sequence, is_named_tuple) from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, ABCIndexClass, ABCMultiIndex) from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms from pandas.core import common as com from pandas.core import nanops from pandas.core import ops from pandas.core.accessor import CachedAccessor from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import ( DatetimeLikeArrayMixin as DatetimeLikeArray ) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, ensure_index, ensure_index_from_sequences) from pandas.core.indexes import base as ibase from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import BlockManager from pandas.core.internals.construction import ( masked_rec_array_to_mgr, get_names_from_index, to_arrays, reorder_arrays, init_ndarray, init_dict, arrays_to_mgr, sanitize_index) from pandas.core.series import Series from pandas.io.formats import console from pandas.io.formats import format as fmt from pandas.io.formats.printing import pprint_thing import pandas.plotting # --------------------------------------------------------------------- # Docstring templates _shared_doc_kwargs = dict( axes='index, columns', klass='DataFrame', axes_single_arg="{0 or 'index', 1 or 'columns'}", axis="""axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index': apply function to each column. If 1 or 'columns': apply function to each row.""", optional_by=""" by : str or list of str Name or list of names to sort by. 
- if `axis` is 0 or `'index'` then `by` may contain index levels and/or column labels - if `axis` is 1 or `'columns'` then `by` may contain column levels and/or index labels .. versionchanged:: 0.23.0 Allow specifying index or column level names.""", versionadded_to_excel='', optional_labels="""labels : array-like, optional New labels / index to conform the axis specified by 'axis' to.""", optional_axis="""axis : int or str, optional Axis to target. Can be either the axis name ('index', 'columns') or number (0, 1).""", ) _numeric_only_doc = """numeric_only : boolean, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame or named Series objects with a database-style join. The join is done on columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. Parameters ----------%s right : DataFrame or named Series Object to merge with. how : {'left', 'right', 'outer', 'inner'}, default 'inner' Type of merge to be performed. * left: use only keys from left frame, similar to a SQL left outer join; preserve key order. * right: use only keys from right frame, similar to a SQL right outer join; preserve key order. * outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. * inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on : label or list Column or index level names to join on. These must be found in both DataFrames. If `on` is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index : bool, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index : bool, default False Use the index from the right DataFrame as the join key. Same caveats as left_index. sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword). suffixes : tuple of (str, str), default ('_x', '_y') Suffix to apply to overlapping column names in the left and right side, respectively. To raise an exception on overlapping columns use (False, False). copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False If True, adds a column to output DataFrame called "_merge" with information on the source of each row. If string, column with information on source of each row will be added to output DataFrame, and column will be named value of string. 
Information column is Categorical-type and takes on a value of "left_only" for observations whose merge key only appears in 'left' DataFrame, "right_only" for observations whose merge key only appears in 'right' DataFrame, and "both" if the observation's merge key is found in both. validate : str, optional If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. .. versionadded:: 0.21.0 Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- merge_ordered : Merge with optional filling/interpolation. merge_asof : Merge on nearest keys. DataFrame.join : Similar method using indices. Notes ----- Support for specifying index levels as the `on`, `left_on`, and `right_on` parameters was added in version 0.23.0 Support for merging named Series objects was added in version 0.24.0 Examples -------- >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}) >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> df1.merge(df2, left_on='lkey', right_on='rkey') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2 with specified left and right suffixes appended to any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', ... suffixes=('_left', '_right')) lkey value_left rkey value_right 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2, but raise an exception if the DataFrames have any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) Traceback (most recent call last): ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') """ # ----------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame): """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects .. versionchanged :: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. 
DataFrame.from_dict : From dicts of Series, arrays, or dicts. DataFrame.from_items : From sequence of (key, value) pairs read_csv, pandas.read_table, pandas.read_clipboard. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 """ @property def _constructor(self): return DataFrame _constructor_sliced = Series # type: Type[Series] _deprecations = NDFrame._deprecations | frozenset([ 'get_value', 'set_value', 'from_csv', 'from_items' ]) # type: FrozenSet[str] _accessors = set() # type: Set[str] @property def _constructor_expanddim(self): raise NotImplementedError("Not supported for DataFrames!") # ---------------------------------------------------------------------- # Constructors def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._data if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy) # a masked array else: mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: data_columns = list(data.dtype.names) data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns mgr = init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None) is not None: mgr = init_dict({data.name: data}, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) # For data is list-like, or Iterable (will consume into list) elif (isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes))): if not isinstance(data, abc.Sequence): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = get_names_from_index(data) elif isinstance(data[0], Categorical): index = ibase.default_index(len(data[0])) else: index = ibase.default_index(len(data)) mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) else: mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = init_dict({}, index, columns, dtype=dtype) else: try: arr = np.array(data, dtype=dtype, copy=copy) except (ValueError, TypeError) as e: exc = TypeError('DataFrame constructor called with ' 'incompatible data and 
dtype: {e}'.format(e=e)) raise_with_traceback(exc) if arr.ndim == 0 and index is not None and columns is not None: values = cast_scalar_to_array((len(index), len(columns)), data, dtype=dtype) mgr = init_ndarray(values, index, columns, dtype=values.dtype, copy=False) else: raise ValueError('DataFrame constructor not properly called!') NDFrame.__init__(self, mgr, fastpath=True) # ---------------------------------------------------------------------- @property def axes(self): """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) @property def _is_homogeneous_type(self): """ Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if self._data.any_extension_types: return len({block.dtype for block in self._data.blocks}) == 1 else: return not self._data.is_mixed_type # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self): """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width=False): """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case off non-interactive session, no boundaries apply. `ignore_width` is here so ipnb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if ((max_columns and nb_columns > max_columns) or ((not ignore_width) and width and nb_columns > (width // 2))): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or not console.in_interactive_session(): return True if (get_option('display.width') is not None or console.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if not (max_rows is None): # unlimited rows # min of two, where one may be None d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(l) for l in value.split('\n')) return repr_width < width def _info_repr(self): """ True if the repr should show the info view. """ info_repr_option = (get_option("display.large_repr") == "info") return info_repr_option and not (self._repr_fits_horizontal_() and self._repr_fits_vertical_()) def __repr__(self): """ Return a string representation for a particular DataFrame. """ buf = StringIO("") if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() else: width = None self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols, line_width=width, show_dimensions=show_dimensions) return buf.getvalue() def _repr_html_(self): """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace('<', r'&lt;', 1) val = val.replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, notebook=True) else: return None @Substitution(header='Write out the column names. If a list of strings ' 'is given, it is assumed to be aliases for the ' 'column names', col_space_type='int', col_space='The minimum width of each column') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', line_width=None): """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. 
Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, line_width=line_width) formatter.to_string() if buf is None: result = formatter.buf.getvalue() return result # ---------------------------------------------------------------------- @property def style(self): """ Property returning a Styler object containing methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler """ from pandas.io.formats.style import Styler return Styler(self) def iteritems(self): r""" Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content, sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ if self.columns.is_unique and hasattr(self, '_item_cache'): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self): """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- itertuples : Iterate over DataFrame rows as namedtuples of the values. iteritems : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect.
""" columns = self.columns klass = self._constructor_sliced for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k) yield k, s def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.iteritems : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) # Python 3 supports at most 255 arguments to constructor if name is not None and len(self.columns) + index < 256: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) items = iteritems def __len__(self): """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other): """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Serie. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. 
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method gives the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method also works if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. >>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, ' '{s} vs {r}'.format(s=lvals.shape, r=rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: {oth}'.format(oth=type(other))) def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.T.dot(np.transpose(other)).T # ---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict(cls, data, orient='columns', dtype=None, columns=None): """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from ndarray (structured dtype), list of tuples, dict, or DataFrame. DataFrame : DataFrame object creation using constructor.
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == 'columns': if columns is not None: raise ValueError("cannot use columns parameter with " "orient='columns'") else: # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogenous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. 
Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', [ list(map(com.maybe_box_datetimelike, t)) for t in self.itertuples(index=False, name=None) ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: raise ValueError( "DataFrame index must be unique for orient='index'." ) return into_c((t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None)) else: raise ValueError("orient '{o}' not understood".format(o=orient)) def to_gbq(self, destination_table, project_id=None, chunksize=None, reauth=False, if_exists='fail', auth_local_webserver=False, table_schema=None, location=None, progress_bar=True, credentials=None, verbose=None, private_key=None): """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. 
Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists, do nothing. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 verbose : bool, deprecated Deprecated in pandas-gbq version 0.4.0. Use the `logging module to adjust verbosity instead <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. private_key : str, deprecated Deprecated in pandas-gbq version 0.8.0. Use the ``credentials`` parameter and :func:`google.oauth2.service_account.Credentials.from_service_account_info` or :func:`google.oauth2.service_account.Credentials.from_service_account_file` instead. Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. 
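Examples
--------
Write a frame to a BigQuery table. The dataset, table, and project names
below are placeholders, and the call requires the optional pandas-gbq
package to be installed:

>>> df = pd.DataFrame({'my_string': list('abc'), 'my_int': [1, 2, 3]})
>>> df.to_gbq('my_dataset.my_table',
...           project_id='my-project')  # doctest: +SKIP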
""" from pandas.io import gbq gbq.to_gbq(self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, verbose=verbose, private_key=private_key) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float=False, nrows=None): """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- DataFrame """ # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns = [] for k, v in data.items(): if k in columns: arr_columns.append(k) arrays.append(v) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) if columns is not None: columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float) arr_columns = ensure_index(arr_columns) if columns is not None: columns = ensure_index(columns) else: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if (isinstance(index, str) or not hasattr(index, "__iter__")): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) except Exception: result_index = index if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): 
""" Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None .. deprecated:: 0.23.0 Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = "<S{}".format(df.index.str.len().max()) >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if convert_datetime64 is not None: warnings.warn("The 'convert_datetime64' parameter is " "deprecated and will be removed in a future " "version", FutureWarning, stacklevel=2) if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. 
ix_vals = list(map(np.array, zip(*self.index.values))) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [self[c].get_values() for c in self.columns] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = ("Invalid dtype {dtype} specified for " "{element} {name}").format(dtype=dtype_mapping, element=element, name=name) raise ValueError(msg) return np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} ) @classmethod def from_items(cls, items, columns=None, orient='columns'): """ Construct a DataFrame from a list of tuples. .. deprecated:: 0.23.0 `from_items` is deprecated and will be removed in a future version. Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` instead. :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- DataFrame """ warnings.warn("from_items is deprecated. Please use " "DataFrame.from_dict(dict(items), ...) instead. 
" "DataFrame.from_dict(OrderedDict(items)) may be used to " "preserve the key order.", FutureWarning, stacklevel=2) keys, values = zip(*items) if orient == 'columns': if columns is not None: columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = ensure_index(keys) arrays = values # GH 17312 # Provide more informative error msg when scalar values passed try: return cls._from_arrays(arrays, columns, None) except ValueError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed try: arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) except TypeError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'") @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=None, infer_datetime_format=False): """ Read CSV file. .. deprecated:: 0.21.0 Use :func:`read_csv` instead. It is preferable to use the more powerful :func:`read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a DataFrame of time series data. This method only differs from the preferred :func:`read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index by default) - `parse_dates` is ``True`` instead of ``False`` (try parsing the index as datetime by default) So a ``pd.DataFrame.from_csv(path)`` can be replaced by ``pd.read_csv(path, index_col=0, parse_dates=True)``. Parameters ---------- path : string file path or file handle / StringIO header : int, default 0 Row to use as header (skip prior rows) sep : string, default ',' Field delimiter index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex is used. Different default from read_table parse_dates : boolean, default True Parse dates. Different default from read_table tupleize_cols : boolean, default False write multi_index columns as a list of tuples (if True) or new (expanded format) if False) infer_datetime_format : boolean, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. Returns ------- DataFrame See Also -------- read_csv """ warnings.warn("from_csv is deprecated. Please use read_csv(...) " "instead. 
Note that some of the default arguments are " "different, so please refer to the documentation " "for from_csv when changing your function calls", FutureWarning, stacklevel=2) from pandas.io.parsers import read_csv return read_csv(path, header=header, sep=sep, parse_dates=parse_dates, index_col=index_col, encoding=encoding, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame. .. deprecated:: 0.25.0 Implements the sparse version of the DataFrame, meaning that any data matching a specific value is omitted in the representation. The sparse DataFrame allows for more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() # doctest: +SKIP >>> sdf # doctest: +SKIP 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) # doctest: +SKIP <class 'pandas.core.sparse.frame.SparseDataFrame'> """ warnings.warn("DataFrame.to_sparse is deprecated and will be removed " "in a future version", FutureWarning, stacklevel=2) from pandas.core.sparse.api import SparseDataFrame with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="SparseDataFrame") return SparseDataFrame(self._series, index=self.index, columns=self.columns, default_kind=kind, default_fill_value=fill_value) @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None, version=114, convert_strl=None): """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- fname : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported. byteorder : str Can be ">", "<", "little", or "big". Default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller.
variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 version : {114, 117}, default 114 Version to use in the output dta file. Version 114 can be used read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 114 limits string variables to 244 characters or fewer while 117 allows strings with lengths up to 2,000,000 characters. .. versionadded:: 0.23.0 convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. .. versionadded:: 0.23.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. versionadded:: 0.19.0 See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ kwargs = {} if version not in (114, 117): raise ValueError('Only formats 114 and 117 supported.') if version == 114: if convert_strl is not None: raise ValueError('strl support is only available when using ' 'format 117') from pandas.io.stata import StataWriter as statawriter else: from pandas.io.stata import StataWriter117 as statawriter kwargs['convert_strl'] = convert_strl writer = statawriter(fname, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, **kwargs) writer.write_file() def to_feather(self, fname): """ Write out the binary feather-format for DataFrames. .. versionadded:: 0.20.0 Parameters ---------- fname : str string file path """ from pandas.io.feather_format import to_feather to_feather(self, fname) def to_parquet(self, fname, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- fname : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the behavior depends on the chosen engine. .. 
versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine, compression=compression, index=index, partition_cols=partition_cols, **kwargs) @Substitution(header='Whether to print column labels, default True', col_space_type='str or int', col_space='The minimum width of each column in CSS length ' 'units. An int is assumed to be px units.\n\n' ' .. versionadded:: 0.25.0\n' ' Ability to use str') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links) # TODO: a generic formatter wld b in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue() # ---------------------------------------------------------------------- def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): """ Print a concise summary of a DataFrame. 
@Substitution(header='Whether to print column labels, default True', col_space_type='str or int', col_space='The minimum width of each column in CSS length ' 'units. An int is assumed to be px units.\n\n' ' .. versionadded:: 0.25.0\n' ' Ability to use str') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if (justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS): raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, bold_rows=bold_rows, escape=escape, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, table_id=table_id, render_links=render_links) # TODO: a generic formatter would be in DataFrameFormatter formatter.to_html(classes=classes, notebook=notebook, border=border) if buf is None: return formatter.buf.getvalue() # ---------------------------------------------------------------------- def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None): """ Print a concise summary of a DataFrame. This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. By default, the setting in ``pandas.options.display.max_info_columns`` is followed. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. By default, the setting in ``pandas.options.display.max_info_columns`` is used. memory_usage : bool, str, optional Specifies whether total memory usage of the DataFrame elements (including the index) should be displayed. By default, this follows the ``pandas.options.display.memory_usage`` setting. True always shows memory usage. False never shows memory usage. A value of 'deep' is equivalent to "True with deep introspection". Memory usage is shown in human-readable units (base-2 representation). Without deep introspection a memory estimation is made based on column dtype and number of rows, assuming values consume the same memory amount for corresponding dtypes. With deep memory introspection, a real memory usage calculation is performed at the cost of computational resources. null_counts : bool, optional Whether to show the non-null counts. By default, this is shown only if the frame is smaller than ``pandas.options.display.max_info_rows`` and ``pandas.options.display.max_info_columns``. A value of True always shows the counts, and False never shows the counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. DataFrame.memory_usage: Memory usage of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, ... "float_col": float_values}) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information about all columns: >>> df.info(verbose=True) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): int_col 5 non-null int64 text_col 5 non-null object float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Prints a summary of the column count and dtypes but no per-column information: >>> df.info(verbose=False) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get the buffer content and write it to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open("df_info.txt", "w", ... encoding="utf-8") as f: # doctest: +SKIP ... f.write(s) 260 The `memory_usage` parameter allows deep introspection mode, especially useful for big DataFrames and for fine-tuning memory optimization: >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) >>> df = pd.DataFrame({ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), ...
'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) ... }) >>> df.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): column_1 1000000 non-null object column_2 1000000 non-null object column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB >>> df.info(memory_usage='deep') <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): column_1 1000000 non-null object column_2 1000000 non-null object column_3 1000000 non-null object dtypes: object(3) memory usage: 188.8 MB """ if buf is None: # pragma: no cover buf = sys.stdout lines = [] lines.append(str(type(self))) lines.append(self.index._summary()) if len(self.columns) == 0: lines.append('Empty {name}'.format(name=type(self).__name__)) fmt.buffer_put_lines(buf, lines) return cols = self.columns # hack if max_cols is None: max_cols = get_option('display.max_info_columns', len(self.columns) + 1) max_rows = get_option('display.max_info_rows', len(self) + 1) if null_counts is None: show_counts = ((len(self.columns) <= max_cols) and (len(self) < max_rows)) else: show_counts = null_counts exceeds_info_cols = len(self.columns) > max_cols def _verbose_repr(): lines.append('Data columns (total %d columns):' % len(self.columns)) space = max(len(pprint_thing(k)) for k in self.columns) + 4 counts = None tmpl = "{count}{dtype}" if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError( 'Columns must equal counts ' '({cols:d} != {counts:d})'.format( cols=len(cols), counts=len(counts))) tmpl = "{count} non-null {dtype}" dtypes = self.dtypes for i, col in enumerate(self.columns): dtype = dtypes.iloc[i] col = pprint_thing(col) count = "" if show_counts: count = counts.iloc[i] lines.append(_put_str(col, space) + tmpl.format(count=count, dtype=dtype)) def _non_verbose_repr(): lines.append(self.columns._summary(name='Columns')) def _sizeof_fmt(num, size_qualifier): # returns size in human readable format for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return ("{num:3.1f}{size_q} " "{x}".format(num=num, size_q=size_qualifier, x=x)) num /= 1024.0 return "{num:3.1f}{size_q} {pb}".format(num=num, size_q=size_qualifier, pb='PB') if verbose: _verbose_repr() elif verbose is False: # specifically set to False, not necessarily None _non_verbose_repr() else: if exceeds_info_cols: _non_verbose_repr() else: _verbose_repr() counts = self.get_dtype_counts() dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k in sorted(counts.items())] lines.append('dtypes: {types}'.format(types=', '.join(dtypes))) if memory_usage is None: memory_usage = get_option('display.memory_usage') if memory_usage: # append memory usage of df to display size_qualifier = '' if memory_usage == 'deep': deep = True else: # size_qualifier is just a best effort; not guaranteed to catch # all cases (e.g., it misses categorical data even with object # categories) deep = False if ('object' in counts or self.index._is_memory_usage_qualified()): size_qualifier = '+' mem_usage = self.memory_usage(index=True, deep=deep).sum() lines.append("memory usage: {mem}\n".format( mem=_sizeof_fmt(mem_usage, size_qualifier))) fmt.buffer_put_lines(buf, lines)
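# Hedged sketch (illustrative only) of the two less obvious info() options
# implemented above: redirecting the report with `buf`, and requesting real
# per-element measurement of object columns with memory_usage='deep'.
import io
import numpy as np
import pandas as pd

df = pd.DataFrame({'col': np.random.choice(['a', 'b', 'c'], 1000)})
buffer = io.StringIO()
df.info(buf=buffer, memory_usage='deep')  # nothing is printed to stdout
report = buffer.getvalue()  # ends with an exact 'memory usage: ...' line
print(report)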
def memory_usage(self, index=True, deep=False): """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values are the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5216 """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) if index: result = Series(self.index.memory_usage(deep=deep), index=['Index']).append(result) return result def transpose(self, *args, **kwargs): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- copy : bool, default False If True, the underlying data is copied. Otherwise (default), no copy is made if possible. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ...
'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, dict()) return super().transpose(1, 0, **kwargs) T = property(transpose) # ---------------------------------------------------------------------- # Picklability # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state columns = com._unpickle_array(cols) index = com._unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover # old unpickling (vals, idx, cols), object_state = state index = com._unpickle_array(idx) dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, columns=com._unpickle_array(ocols), copy=False) dm = dm.join(objects) self._data = dm._data # ---------------------------------------------------------------------- # Getting and setting elements def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable) def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) return com.maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine try: return engine.get_value(series._values, index) except KeyError: # GH 20629 if self.index.nlevels > 1: # partial indexing forbidden raise except (TypeError, ValueError): pass # we cannot handle direct indexing # use positional col = self.columns.get_loc(col) index = self.index.get_loc(index) return self._get_value(index, col, takeable=True) _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. 
Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable) def _set_value(self, index, col, value, takeable=False): try: if takeable is True: series = self._iget_item_cache(col) return series._set_value(index, value, takeable=True) series = self._get_item_cache(col) engine = self.index._engine engine.set_value(series._values, index, value) return self except (KeyError, TypeError): # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) return self _set_value.__doc__ = set_value.__doc__ def _ixs(self, i, axis=0): """ Parameters ---------- i : int, slice, or sequence of integers axis : int Notes ----- If slice passed, the resulting data will be a view. """ # irow if axis == 0: if isinstance(i, slice): return self[i] else: label = self.index[i] if isinstance(label, Index): # a location index by definition result = self.take(i, axis=axis) copy = True else: new_values = self._data.fast_xs(i) if is_scalar(new_values): return new_values # if we are a copy, mark as such copy = (isinstance(new_values, np.ndarray) and new_values.base is None) result = self._constructor_sliced(new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] if isinstance(i, slice): # need to return view lab_slice = slice(label[0], label[-1]) return self.loc[:, lab_slice] else: if isinstance(label, Index): return self._take(i, axis=1) index_len = len(self.index) # if the values returned are not the same length # as the index (iow a not found value), iget returns # a 0-len ndarray. This is effectively catching # a numpy error (as numpy should really raise) values = self._data.iget(i) if index_len and not len(values): values = np.array([np.nan] * index_len, dtype=object) result = self._box_col_values(values, label) # this is a cached value, mark it so result._set_as_cached(label, self) return result def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) # shortcut if the key is in columns try: if self.columns.is_unique and key in self.columns: if self.columns.nlevels > 1: return self._getitem_multilevel(key) return self._get_item_cache(key) except (TypeError, ValueError): # The TypeError correctly catches non hashable "key" (e.g. list) # The ValueError can be removed once GH #21729 is fixed pass # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self._getitem_frame(key) # Do we have a (boolean) 1d indexer? 
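# Hedged sketch (illustrative only): the .at/.iat accessors that the
# deprecation warnings above recommend in place of get_value/set_value.
# .at is label-based and .iat positional; both read and write scalars.
import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
value = df.at['y', 'B']  # label-based scalar read -> 4
df.at['y', 'B'] = 40     # label-based scalar write
first = df.iat[0, 0]     # positional scalar read -> 1
df.iat[0, 0] = 10        # positional scalar write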
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True) # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): data = data[key] return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn("Boolean Series key will be reindexed to match " "DataFrame index.", UserWarning, stacklevel=3) elif len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d.' % (len(key), len(self.index))) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self._take(indexer, axis=0) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = self._constructor(new_values, index=self.index, columns=result_columns) result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == '': result = result[''] if isinstance(result, Series): result = self._constructor_sliced(result, index=self.index, name=key) result._set_is_copy(self) return result else: return self._get_item_cache(key) def _getitem_frame(self, key): if key.values.size and not is_bool_dtype(key.values): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) def query(self, expr, inplace=False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. .. 
versionadded:: 0.25.0 You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, str): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: new_data = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query new_data = self[res] if inplace: self._update_inplace(new_data) else: return new_data def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. 
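# Hedged sketch (illustrative only) of the query() features documented
# above: '@' references resolve Python variables from the environment, and
# backticks quote column names containing spaces (backticks need 0.25+).
import pandas as pd

df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2),
                   'C C': range(10, 5, -1)})
threshold = 3
above = df.query('A > @threshold')  # rows where A exceeds the variable
spaced = df.query('B == `C C`')     # backticks quote the spaced column name
df.query('A > B', inplace=True)     # inplace=True mutates df and returns None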
Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = \ self._get_space_character_free_column_resolvers() resolvers = column_resolvers, index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' raise TypeError(msg.format(typ=type(obj).__name__)) slices = [slice(None)] * obj.ndim slices[obj._info_axis_number] = indexer return tuple(slices) if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map( lambda x: frozenset(map(infer_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on {inc_ex}'.format( inc_ex=(include & exclude))) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(idx, dtype): return idx, functools.partial(issubclass, dtype.type) for idx, f in itertools.starmap(is_dtype_instance_mapper, enumerate(self.dtypes)): if include: # checks for the case of empty include or exclude include_these.iloc[idx] = any(map(f, include)) if exclude: exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[_get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] if values.ndim == 2: return self._constructor(values.T, columns=items, index=self.index) else: return self._box_col_values(values, items) def _box_col_values(self, values, items): """ Provide boxed values for a column. 
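# Hedged sketch (illustrative only) of the include/exclude set logic above:
# the selections are frozensets, so an overlapping include/exclude raises,
# and an omitted side defaults to "keep everything the other side allows".
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [True, False], 'c': [1.0, 2.0]})
numeric = df.select_dtypes(include=np.number)  # columns 'a' and 'c'
no_bool = df.select_dtypes(exclude=['bool'])   # columns 'a' and 'c'
try:
    df.select_dtypes(include=['int64'], exclude=['int64'])
except ValueError as err:
    print(err)  # include and exclude overlap on frozenset({...})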
""" klass = self._constructor_sliced return klass(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) else: # set column self._set_item(key, value) def _setitem_slice(self, key, value): self._check_setitem_copy() self.loc._setitem_with_indexer(key, value) def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError('Item wrong length %d instead of %d!' % (len(key), len(self.index))) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() self.loc._setitem_with_indexer(indexer, value) else: if isinstance(value, DataFrame): if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') for k1, k2 in zip(key, value.columns): self[k1] = value[k2] else: indexer = self.loc._convert_to_indexer(key, axis=1) self._check_setitem_copy() self.loc._setitem_with_indexer((slice(None), indexer), value) def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError( 'Array conditional must be same shape as self' ) key = self._constructor(key, **self._construct_axes_dict()) if key.values.size and not is_bool_dtype(key.values): raise TypeError( 'Must pass DataFrame or 2-d ndarray with boolean values only' ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _ensure_valid_index(self, value): """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan) def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. 
Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional """ self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._data.insert(loc, column, value, allow_duplicates=allow_duplicates) def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data def _sanitize_column(self, key, value, broadcast=True): """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. Parameters ---------- key : object value : scalar, Series, or array-like broadcast : bool, default True If ``key`` matches multiple duplicate column names in the DataFrame, this parameter indicates whether ``value`` should be tiled so that the returned array contains a (duplicated) column for each occurrence of the key. If False, ``value`` will not be tiled. 
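# Hedged sketch (illustrative only): DataFrame.insert as implemented above.
# Insertion is positional and in place; re-inserting an existing label is
# rejected unless allow_duplicates=True is passed explicitly.
import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'C': [5, 6]})
df.insert(1, 'B', [3, 4])  # columns are now A, B, C
try:
    df.insert(0, 'A', [0, 0])  # duplicate label rejected by default
except ValueError as err:
    print(err)
df.insert(0, 'A', [0, 0], allow_duplicates=True)  # explicit opt-in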
Returns ------- numpy.ndarray """ def reindexer(value): # reindex if necessary if value.index.equals(self.index) or not len(self.index): value = value._values.copy() else: # GH 4107 try: value = value.reindex(self.index)._values except Exception as e: # duplicate axis if not value.index.is_unique: raise e # other raise TypeError('incompatible index of inserted column ' 'with frame index') return value if isinstance(value, Series): value = reindexer(value) elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) if len(cols) and not cols.equals(value.columns): value = value.reindex(cols, axis=1) # now align rows value = reindexer(value).T elif isinstance(value, ExtensionArray): # Explicitly copy here, instead of in sanitize_index, # as sanitize_index won't copy an EA, even with copy=True value = value.copy() value = sanitize_index(value, self.index, copy=False) elif isinstance(value, Index) or is_sequence(value): # turn me into an ndarray value = sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): value = value.copy(deep=True) else: value = value.copy() # possibly infer to datetimelike if is_object_dtype(value.dtype): value = maybe_infer_to_datetimelike(value) else: # cast ignores pandas dtypes. so save the dtype first infer_dtype, _ = infer_dtype_from_scalar( value, pandas_dtype=True) # upcast value = cast_scalar_to_array(len(self.index), value) value = maybe_cast_to_datetime(value, infer_dtype) # return internal types directly if is_extension_type(value) or is_extension_array_dtype(value): return value # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: if (not self.columns.is_unique or isinstance(self.columns, MultiIndex)): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) return np.atleast_2d(np.asarray(value)) @property def _series(self): return {item: Series(self._data.iget(idx), index=self.index, name=item) for idx, item in enumerate(self.columns)} def lookup(self, row_labels, col_labels): """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. 
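# Hedged usage sketch (illustrative only): lookup() pairs row and column
# labels element-wise, like [df.at[r, c] for r, c in zip(rows, cols)] but
# vectorized on the fast path implemented below.
import pandas as pd

df = pd.DataFrame({'weight': [45.0, 200.0], 'speed': [30.0, 1.0]},
                  index=['falcon', 'lama'])
values = df.lookup(['falcon', 'lama'], ['speed', 'weight'])
print(values)  # array([ 30., 200.])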
Parameters ---------- row_labels : sequence The row labels to use for lookup. col_labels : sequence The column labels to use for lookup. Returns ------- numpy.ndarray The found values, one for each (row, col) label pair. Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] """ n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes['columns'] if columns is not None: frame = frame._reindex_columns(columns, method, copy, level, fill_value, limit, tolerance) index = axes['index'] if index is not None: frame = frame._reindex_index(index, method, copy, level, fill_value, limit, tolerance) return frame def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan, limit=None, tolerance=None): new_index, indexer = self.index.reindex(new_index, method=method, level=level, limit=limit, tolerance=tolerance) return self._reindex_with_indexers({0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_columns(self, new_columns, method, copy, level, fill_value=None, limit=None, tolerance=None): new_columns, indexer = self.columns.reindex(new_columns, method=method, level=level, limit=limit, tolerance=tolerance) return self._reindex_with_indexers({1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False) def _reindex_multi(self, axes, copy, fill_value): """ We are guaranteed non-Nones in the axes.
""" new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value) @Appender(_shared_docs['align'] % _shared_doc_kwargs) def align(self, other, join='outer', axis=None, level=None, copy=True, fill_value=None, method=None, limit=None, fill_axis=0, broadcast_axis=None): return super().align(other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) @rewrite_axis_style_signature('labels', [('method', None), ('copy', True), ('level', None), ('fill_value', np.nan), ('limit', None), ('tolerance', None)]) def reindex(self, *args, **kwargs): axes = validate_axis_style_args(self, args, kwargs, 'labels', 'reindex') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('labels', None) return super().reindex(**kwargs) @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs) def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, limit=None, fill_value=np.nan): return super().reindex_axis(labels=labels, axis=axis, method=method, level=level, copy=copy, limit=limit, fill_value=fill_value) def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... 
columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors) @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False), ('level', None), ('errors', 'ignore')]) def rename(self, *args, **kwargs): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. 
Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) return super().rename(**kwargs) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.fillna.__doc__) def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None, **kwargs): return super().fillna(value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, **kwargs) @Appender(_shared_docs['replace'] % _shared_doc_kwargs) def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad'): return super().replace(to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method) @Appender(_shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None): return super().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value) def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): """ Set the DataFrame index using existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of :class:`~collections.abc.Iterator`. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). verify_integrity : bool, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method. Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 
'sale': [55, 40, 84, 31]}) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 Create a MultiIndex using an Index and a column: >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 Create a MultiIndex using two Series: >>> s = pd.Series([1, 2, 3, 4]) >>> df.set_index([s, s**2]) month year sale 1 1 1 2012 55 2 4 4 2014 40 3 9 7 2013 84 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(keys, list): keys = [keys] err_msg = ('The parameter "keys" may be a column key, one-dimensional ' 'array, or a list containing only valid column keys and ' 'one-dimensional arrays.') missing = [] for col in keys: if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, 'ndim', 1) != 1: raise ValueError(err_msg) else: # everything else gets tried as a key; see GH 24969 try: found = col in self.columns except TypeError: raise TypeError(err_msg + ' Received column of ' 'type {}'.format(type(col))) else: if not found: missing.append(col) if missing: raise KeyError('None of {} are in the columns'.format(missing)) if inplace: frame = self else: frame = self.copy() arrays = [] names = [] if append: names = [x for x in self.index.names] if isinstance(self.index, ABCMultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: arrays.append(self.index) to_remove = [] for col in keys: if isinstance(col, ABCMultiIndex): for n in range(col.nlevels): arrays.append(col._get_level_values(n)) names.extend(col.names) elif isinstance(col, (ABCIndexClass, ABCSeries)): # if Index then not MultiIndex (treated above) arrays.append(col) names.append(col.name) elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) elif isinstance(col, abc.Iterator): arrays.append(list(col)) names.append(None) # from here, col can only be a column label else: arrays.append(frame[col]._values) names.append(col) if drop: to_remove.append(col) if len(arrays[-1]) != len(self): # check newest element against length of calling frame, since # ensure_index_from_sequences would not raise for append=False. raise ValueError('Length mismatch: Expected {len_self} rows, ' 'received array of length {len_col}'.format( len_self=len(self), len_col=len(arrays[-1]) )) index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() raise ValueError('Index has duplicate keys: {dup}'.format( dup=duplicates)) # use set to handle duplicate column names gracefully in case of drop for c in set(to_remove): del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''): """ Reset the index, or a level of it. Reset the index of the DataFrame, and use the default one instead. If the DataFrame has a MultiIndex, this method can remove one or more levels. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. 
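# Hedged sketch (illustrative only) of the verify_integrity check
# implemented above: duplicate keys in the prospective index raise
# immediately instead of surfacing later.
import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
df.set_index('key')  # duplicates are allowed by default
try:
    df.set_index('key', verify_integrity=True)
except ValueError as err:
    print(err)  # Index has duplicate keys: ...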
drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column, and a new sequential index is used: >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. 
We can place it in another level: >>> df.reset_index(level='class', col_level=1) speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, col_fill='genus') genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: new_obj = self else: new_obj = self.copy() def _maybe_casted_values(index, labels=None): values = index._values if not isinstance(index, (PeriodIndex, DatetimeIndex)): if values.dtype == np.object_: values = lib.maybe_convert_objects(values) # if we have the labels, extract the values with a mask if labels is not None: mask = labels == -1 # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's if mask.all(): values = np.empty(len(mask)) values.fill(np.nan) else: values = values.take(labels) # TODO(https://github.com/pandas-dev/pandas/issues/24206) # Push this into maybe_upcast_putmask? # We can't pass EAs there right now. Looks a bit # complicated. # So we unbox the ndarray_values, op, re-box. values_type = type(values) values_dtype = values.dtype if issubclass(values_type, DatetimeLikeArray): values = values._data if mask.any(): values, changed = maybe_upcast_putmask( values, mask, np.nan) if issubclass(values_type, DatetimeLikeArray): values = values_type(values, dtype=values_dtype) return values new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): names = [n if n is not None else ('level_%d' % i) for (i, n) in enumerate(self.index.names)] to_insert = zip(self.index.levels, self.index.codes) else: default = 'index' if 'index' not in self else 'level_0' names = ([default] if self.index.name is None else [self.index.name]) to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue name = names[i] if multi_col: col_name = (list(name) if isinstance(name, tuple) else [name]) if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError("col_fill=None is incompatible " "with incomplete column name " "{}".format(name)) col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = _maybe_casted_values(lev, lab) new_obj.insert(0, name, level_values) new_obj.index = new_index if not inplace: return new_obj # ---------------------------------------------------------------------- # Reindex-based selection methods @Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isna(self): return super().isna() 
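    # A minimal round-trip sketch (illustrative only, not part of the
    # pandas API): with the default arguments, ``reset_index`` re-inserts
    # each removed index level at column position 0 (see the loop over
    # ``to_insert`` above), so ``set_index`` followed by ``reset_index``
    # recovers the original frame when the key was the left-most column.
    #
    #     df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #     assert df.set_index('a').reset_index().equals(df)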
@Appender(_shared_docs['isna'] % _shared_doc_kwargs) def isnull(self): return super().isnull() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notna(self): return super().notna() @Appender(_shared_docs['notna'] % _shared_doc_kwargs) def notnull(self): return super().notnull() def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): """ Remove missing values. See the :ref:`User Guide <missing_data>` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. deprecated:: 0.23.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.isna: Indicate missing values. DataFrame.notna : Indicate existing (non-missing) values. DataFrame.fillna : Replace missing values. Series.dropna : Drop missing values. Index.dropna : Drop missing indices. Examples -------- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), ... pd.NaT]}) >>> df name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): # GH20987 msg = ("supplying multiple axes to axis is deprecated and " "will be removed in a future version.") warnings.warn(msg, FutureWarning, stacklevel=2) result = self for ax in axis: result = result.dropna(how=how, thresh=thresh, subset=subset, axis=ax) else: axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == 'any': mask = count == len(agg_obj._get_axis(agg_axis)) elif how == 'all': mask = count > 0 else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) else: return result def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- DataFrame """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated] def duplicated(self, subset=None, keep='first'): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: return Series(dtype=bool) def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8', copy=False), len(shape) if subset is None: subset = self.columns elif (not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns): subset = subset, # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. 
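        # For example, a scalar label such as subset='a' has already been
        # normalized to the tuple ('a',) above, so Index(subset) below
        # always receives an iterable of column labels.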
diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, keep), index=self.index) # ---------------------------------------------------------------------- # Sorting @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_values.__doc__) def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError('Length of ascending (%d) != length of by (%d)' % (len(ascending), len(by))) if len(by) > 1: from pandas.core.sorting import lexsort_indexer keys = [self._get_label_or_level_values(x, axis=axis) for x in by] indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position) indexer = ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort by = by[0] k = self._get_label_or_level_values(by, axis=axis) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort(k, kind=kind, ascending=ascending, na_position=na_position) new_data = self._data.take(indexer, axis=self._get_block_manager_axis(axis), verify=False) if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_index.__doc__) def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): # TODO: this can be combined with Series.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') # 10726 if by is not None: warnings.warn("by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) axis = self._get_axis_number(axis) labels = self._get_axis(axis) # make sure that the axis is lexsorted to start # if not we need to reconstruct to get the correct indexer labels = labels._sort_levels_monotonic() if level is not None: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer(labels._get_codes_for_sorting(), orders=ascending, na_position=na_position) else: from pandas.core.sorting import nargsort # Check monotonic-ness before sort an index # GH11080 if ((ascending and labels.is_monotonic_increasing) or (not ascending and labels.is_monotonic_decreasing)): if inplace: return else: return self.copy() indexer = nargsort(labels, kind=kind, ascending=ascending, na_position=na_position) baxis = self._get_block_manager_axis(axis) new_data = self._data.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() if inplace: return self._update_inplace(new_data) else: return self._constructor(new_data).__finalize__(self) def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. 
        Return the first `n` rows with the largest values in `columns`, in
        descending order. The columns that are not specified are returned as
        well, but not used for ordering.

        This method is equivalent to
        ``df.sort_values(columns, ascending=False).head(n)``, but more
        performant.

        Parameters
        ----------
        n : int
            Number of rows to return.
        columns : label or list of labels
            Column label(s) to order by.
        keep : {'first', 'last', 'all'}, default 'first'
            Where there are duplicate values:

            - ``first`` : prioritize the first occurrence(s).
            - ``last`` : prioritize the last occurrence(s).
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.

            .. versionadded:: 0.24.0

        Returns
        -------
        DataFrame
            The first `n` rows ordered by the given columns in descending
            order.

        See Also
        --------
        DataFrame.nsmallest : Return the first `n` rows ordered by `columns`
            in ascending order.
        DataFrame.sort_values : Sort DataFrame by the values.
        DataFrame.head : Return the first `n` rows without re-ordering.

        Notes
        -----
        This function cannot be used with all column types. For example, when
        specifying columns with `object` or `category` dtypes, ``TypeError``
        is raised.

        Examples
        --------
        >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
        ...                                   434000, 434000, 337000, 11300,
        ...                                   11300, 11300],
        ...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
        ...                            17036, 182, 38, 311],
        ...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
        ...                                "IS", "NR", "TV", "AI"]},
        ...                   index=["Italy", "France", "Malta",
        ...                          "Maldives", "Brunei", "Iceland",
        ...                          "Nauru", "Tuvalu", "Anguilla"])
        >>> df
                  population      GDP alpha-2
        Italy       59000000  1937894      IT
        France      65000000  2583560      FR
        Malta         434000    12011      MT
        Maldives      434000     4520      MV
        Brunei        434000    12128      BN
        Iceland       337000    17036      IS
        Nauru          11300      182      NR
        Tuvalu         11300       38      TV
        Anguilla       11300      311      AI

        In the following example, we will use ``nlargest`` to select the
        three rows having the largest values in column "population".

        >>> df.nlargest(3, 'population')
                population      GDP alpha-2
        France    65000000  2583560      FR
        Italy     59000000  1937894      IT
        Malta       434000    12011      MT

        When using ``keep='last'``, ties are resolved in reverse order:

        >>> df.nlargest(3, 'population', keep='last')
                population      GDP alpha-2
        France    65000000  2583560      FR
        Italy     59000000  1937894      IT
        Brunei      434000    12128      BN

        When using ``keep='all'``, all duplicate items are maintained:

        >>> df.nlargest(3, 'population', keep='all')
                  population      GDP alpha-2
        France      65000000  2583560      FR
        Italy       59000000  1937894      IT
        Malta         434000    12011      MT
        Maldives      434000     4520      MV
        Brunei        434000    12128      BN

        To order by the largest values in column "population" and then
        "GDP", we can specify multiple columns like in the next example.

        >>> df.nlargest(3, ['population', 'GDP'])
                population      GDP alpha-2
        France    65000000  2583560      FR
        Italy     59000000  1937894      IT
        Brunei      434000    12128      BN
        """
        return algorithms.SelectNFrame(self,
                                       n=n,
                                       keep=keep,
                                       columns=columns).nlargest()

    def nsmallest(self, n, columns, keep='first'):
        """
        Return the first `n` rows ordered by `columns` in ascending order.

        Return the first `n` rows with the smallest values in `columns`, in
        ascending order. The columns that are not specified are returned as
        well, but not used for ordering.

        This method is equivalent to
        ``df.sort_values(columns, ascending=True).head(n)``, but more
        performant.

        Parameters
        ----------
        n : int
            Number of items to retrieve.
        columns : list or str
            Column name or names to order by.
        keep : {'first', 'last', 'all'}, default 'first'
            Where there are duplicate values:

            - ``first`` : take the first occurrence.
            - ``last`` : take the last occurrence.
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.

            .. versionadded:: 0.24.0

        Returns
        -------
        DataFrame

        See Also
        --------
        DataFrame.nlargest : Return the first `n` rows ordered by `columns`
            in descending order.
        DataFrame.sort_values : Sort DataFrame by the values.
        DataFrame.head : Return the first `n` rows without re-ordering.

        Examples
        --------
        >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
        ...                                   434000, 434000, 337000, 11300,
        ...                                   11300, 11300],
        ...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
        ...                            17036, 182, 38, 311],
        ...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
        ...                                "IS", "NR", "TV", "AI"]},
        ...                   index=["Italy", "France", "Malta",
        ...                          "Maldives", "Brunei", "Iceland",
        ...                          "Nauru", "Tuvalu", "Anguilla"])
        >>> df
                  population      GDP alpha-2
        Italy       59000000  1937894      IT
        France      65000000  2583560      FR
        Malta         434000    12011      MT
        Maldives      434000     4520      MV
        Brunei        434000    12128      BN
        Iceland       337000    17036      IS
        Nauru          11300      182      NR
        Tuvalu         11300       38      TV
        Anguilla       11300      311      AI

        In the following example, we will use ``nsmallest`` to select the
        three rows having the smallest values in column "population".

        >>> df.nsmallest(3, 'population')
                  population  GDP alpha-2
        Nauru          11300  182      NR
        Tuvalu         11300   38      TV
        Anguilla       11300  311      AI

        When using ``keep='last'``, ties are resolved in reverse order:

        >>> df.nsmallest(3, 'population', keep='last')
                  population  GDP alpha-2
        Anguilla       11300  311      AI
        Tuvalu         11300   38      TV
        Nauru          11300  182      NR

        When using ``keep='all'``, all duplicate items are maintained:

        >>> df.nsmallest(3, 'population', keep='all')
                  population  GDP alpha-2
        Nauru          11300  182      NR
        Tuvalu         11300   38      TV
        Anguilla       11300  311      AI

        To order by the smallest values in column "population" and then
        "GDP", we can specify multiple columns like in the next example.

        >>> df.nsmallest(3, ['population', 'GDP'])
                  population  GDP alpha-2
        Tuvalu         11300   38      TV
        Nauru          11300  182      NR
        Anguilla       11300  311      AI
        """
        return algorithms.SelectNFrame(self,
                                       n=n,
                                       keep=keep,
                                       columns=columns).nsmallest()

    def swaplevel(self, i=-2, j=-1, axis=0):
        """
        Swap levels i and j in a MultiIndex on a particular axis.

        Parameters
        ----------
        i, j : int, string (can be mixed)
            Level of index to be swapped. Can pass level name as string.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to swap levels on: 0 swaps levels of the index,
            1 swaps levels of the columns.

        Returns
        -------
        DataFrame

        .. versionchanged:: 0.18.1
           The indexes ``i`` and ``j`` are now optional, and default to
           the two innermost levels of the index.
        """
        result = self.copy()

        axis = self._get_axis_number(axis)
        if axis == 0:
            result.index = result.index.swaplevel(i, j)
        else:
            result.columns = result.columns.swaplevel(i, j)
        return result

    def reorder_levels(self, order, axis=0):
        """
        Rearrange index levels using input order. May not drop or
        duplicate levels.

        Parameters
        ----------
        order : list of int or list of str
            List representing new level order. Reference level by number
            (position) or by key (label).
        axis : int
            Where to reorder levels: 0 for the index, 1 for the columns.
        Returns
        -------
        type of caller (new object)
        """
        axis = self._get_axis_number(axis)

        if not isinstance(self._get_axis(axis),
                          MultiIndex):  # pragma: no cover
            raise TypeError('Can only reorder levels on a hierarchical '
                            'axis.')

        result = self.copy()

        if axis == 0:
            result.index = result.index.reorder_levels(order)
        else:
            result.columns = result.columns.reorder_levels(order)
        return result

    # ----------------------------------------------------------------------
    # Arithmetic / combination related

    def _combine_frame(self, other, func, fill_value=None, level=None):
        this, other = self.align(other, join='outer', level=level,
                                 copy=False)
        new_index, new_columns = this.index, this.columns

        def _arith_op(left, right):
            # for the mixed_type case where we iterate over columns,
            # _arith_op(left, right) is equivalent to
            # left._binop(right, func, fill_value=fill_value)
            left, right = ops.fill_binop(left, right, fill_value)
            return func(left, right)

        if ops.should_series_dispatch(this, other, func):
            # iterate over columns
            return ops.dispatch_to_series(this, other, _arith_op)
        else:
            result = _arith_op(this.values, other.values)
            return self._constructor(result, index=new_index,
                                     columns=new_columns, copy=False)

    def _combine_match_index(self, other, func, level=None):
        left, right = self.align(other, join='outer', axis=0, level=level,
                                 copy=False)
        assert left.index.equals(right.index)

        if left._is_mixed_type or right._is_mixed_type:
            # operate column-wise; avoid costly object-casting in `.values`
            return ops.dispatch_to_series(left, right, func)
        else:
            # fastpath --> operate directly on values
            with np.errstate(all="ignore"):
                new_data = func(left.values.T, right.values).T
            return self._constructor(new_data,
                                     index=left.index, columns=self.columns,
                                     copy=False)

    def _combine_match_columns(self, other, func, level=None):
        assert isinstance(other, Series)
        left, right = self.align(other, join='outer', axis=1, level=level,
                                 copy=False)
        assert left.columns.equals(right.index)
        return ops.dispatch_to_series(left, right, func, axis="columns")

    def _combine_const(self, other, func):
        assert lib.is_scalar(other) or np.ndim(other) == 0
        return ops.dispatch_to_series(self, other, func)

    def combine(self, other, func, fill_value=None, overwrite=True):
        """
        Perform column-wise combine with another DataFrame.

        Combines a DataFrame with `other` DataFrame using `func`
        to element-wise combine columns. The row and column indexes of the
        resulting DataFrame will be the union of the two.

        Parameters
        ----------
        other : DataFrame
            The DataFrame to merge column-wise.
        func : function
            Function that takes two series as inputs and returns a Series
            or a scalar. Used to merge the two dataframes column by column.
        fill_value : scalar value, default None
            The value to fill NaNs with prior to passing any column to the
            merge func.
        overwrite : bool, default True
            If True, columns in `self` that do not exist in `other` will be
            overwritten with NaNs.

        Returns
        -------
        DataFrame
            Combination of the provided DataFrames.

        See Also
        --------
        DataFrame.combine_first : Combine two DataFrame objects and default
            to non-null values in frame calling the method.

        Examples
        --------
        Combine using a simple function that chooses the smaller column.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
        >>> df1.combine(df2, take_smaller)
           A  B
        0  0  3
        1  0  3

        Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed in dataframe. >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unnecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value if col not in self.columns: # If self DataFrame does not have col in other DataFrame, # try to promote series, which is all NaN, as other_dtype. new_dtype = other_dtype try: series = series.astype(new_dtype, copy=False) except ValueError: # e.g. new_dtype is integer types pass else: # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) if not is_dtype_equal(other_dtype, new_dtype): otherSeries = otherSeries.astype(new_dtype) arr = func(series, otherSeries) arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. 
Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False) @deprecate_kwarg(old_arg_name='raise_conflict', new_arg_name='errors', mapping={False: 'ignore', True: 'raise'}) def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged :: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. 
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ['ignore', 'raise']: raise ValueError("The parameter errors must be either " "'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all='ignore'): mask = ~filter_func(this) | isna(that) else: if errors == 'raise': mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unnecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Data reshaping _shared_docs['pivot'] = """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation, multiple values will result in a MultiIndex in the columns. See the :ref:`User Guide <reshaping>` for more on reshaping. Parameters ----------%s index : string or object, optional Column to use to make new frame's index. If None, uses existing index. columns : string or object Column to use to make new frame's columns. values : string, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. .. versionchanged :: 0.23.0 Also accept list of column names. Returns ------- DataFrame Returns reshaped DataFrame. Raises ------ ValueError: When there are any `index`, `columns` combinations with multiple values. `DataFrame.pivot_table` when you need to aggregate. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods. Examples -------- >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 
'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz') bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) baz zoo bar A B C A B C foo one 1 2 3 x y z two 4 5 6 q w t A ValueError is raised if there are any duplicates. >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 Notice that the first two rows are the same for our `index` and `columns` arguments. >>> df.pivot(index='foo', columns='bar', values='baz') Traceback (most recent call last): ... ValueError: Index contains duplicate entries, cannot reshape """ @Substitution('') @Appender(_shared_docs['pivot']) def pivot(self, index=None, columns=None, values=None): from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs['pivot_table'] = """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ----------%s values : column to aggregate, optional index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) If dict is passed, the key is column to aggregate and value is function or list of functions fill_value : scalar, default None Value to replace missing values with margins : boolean, default False Add all row / columns (e.g. for subtotal / grand totals) dropna : boolean, default True Do not include columns whose entries are all NaN margins_name : string, default 'All' Name of the row / column that will contain the totals when margins is True. observed : boolean, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionchanged :: 0.25.0 Returns ------- DataFrame See Also -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... 
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 We can also fill missing values using the `fill_value` parameter. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 The next example aggregates by taking the mean across multiple columns. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': np.mean}) >>> table D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 foo large 2.000000 4.500000 small 2.333333 4.333333 We can also calculate multiple types of aggregations for any given value column. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E mean max mean min A C bar large 5.500000 9.0 7.500000 6.0 small 5.500000 9.0 8.500000 8.0 foo large 2.000000 5.0 4.500000 4.0 small 2.333333 6.0 4.333333 2.0 """ @Substitution('') @Appender(_shared_docs['pivot_table']) def pivot_table(self, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All', observed=False): from pandas.core.reshape.pivot import pivot_table return pivot_table(self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed) def stack(self, level=-1, dropna=True): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Parameters ---------- level : int, str, list, default -1 Level(s) to stack from the column axis onto the index axis, defined as one index or label, or a list of indices or labels. dropna : bool, default True Whether to drop rows in the resulting Frame/Series with missing values. Stacking a column level onto the index axis can create combinations of index and column values that are missing from the original dataframe. See Examples section. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... 
columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack() cat weight 0 height 1 dog weight 2 height 3 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack() height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN **Prescribing the level(s) to be stacked** The first parameter controls which level or levels are stacked: >>> df_multi_level_cols2.stack(0) kg m cat height NaN 2.0 weight 1.0 NaN dog height NaN 4.0 weight 3.0 NaN >>> df_multi_level_cols2.stack([0, 1]) cat height m 2.0 weight kg 1.0 dog height m 4.0 weight kg 3.0 dtype: float64 **Dropping missing values** >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]], ... index=['cat', 'dog'], ... columns=multicol2) Note that rows where all values are missing are dropped by default but this behaviour can be controlled via the dropna keyword parameter: >>> df_multi_level_cols3 weight height kg m cat NaN 1.0 dog 2.0 3.0 >>> df_multi_level_cols3.stack(dropna=False) height weight cat kg NaN NaN m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN >>> df_multi_level_cols3.stack(dropna=True) height weight cat m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN """ from pandas.core.reshape.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) else: return stack(self, level, dropna=dropna) def unstack(self, level=-1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels, returning a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name fill_value : replace NaN with this value if the unstack produces missing values .. versionadded:: 0.18.0 Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... 
('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) _shared_docs['melt'] = (""" Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. %(versionadded)s Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar Name to use for the 'variable' column. If None it uses ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' Name to use for the 'value' column. col_level : int or string, optional If columns are a MultiIndex then use this level to melt. Returns ------- DataFrame Unpivoted DataFrame. See Also -------- %(other)s pivot_table DataFrame.pivot Examples -------- >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)sid_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) A variable value 0 a B 1 1 b B 3 2 c B 5 3 a C 2 4 b C 4 5 c C 6 The names of 'variable' and 'value' columns can be customized: >>> %(caller)sid_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] >>> df A B C D E F 0 a 1 2 1 b 3 4 2 c 5 6 >>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) A variable value 0 a B 1 1 b B 3 2 c B 5 >>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) (A, D) variable_0 variable_1 value 0 a B E 1 1 b B E 3 2 c B E 5 """) @Appender(_shared_docs['melt'] % dict(caller='df.melt(', versionadded='.. versionadded:: 0.20.0\n', other='melt')) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): from pandas.core.reshape.melt import melt return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level) # ---------------------------------------------------------------------- # Time series-related def diff(self, periods=1, axis=0): """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : {0 or 'index', 1 or 'columns'}, default 0 Take difference over rows (0) or columns (1). .. versionadded:: 0.16.1. Returns ------- DataFrame See Also -------- Series.diff: First discrete difference for a Series. DataFrame.pct_change: Percent change over given number of periods. 
DataFrame.shift: Shift index by desired number of periods with an optional time freq. Examples -------- Difference with previous row >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(axis=1) a b c 0 NaN 0.0 0.0 1 NaN -1.0 3.0 2 NaN -1.0 7.0 3 NaN -1.0 13.0 4 NaN 0.0 20.0 5 NaN 2.0 28.0 Difference with 3rd previous row >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ bm_axis = self._get_block_manager_axis(axis) new_data = self._data.diff(n=periods, axis=bm_axis) return self._constructor(new_data) # ---------------------------------------------------------------------- # Function application def _gotitem(self, key: Union[str, List[str]], ndim: int, subset: Optional[Union[Series, ABCDataFrame]] = None, ) -> Union[Series, ABCDataFrame]: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if subset is None: subset = self elif subset.ndim == 1: # is Series return subset # TODO: _shallow_copy(subset)? return subset[key] _agg_summary_and_see_also_doc = dedent(""" The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, `var`), where the default is to compute the aggregation of the flattened array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Perform any type of operations. DataFrame.transform : Perform transformation type operations. core.groupby.GroupBy : Perform operations over groups. core.resample.Resampler : Perform operations over resampled bins. core.window.Rolling : Perform operations over rolling window. core.window.Expanding : Perform operations over expanding window. core.window.EWM : Perform operation over exponential weighted window. """) _agg_examples_doc = dedent(""" Examples -------- >>> df = pd.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) Aggregate these functions over the rows. >>> df.agg(['sum', 'min']) A B C sum 12.0 15.0 18.0 min 1.0 2.0 3.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN Aggregate over the columns. >>> df.agg("mean", axis="columns") 0 2.0 1 5.0 2 8.0 3 NaN dtype: float64 """) @Substitution(see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, versionadded='\n.. 
versionadded:: 0.20.0\n', **_shared_doc_kwargs) @Appender(_shared_docs['aggregate']) def aggregate(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) result = None try: result, how = self._aggregate(func, axis=axis, *args, **kwargs) except TypeError: pass if result is None: return self.apply(func, axis=axis, args=args, **kwargs) return result def _aggregate(self, arg, axis=0, *args, **kwargs): if axis == 1: # NDFrame.aggregate returns a tuple, and we need to transpose # only result result, how = self.T._aggregate(arg, *args, **kwargs) result = result.T if result is not None else result return result, how return super()._aggregate(arg, *args, **kwargs) agg = aggregate @Appender(_shared_docs['transform'] % _shared_doc_kwargs) def transform(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) if axis == 1: return self.T.transform(func, *args, **kwargs).T return super().transform(func, *args, **kwargs) def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. broadcast : bool, optional Only relevant for aggregation functions: * ``False`` or ``None`` : returns a Series whose length is the length of the index or the number of columns (based on the `axis` parameter) * ``True`` : results will be broadcast to the original shape of the frame, the original index and columns will be retained. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by result_type='broadcast'. raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. reduce : bool or None, default None Try to apply reduction procedures. If the DataFrame is empty, `apply` will use `reduce` to determine whether the result should be a Series or a DataFrame. If ``reduce=None`` (the default), `apply`'s return value will be guessed by calling `func` on an empty Series (note: while guessing, exceptions raised by `func` will be ignored). If ``reduce=True`` a Series will always be returned, and if ``reduce=False`` a DataFrame will always be returned. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by ``result_type='reduce'``. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. 
However if the apply function returns a Series these are expanded to columns. .. versionadded:: 0.23.0 args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- In the current implementation apply calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing result_type='expand' will expand list-like results to columns of a Dataframe >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, broadcast=broadcast, raw=raw, reduce=reduce, result_type=result_type, args=args, kwds=kwds) return op.get_result() def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Notes ----- In the current implementation applymap calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. 
>>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func) return lib.map_infer(x.astype(object).values, func) return self.apply(infer) # ---------------------------------------------------------------------- # Merging / joining methods def append(self, other, ignore_index=False, verify_integrity=False, sort=None): """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default None Sort columns if the columns of `self` and `other` are not aligned. The default sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- concat : General function to concatenate DataFrame or Series objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... 
ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') if other.name is None: index = None else: # other must have the same index name as self, otherwise # index name will be reset index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) try: combined_columns = self.columns.append(idx_diff) except TypeError: combined_columns = self.columns.astype(object).append(idx_diff) other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns) other = other._convert(datetime=True, timedelta=True) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort) def join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): """ Join columns of another DataFrame. Join columns with `other` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame. on : str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `other`, otherwise joins index-on-index. If multiple values given, the `other` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how : {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use calling frame's index (or column if on is specified) * right: use `other`'s index. * outer: form union of calling frame's index (or column if on is specified) with `other`'s index, and sort it lexicographically. * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling frame's index. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from right frame's overlapping columns. sort : bool, default False Order result DataFrame lexicographically by the join key. If False, the order of the join key depends on the join type (how keyword). Returns ------- DataFrame A DataFrame containing columns from both the caller and `other`. See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. Notes ----- Parameters `on`, `lsuffix`, and `rsuffix` are not supported when passing a list of `DataFrame` objects. Support for specifying index levels as the `on` parameter was added in version 0.23.0. Examples -------- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], ...
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) >>> df key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 4 K4 A4 5 K5 A5 >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}) >>> other key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> df.join(other, lsuffix='_caller', rsuffix='_other') key_caller A key_other B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 NaN NaN 4 K4 A4 NaN NaN 5 K5 A5 NaN NaN If we want to join using the key columns, we need to set key to be the index in both `df` and `other`. The joined DataFrame will have key as its index. >>> df.set_index('key').join(other.set_index('key')) A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 NaN K4 A4 NaN K5 A5 NaN Another option to join using the key columns is to use the `on` parameter. DataFrame.join always uses `other`'s index but we can use any column in `df`. This method preserves the original DataFrame's index in the result. >>> df.join(other.set_index('key'), on='key') key A B 0 K0 A0 B0 1 K1 A1 B1 2 K2 A2 B2 3 K3 A3 NaN 4 K4 A4 NaN 5 K5 A5 NaN """ # For SparseDataFrame's benefit return self._join_compat(other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort) def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): from pandas.core.reshape.merge import merge from pandas.core.reshape.concat import concat if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported' ' for joining on index') frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) # join indexes only using concat if can_concat: if how == 'left': how = 'outer' join_axes = [self.index] else: join_axes = None return concat(frames, axis=1, join=how, join_axes=join_axes, verify_integrity=True) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None): from pandas.core.reshape.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate) def round(self, decimals=0, *args, **kwargs): """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. *args Additional keywords have no effect but might be accepted for compatibility with numpy. **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. 
Returns ------- DataFrame A DataFrame with the affected columns rounded to the specified number of decimal places. See Also -------- numpy.around : Round a numpy array to the given number of decimals. Series.round : Round a Series to the given number of decimals. Examples -------- >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], ... columns=['dogs', 'cats']) >>> df dogs cats 0 0.21 0.32 1 0.01 0.67 2 0.66 0.03 3 0.21 0.18 By providing an integer each column is rounded to the same number of decimal places >>> df.round(1) dogs cats 0 0.2 0.3 1 0.0 0.7 2 0.7 0.0 3 0.2 0.2 With a dict, the number of places for specific columns can be specified with the column names as key and the number of decimal places as value >>> df.round({'dogs': 1, 'cats': 0}) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 Using a Series, the number of places for specific columns can be specified with the column names as index and the number of decimal places as value >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) >>> df.round(decimals) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 """ from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.iteritems(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(s, decimals): if is_integer_dtype(s) or is_float_dtype(s): return s.round(decimals) return s nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series): if not decimals.index.is_unique: raise ValueError("Index of decimals must be unique") new_cols = [col for col in _dict_round(self, decimals)] elif is_integer(decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.iteritems()] else: raise TypeError("decimals must be an integer, a dict-like or a " "Series") if len(new_cols) > 0: return self._constructor(concat(new_cols, axis=1), index=self.index, columns=self.columns) else: return self # ---------------------------------------------------------------------- # Statistical methods, etc. def corr(self, method='pearson', min_periods=1): """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for Pearson and Spearman correlation. Returns ------- DataFrame Correlation matrix. See Also -------- DataFrame.corrwith Series.corr Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... 
columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) dogs cats dogs 1.0 0.3 cats 0.3 1.0 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if method == 'pearson': correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods) elif method == 'spearman': correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods) elif method == 'kendall' or callable(method): if min_periods is None: min_periods = 1 mat = ensure_float64(mat).T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1. elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c else: raise ValueError("method must be either 'pearson', " "'spearman', 'kendall', or a callable, " "'{method}' was supplied".format(method=method)) return self._constructor(correl, index=idx, columns=cols) def cov(self, min_periods=None): """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. core.window.EWM.cov: Exponential weighted sample covariance. core.window.Expanding.cov : Expanding sample covariance. core.window.Rolling.cov : Rolling sample covariance. Notes ----- Returns the covariance matrix of the DataFrame's time series. The covariance is normalized by N-1. For DataFrames that have Series that are missing data (assuming that data is `missing at random <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__) the returned covariance matrix will be an unbiased estimate of the variance and covariance between the member Series. However, for many applications this estimate may not be acceptable because the estimate covariance matrix is not guaranteed to be positive semi-definite. This could lead to estimate correlations having absolute values which are greater than one, and/or a non-invertible covariance matrix. See `Estimation of covariance matrices <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_ matrices>`__ for more details. Examples -------- >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(1000, 5), ... 
columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> df.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.values if notna(mat).all(): if min_periods is not None and min_periods > len(mat): baseCov = np.empty((mat.shape[1], mat.shape[1])) baseCov.fill(np.nan) else: baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=idx, columns=cols) def corrwith(self, other, axis=0, drop=False, method='pearson'): """ Compute pairwise correlation between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. Parameters ---------- other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float .. versionadded:: 0.24.0 Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr """ axis = self._get_axis_number(axis) this = self._get_numeric_data() if isinstance(other, Series): return this.apply(lambda x: other.corr(x, method=method), axis=axis) other = other._get_numeric_data() left, right = this.align(other, join='inner', copy=False) if axis == 1: left = left.T right = right.T if method == 'pearson': # mask missing values left = left + right * 0 right = right + left * 0 # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom elif method in ['kendall', 'spearman'] or callable(method): def c(x): return nanops.nancorr(x[0], x[1], method=method) correl = Series(map(c, zip(left.values.T, right.values.T)), index=left.columns) else: raise ValueError("Invalid method {method} was passed, " "valid methods are: 'pearson', 'kendall', " "'spearman', or callable". format(method=method)) if not drop: # Find non-matching labels along the given axis # and append missing correlations (GH 22375) raxis = 1 if axis == 0 else 0 result_index = (this._get_axis(raxis). 
union(other._get_axis(raxis))) idx_diff = result_index.difference(correl.index) if len(idx_diff) > 0: correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff)) return correl # ---------------------------------------------------------------------- # ndarray-like stats methods def count(self, axis=0, level=None, numeric_only=False): """ Count non-NA cells for each column or row. The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending on `pandas.options.mode.use_inf_as_na`) are considered NA. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are generated for each **row**. level : int or str, optional If the axis is a `MultiIndex` (hierarchical), count along a particular `level`, collapsing into a `DataFrame`. A `str` specifies the level name. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- Series or DataFrame For each column/row the number of non-NA/null entries. If `level` is specified returns a `DataFrame`. See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = pd.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 Counts for each **row**: >>> df.count(axis='columns') 0 3 1 2 2 3 3 3 4 3 dtype: int64 Counts for one level of a `MultiIndex`: >>> df.set_index(["Person", "Single"]).count(level="Person") Age Person John 2 Lewis 1 Myla 1 """ axis = self._get_axis_number(axis) if level is not None: return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = Series(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type or frame._data.any_extension_types: # the or any_extension_types is really only hit for single- # column frames with an extension array result = notna(frame).sum(axis=axis) else: # GH13407 series_counts = notna(frame).sum(axis=axis) counts = series_counts.values result = Series(counts, index=frame._get_agg_axis(axis)) return result.astype('int64') def _count_level(self, level, axis=0, numeric_only=False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError("Can only count levels on hierarchical " "{ax}.".format(ax=self._get_axis_name(axis))) if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes mask = notna(frame.values) if axis == 1: # We're transposing the mask rather than frame to avoid potential # upcasts to object, which induces a ~20x slowdown mask = mask.T if isinstance(level, str): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] level_codes = ensure_int64(count_axis.codes[level]) counts = 
lib.count_level_2d(mask, level_codes, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) if axis == 1: # Undo our earlier transpose return result.T else: return result def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): if axis is None and filter_type == 'bool': labels = None constructor = None else: # TODO: Make other agg func handle axis=None properly axis = self._get_axis_number(axis) labels = self._get_agg_axis(axis) constructor = self._constructor def f(x): return op(x, axis=axis, skipna=skipna, **kwds) # exclude timedelta/datetime unless we are uniform types if (axis == 1 and self._is_datelike_mixed_type and (not self._is_homogeneous_type and not is_datetime64tz_dtype(self.dtypes[0]))): numeric_only = True if numeric_only is None: try: values = self.values result = f(values) if (filter_type == 'bool' and is_object_dtype(values) and axis is None): # work around https://github.com/numpy/numpy/issues/10489 # TODO: combine with hasattr(result, 'dtype') further down # hard since we don't have `values` down there. result = np.bool_(result) except Exception: # try by-column first if filter_type is None and axis == 0: try: # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series # we only end up here if we have not specified # numeric_only and yet we have tried a # column-by-column reduction, where we have mixed type. # So let's just do what we can from pandas.core.apply import frame_apply opa = frame_apply(self, func=f, result_type='expand', ignore_failures=True) result = opa.get_result() if result.ndim == self.ndim: result = result.iloc[0] return result except Exception: pass if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': data = self._get_bool_data() else: # pragma: no cover e = NotImplementedError( "Handling exception with filter_type {f} not " "implemented.".format(f=filter_type)) raise_with_traceback(e) with np.errstate(all='ignore'): result = f(data.values) labels = data._get_agg_axis(axis) else: if numeric_only: if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': # GH 25101, # GH 24434 data = self._get_bool_data() if axis == 0 else self else: # pragma: no cover msg = ("Generating numeric_only data with filter_type {f} " "not supported.".format(f=filter_type)) raise NotImplementedError(msg) values = data.values labels = data._get_agg_axis(axis) else: values = self.values result = f(values) if hasattr(result, 'dtype') and is_object_dtype(result.dtype): try: if filter_type is None or filter_type == 'numeric': result = result.astype(np.float64) elif filter_type == 'bool' and notna(result).all(): result = result.astype(np.bool_) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can if axis == 0: result = coerce_to_dtypes(result, self.dtypes) if constructor is not None: result = Series(result, index=labels) return result def nunique(self, axis=0, dropna=True): """ Count distinct observations over requested axis. Return Series with number of distinct observations. Can ignore NaN values. .. versionadded:: 0.20.0 Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts.
Returns ------- Series See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]}) >>> df.nunique() A 3 B 1 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64 """ return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis=0, skipna=True): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin Notes ----- This method is the DataFrame version of ``ndarray.argmin``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def idxmax(self, axis=0, skipna=True): """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of maxima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmax Notes ----- This method is the DataFrame version of ``ndarray.argmax``. """ axis = self._get_axis_number(axis) indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna) index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): """ Let's be explicit about this. """ if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num) def mode(self, axis=0, numeric_only=False, dropna=True): """ Get the mode(s) of each element along the selected axis. The mode of a set of values is the value that appears most often. It can be multiple values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to iterate over while searching for the mode: * 0 or 'index' : get mode of each column * 1 or 'columns' : get mode of each row numeric_only : bool, default False If True, only apply to numeric columns. dropna : bool, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- DataFrame The modes of each column or row. See Also -------- Series.mode : Return the highest frequency value in a Series. Series.value_counts : Return the counts of values in a Series. Examples -------- >>> df = pd.DataFrame([('bird', 2, 2), ... ('mammal', 4, np.nan), ... ('arthropod', 8, 0), ... ('bird', 2, np.nan)], ... index=('falcon', 'horse', 'spider', 'ostrich'), ... columns=('species', 'legs', 'wings')) >>> df species legs wings falcon bird 2 2.0 horse mammal 4 NaN spider arthropod 8 0.0 ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings are both 0 and 2. 
The second row of species and legs contains ``NaN``, because they have only one mode, but the DataFrame has two rows. >>> df.mode() species legs wings 0 bird 2.0 0.0 1 NaN NaN 2.0 Setting ``dropna=False`` ``NaN`` values are considered and they can be the mode (like for wings). >>> df.mode(dropna=False) species legs wings 0 bird 2 NaN Setting ``numeric_only=True``, only the mode of numeric columns is computed, and columns of other types are ignored. >>> df.mode(numeric_only=True) legs wings 0 2.0 0.0 1 NaN 2.0 To compute the mode over columns and not rows, use the axis parameter: >>> df.mode(axis='columns', numeric_only=True) 0 1 falcon 2.0 NaN horse 4.0 NaN spider 0.0 8.0 ostrich 2.0 NaN """ data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode(dropna=dropna) return data.apply(f, axis=axis) def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation='linear'): """ Return values at the given quantile over requested axis. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value between 0 <= q <= 1, the quantile(s) to compute. axis : {0, 1, 'index', 'columns'} (default 0) Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. .. versionadded:: 0.18.0 Returns ------- Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also -------- core.window.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), ... columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 Name: 0.1, dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. >>> df = pd.DataFrame({'A': [1, 2], ... 'B': [pd.Timestamp('2010'), ... pd.Timestamp('2011')], ... 'C': [pd.Timedelta('1 days'), ... pd.Timedelta('2 days')]}) >>> df.quantile(0.5, numeric_only=False) A 1.5 B 2010-07-02 12:00:00 C 1 days 12:00:00 Name: 0.5, dtype: object """ self._check_percentile(q) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) is_transposed = axis == 1 if is_transposed: data = data.T result = data._data.quantile(qs=q, axis=1, interpolation=interpolation, transposed=is_transposed) if result.ndim == 2: result = self._constructor(result) else: result = self._constructor_sliced(result, name=q) if is_transposed: result = result.T return result def to_timestamp(self, freq=None, how='start', axis=0, copy=True): """ Cast to DatetimeIndex of timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. 
how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with DatetimeIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how)) elif axis == 1: new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( ax=axis)) return self._constructor(new_data) def to_period(self, freq=None, axis=0, copy=True): """ Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- TimeSeries with PeriodIndex """ new_data = self._data if copy: new_data = new_data.copy() axis = self._get_axis_number(axis) if axis == 0: new_data.set_axis(1, self.index.to_period(freq=freq)) elif axis == 1: new_data.set_axis(0, self.columns.to_period(freq=freq)) else: # pragma: no cover raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format( ax=axis)) return self._constructor(new_data) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dict The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dict, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. See Also -------- DataFrame.eq: Equality test for DataFrame. Series.isin: Equivalent method on Series. Series.str.contains: Test if pattern or regex is contained within a string of a Series or Index. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True When ``values`` is a Series or DataFrame the index and column must match. Note that 'falcon' does not match based on the number of legs in df2. >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]}, ... 
index=['spider', 'falcon']) >>> df.isin(other) num_legs num_wings falcon True True dog False False """ if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("cannot compute isin with " "a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError("only list-like or dict-like objects are " "allowed to be passed to DataFrame.isin(), " "you passed a " "{0!r}".format(type(values).__name__)) return DataFrame( algorithms.isin(self.values.ravel(), values).reshape(self.shape), self.index, self.columns) # ---------------------------------------------------------------------- # Add plotting methods to DataFrame plot = CachedAccessor("plot", pandas.plotting.FramePlotMethods) hist = pandas.plotting.hist_frame boxplot = pandas.plotting.boxplot_frame sparse = CachedAccessor("sparse", SparseFrameAccessor) DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True, aliases={'rows': 0}, docs={ 'index': 'The index (row labels) of the DataFrame.', 'columns': 'The column labels of the DataFrame.'}) DataFrame._add_numeric_operations() DataFrame._add_series_or_dataframe_operations() ops.add_flex_arithmetic_methods(DataFrame) ops.add_special_arithmetic_methods(DataFrame) def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() for index, s in data.items(): for col, v in s.items(): new_data[col] = new_data.get(col, OrderedDict()) new_data[col][index] = v return new_data def _put_str(s, space): return '{s}'.format(s=s)[:space].ljust(space)
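# ---------------------------------------------------------------------------
# Illustrative usage sketch: a minimal, hedged demonstration of the
# ``corrwith(..., drop=...)`` behavior implemented above, where labels that
# do not match along the given axis are appended as NaN unless ``drop=True``
# (the GH 22375 branch). Column names and data here are arbitrary examples.
import numpy as np
import pandas as pd

df1 = pd.DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
df2 = pd.DataFrame(np.random.randn(5, 2), columns=['a', 'b'])

print(df1.corrwith(df2))             # 'c' is kept in the result as NaN
print(df1.corrwith(df2, drop=True))  # 'c' is dropped from the result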
""" Tests for DatetimeArray """ import operator import numpy as np import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm class TestDatetimeArrayConstructor: def test_freq_validation(self): # GH#24623 check that invalid instances cannot be created with the # public constructor arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 msg = ("Inferred frequency H from passed values does not " "conform to passed frequency W-SUN") with pytest.raises(ValueError, match=msg): DatetimeArray(arr, freq="W") @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, pd.DatetimeIndex]) def test_mixing_naive_tzaware_raises(self, meth): # GH#24569 arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) msg = ('Cannot mix tz-aware with tz-naive values|' 'Tz-aware datetime.datetime cannot be converted ' 'to datetime64 unless utc=True') for obj in [arr, arr[::-1]]: # check that we raise regardless of whether naive is found # before aware or vice-versa with pytest.raises(ValueError, match=msg): meth(obj) def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): DatetimeArray([1, 2, 3]) def test_other_type_raises(self): with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): DatetimeArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') def test_freq_infer_raises(self): with pytest.raises(ValueError, match='Frequency inference'): DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) assert arr._data is data arr = DatetimeArray(data, copy=True) assert arr._data is not data class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is # sufficiently robust def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators): # arbitrary tz-naive DatetimeIndex opname = all_compare_operators.strip('_') op = getattr(operator, opname) dti = pd.date_range('2016-01-1', freq='MS', periods=9, tz=None) arr = DatetimeArray(dti) assert arr.freq == dti.freq assert arr.tz == dti.tz right = dti expected = np.ones(len(arr), dtype=bool) if opname in ['ne', 'gt', 'lt']: # for these the comparisons should be all-False expected = ~expected result = op(arr, arr) tm.assert_numpy_array_equal(result, expected) for other in [right, np.array(right)]: # TODO: add list and tuple, and object-dtype once those # are fixed in the constructor result = op(arr, other) tm.assert_numpy_array_equal(result, expected) result = op(other, arr) tm.assert_numpy_array_equal(result, expected) class TestDatetimeArray: def 
test_astype_to_same(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) assert result is arr @pytest.mark.parametrize("dtype", [ int, np.int32, np.int64, 'uint32', 'uint64', ]) def test_astype_int(self, dtype): arr = DatetimeArray._from_sequence([pd.Timestamp('2000'), pd.Timestamp('2001')]) result = arr.astype(dtype) if np.dtype(dtype).kind == 'u': expected_dtype = np.dtype('uint64') else: expected_dtype = np.dtype('int64') expected = arr.astype(expected_dtype) assert result.dtype == expected_dtype tm.assert_numpy_array_equal(result, expected) def test_tz_setter_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(AttributeError, match='tz_localize'): arr.tz = 'UTC' def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(ValueError, match="None"): arr[0] = pd.Timestamp('2000') with pytest.raises(ValueError, match="US/Central"): arr[0] = pd.Timestamp('2000', tz="US/Eastern") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range('2000', periods=2, freq='D', tz='US/Central')) a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None def test_repeat_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti) repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti).repeat([4, 3]) result = arr.value_counts() # Note: not tm.assert_index_equal, since `freq`s do not match assert result.index.equals(dti) arr[-2] = pd.NaT result = arr.value_counts() expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('method', ['pad', 'backfill']) def test_fillna_preserves_tz(self, method): dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') arr = DatetimeArray(dti, copy=True) arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] expected = DatetimeArray._from_sequence( [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz='US/Central' ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) # assert that arr and dti were not modified in-place assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') def test_array_interface_tz(self): tz = "US/Central" data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) result = np.asarray(data) expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), pd.Timestamp('2017-01-02T00:00:00', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype='M8[ns]') expected = np.array(['2017-01-01T06:00:00', '2017-01-02T06:00:00'], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) def test_array_interface(self): data = DatetimeArray(pd.date_range('2017', periods=2)) expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], dtype='datetime64[ns]') result = np.asarray(data) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) expected = 
np.array([pd.Timestamp('2017-01-01T00:00:00'), pd.Timestamp('2017-01-02T00:00:00')], dtype=object) tm.assert_numpy_array_equal(result, expected) class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(TypeError, match='data is already tz-aware'): sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC")) def test_tz_dtype_matches(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result, _, _ = sequence_to_dt64ns( arr, dtype=DatetimeTZDtype(tz="US/Central")) tm.assert_numpy_array_equal(arr._data, result) class TestReductions: @pytest.mark.parametrize("tz", [None, "US/Central"]) def test_min_max(self, tz): arr = DatetimeArray._from_sequence([ '2000-01-03', '2000-01-03', 'NaT', '2000-01-02', '2000-01-05', '2000-01-04', ], tz=tz) result = arr.min() expected = pd.Timestamp('2000-01-02', tz=tz) assert result == expected result = arr.max() expected = pd.Timestamp('2000-01-05', tz=tz) assert result == expected result = arr.min(skipna=False) assert result is pd.NaT result = arr.max(skipna=False) assert result is pd.NaT @pytest.mark.parametrize("tz", [None, "US/Central"]) @pytest.mark.parametrize('skipna', [True, False]) def test_min_max_empty(self, skipna, tz): arr = DatetimeArray._from_sequence([], tz=tz) result = arr.min(skipna=skipna) assert result is pd.NaT result = arr.max(skipna=skipna) assert result is pd.NaT
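# ---------------------------------------------------------------------------
# Illustrative usage sketch: a minimal, hedged recap of the construction and
# mutation semantics exercised by the tests above (``test_copy`` and
# ``test_setitem_clears_freq``); the values here are arbitrary examples.
import numpy as np
import pandas as pd
from pandas.core.arrays import DatetimeArray

data = np.array([1, 2, 3], dtype='M8[ns]')
shared = DatetimeArray(data, copy=False)
assert shared._data is data       # copy=False shares the backing buffer

arr = DatetimeArray(pd.date_range('2000', periods=2, freq='D'))
arr[0] = pd.Timestamp('2000')
assert arr.freq is None           # assigning an element clears the cached freq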
cbertinato/pandas
pandas/tests/arrays/test_datetimes.py
pandas/core/frame.py
""" manage PyTables query interface via Expressions """ import ast from functools import partial import numpy as np from pandas._libs.tslibs import Timedelta, Timestamp from pandas.compat.chainmap import DeepChainMap from pandas.core.dtypes.common import is_list_like import pandas as pd from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation import expr, ops from pandas.core.computation.common import _ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import UndefinedVariableError, is_term from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded class Scope(expr.Scope): __slots__ = 'queryables', def __init__(self, level, global_dict=None, local_dict=None, queryables=None): super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) self.queryables = queryables or dict() class Term(ops.Term): def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls supr_new = StringMixin.__new__ return supr_new(klass) def __init__(self, name, env, side=None, encoding=None): super().__init__(name, env, side=side, encoding=encoding) def _resolve_name(self): # must be a queryables if self.side == 'left': if self.name not in self.env.queryables: raise NameError('name {name!r} is not defined' .format(name=self.name)) return self.name # resolve the rhs (and allow it to be None) try: return self.env.resolve(self.name, is_local=False) except UndefinedVariableError: return self.name # read-only property overwriting read/write property @property # type: ignore def value(self): return self._value class Constant(Term): def __init__(self, value, env, side=None, encoding=None): super().__init__(value, env, side=side, encoding=encoding) def _resolve_name(self): return self._name class BinOp(ops.BinOp): _max_selectors = 31 def __init__(self, op, lhs, rhs, queryables, encoding): super().__init__(op, lhs, rhs) self.queryables = queryables self.encoding = encoding self.filter = None self.condition = None def _disallow_scalar_only_bool_ops(self): pass def prune(self, klass): def pr(left, right): """ create and return a new specialized BinOp from myself """ if left is None: return right elif right is None: return left k = klass if isinstance(left, ConditionBinOp): if (isinstance(left, ConditionBinOp) and isinstance(right, ConditionBinOp)): k = JointConditionBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right elif isinstance(left, FilterBinOp): if (isinstance(left, FilterBinOp) and isinstance(right, FilterBinOp)): k = JointFilterBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right return k(self.op, left, right, queryables=self.queryables, encoding=self.encoding).evaluate() left, right = self.lhs, self.rhs if is_term(left) and is_term(right): res = pr(left.value, right.value) elif not is_term(left) and is_term(right): res = pr(left.prune(klass), right.value) elif is_term(left) and not is_term(right): res = pr(left.value, right.prune(klass)) elif not (is_term(left) or is_term(right)): res = pr(left.prune(klass), right.prune(klass)) return res def conform(self, rhs): """ inplace conform rhs """ if not is_list_like(rhs): rhs = [rhs] if isinstance(rhs, np.ndarray): rhs = rhs.ravel() return rhs @property def is_valid(self): """ return True if this is a valid field """ return self.lhs in self.queryables @property def is_in_table(self): """ return True if this is a valid column name for 
generation (e.g. an actual column in the table) """ return self.queryables.get(self.lhs) is not None @property def kind(self): """ the kind of my field """ return getattr(self.queryables.get(self.lhs), 'kind', None) @property def meta(self): """ the meta of my field """ return getattr(self.queryables.get(self.lhs), 'meta', None) @property def metadata(self): """ the metadata of my field """ return getattr(self.queryables.get(self.lhs), 'metadata', None) def generate(self, v): """ create and return the op string for this TermValue """ val = v.tostring(self.encoding) return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val) def convert_value(self, v): """ convert the expression that is in the term to something that is accepted by pytables """ def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) kind = _ensure_decoded(self.kind) meta = _ensure_decoded(self.meta) if kind == 'datetime64' or kind == 'datetime': if isinstance(v, (int, float)): v = stringify(v) v = _ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == 'timedelta64' or kind == 'timedelta': v = Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == 'category': metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata # check that metadata contains v if not result and v not in metadata: result = -1 return TermValue(result, result, 'integer') elif kind == 'integer': v = int(float(v)) return TermValue(v, v, kind) elif kind == 'float': v = float(v) return TermValue(v, v, kind) elif kind == 'bool': if isinstance(v, str): v = not v.strip().lower() in ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', ''] else: v = bool(v) return TermValue(v, v, kind) elif isinstance(v, str): # string quoting return TermValue(v, stringify(v), 'string') else: raise TypeError("Cannot compare {v} of type {typ} to {kind} column" .format(v=v, typ=type(v), kind=kind)) def convert_values(self): pass class FilterBinOp(BinOp): def __str__(self): return pprint_thing("[Filter : [{lhs}] -> [{op}]" .format(lhs=self.filter[0], op=self.filter[1])) def invert(self): """ invert the filter """ if self.filter is not None: f = list(self.filter) f[1] = self.generate_filter_op(invert=True) self.filter = tuple(f) return self def format(self): """ return the actual filter format """ return [self.filter] def evaluate(self): if not self.is_valid: raise ValueError("query term is not valid [{slf}]" .format(slf=self)) rhs = self.conform(self.rhs) values = [TermValue(v, v, self.kind).value for v in rhs] if self.is_in_table: # if too many values to create the expression, use a filter instead if self.op in ['==', '!='] and len(values) > self._max_selectors: filter_op = self.generate_filter_op() self.filter = ( self.lhs, filter_op, pd.Index(values)) return self return None # equality conditions if self.op in ['==', '!=']: filter_op = self.generate_filter_op() self.filter = ( self.lhs, filter_op, pd.Index(values)) else: raise TypeError("passing a filterable condition to a non-table " "indexer [{slf}]".format(slf=self)) return self def generate_filter_op(self, invert=False): if (self.op == '!=' and not invert) or (self.op == '==' and invert): return lambda axis, vals: ~axis.isin(vals) else: return lambda axis, vals: axis.isin(vals) class JointFilterBinOp(FilterBinOp): 
def format(self): raise NotImplementedError("unable to collapse Joint Filters") def evaluate(self): return self class ConditionBinOp(BinOp): def __str__(self): return pprint_thing("[Condition : [{cond}]]" .format(cond=self.condition)) def invert(self): """ invert the condition """ # if self.condition is not None: # self.condition = "~(%s)" % self.condition # return self raise NotImplementedError("cannot use an invert condition when " "passing to numexpr") def format(self): """ return the actual ne format """ return self.condition def evaluate(self): if not self.is_valid: raise ValueError("query term is not valid [{slf}]" .format(slf=self)) # convert values if we are in the table if not self.is_in_table: return None rhs = self.conform(self.rhs) values = [self.convert_value(v) for v in rhs] # equality conditions if self.op in ['==', '!=']: # too many values to create the expression? if len(values) <= self._max_selectors: vs = [self.generate(v) for v in values] self.condition = "({cond})".format(cond=' | '.join(vs)) # use a filter after reading else: return None else: self.condition = self.generate(values[0]) return self class JointConditionBinOp(ConditionBinOp): def evaluate(self): self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition, op=self.op, rhs=self.rhs.condition) return self class UnaryOp(ops.UnaryOp): def prune(self, klass): if self.op != '~': raise NotImplementedError("UnaryOp only support invert type ops") operand = self.operand operand = operand.prune(klass) if operand is not None: if issubclass(klass, ConditionBinOp): if operand.condition is not None: return operand.invert() elif issubclass(klass, FilterBinOp): if operand.filter is not None: return operand.invert() return None _op_classes = {'unary': UnaryOp} class ExprVisitor(BaseExprVisitor): const_type = Constant term_type = Term def __init__(self, env, engine, parser, **kwargs): super().__init__(env, engine, parser) for bin_op in self.binary_ops: bin_node = self.binary_op_nodes_map[bin_op] setattr(self, 'visit_{node}'.format(node=bin_node), lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs)) def visit_UnaryOp(self, node, **kwargs): if isinstance(node.op, (ast.Not, ast.Invert)): return UnaryOp('~', self.visit(node.operand)) elif isinstance(node.op, ast.USub): return self.const_type(-self.visit(node.operand).value, self.env) elif isinstance(node.op, ast.UAdd): raise NotImplementedError('Unary addition not supported') def visit_Index(self, node, **kwargs): return self.visit(node.value).value def visit_Assign(self, node, **kwargs): cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]) return self.visit(cmpr) def visit_Subscript(self, node, **kwargs): # only allow simple subscripts value = self.visit(node.value) slobj = self.visit(node.slice) try: value = value.value except AttributeError: pass try: return self.const_type(value[slobj], self.env) except TypeError: raise ValueError("cannot subscript {value!r} with " "{slobj!r}".format(value=value, slobj=slobj)) def visit_Attribute(self, node, **kwargs): attr = node.attr value = node.value ctx = node.ctx.__class__ if ctx == ast.Load: # resolve the value resolved = self.visit(value) # try to get the value to see if we are another expression try: resolved = resolved.value except (AttributeError): pass try: return self.term_type(getattr(resolved, attr), self.env) except AttributeError: # something like datetime.datetime where scope is overridden if isinstance(value, ast.Name) and value.id == attr: return resolved raise 
ValueError("Invalid Attribute context {name}" .format(name=ctx.__name__)) def translate_In(self, op): return ast.Eq() if isinstance(op, ast.In) else op def _rewrite_membership_op(self, node, left, right): return self.visit(node.op), node.op, left, right def _validate_where(w): """ Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict). """ if not (isinstance(w, (Expr, str)) or is_list_like(w)): raise TypeError("where must be passed as a string, Expr, " "or list-like of Exprs") return w class Expr(expr.Expr): """ hold a pytables like expression, comprised of possibly multiple 'terms' Parameters ---------- where : string term expression, Expr, or list-like of Exprs queryables : a "kinds" map (dict of column name -> kind), or None if column is non-indexable encoding : an encoding that will encode the query terms Returns ------- an Expr object Examples -------- 'index>=date' "columns=['A', 'D']" 'columns=A' 'columns==A' "~(columns=['A','B'])" 'index>df.index[3] & string="bar"' '(index>df.index[3] & index<=df.index[6]) | string="bar"' "ts>=Timestamp('2012-02-01')" "major_axis>=20130101" """ def __init__(self, where, queryables=None, encoding=None, scope_level=0): where = _validate_where(where) self.encoding = encoding self.condition = None self.filter = None self.terms = None self._visitor = None # capture the environment if needed local_dict = DeepChainMap() if isinstance(where, Expr): local_dict = where.env.scope where = where.expr elif isinstance(where, (list, tuple)): for idx, w in enumerate(where): if isinstance(w, Expr): local_dict = w.env.scope else: w = _validate_where(w) where[idx] = w where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa self.expr = where self.env = Scope(scope_level + 1, local_dict=local_dict) if queryables is not None and isinstance(self.expr, str): self.env.queryables.update(queryables) self._visitor = ExprVisitor(self.env, queryables=queryables, parser='pytables', engine='pytables', encoding=encoding) self.terms = self.parse() def __str__(self): if self.terms is not None: return pprint_thing(self.terms) return pprint_thing(self.expr) def evaluate(self): """ create and return the numexpr condition and filter """ try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid condition".format(expr=self.expr, slf=self)) try: self.filter = self.terms.prune(FilterBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid filter".format(expr=self.expr, slf=self)) return self.condition, self.filter class TermValue: """ hold a term value the we use to construct a condition/filter """ def __init__(self, value, converted, kind): self.value = value self.converted = converted self.kind = kind def tostring(self, encoding): """ quote the string if not encoded else encode and return """ if self.kind == 'string': if encoding is not None: return self.converted return '"{converted}"'.format(converted=self.converted) elif self.kind == 'float': # python 2 str(float) is not always # round-trippable so use repr() return repr(self.converted) return self.converted def maybe_expression(s): """ loose checking if s is a pytables-acceptable 
expression """ if not isinstance(s, str): return False ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',) # make sure we have an op at least return any(op in s for op in ops)
""" Tests for DatetimeArray """ import operator import numpy as np import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm class TestDatetimeArrayConstructor: def test_freq_validation(self): # GH#24623 check that invalid instances cannot be created with the # public constructor arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 msg = ("Inferred frequency H from passed values does not " "conform to passed frequency W-SUN") with pytest.raises(ValueError, match=msg): DatetimeArray(arr, freq="W") @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, pd.DatetimeIndex]) def test_mixing_naive_tzaware_raises(self, meth): # GH#24569 arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) msg = ('Cannot mix tz-aware with tz-naive values|' 'Tz-aware datetime.datetime cannot be converted ' 'to datetime64 unless utc=True') for obj in [arr, arr[::-1]]: # check that we raise regardless of whether naive is found # before aware or vice-versa with pytest.raises(ValueError, match=msg): meth(obj) def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): DatetimeArray([1, 2, 3]) def test_other_type_raises(self): with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): DatetimeArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') def test_freq_infer_raises(self): with pytest.raises(ValueError, match='Frequency inference'): DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) assert arr._data is data arr = DatetimeArray(data, copy=True) assert arr._data is not data class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is # sufficiently robust def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators): # arbitrary tz-naive DatetimeIndex opname = all_compare_operators.strip('_') op = getattr(operator, opname) dti = pd.date_range('2016-01-1', freq='MS', periods=9, tz=None) arr = DatetimeArray(dti) assert arr.freq == dti.freq assert arr.tz == dti.tz right = dti expected = np.ones(len(arr), dtype=bool) if opname in ['ne', 'gt', 'lt']: # for these the comparisons should be all-False expected = ~expected result = op(arr, arr) tm.assert_numpy_array_equal(result, expected) for other in [right, np.array(right)]: # TODO: add list and tuple, and object-dtype once those # are fixed in the constructor result = op(arr, other) tm.assert_numpy_array_equal(result, expected) result = op(other, arr) tm.assert_numpy_array_equal(result, expected) class TestDatetimeArray: def 
test_astype_to_same(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) assert result is arr @pytest.mark.parametrize("dtype", [ int, np.int32, np.int64, 'uint32', 'uint64', ]) def test_astype_int(self, dtype): arr = DatetimeArray._from_sequence([pd.Timestamp('2000'), pd.Timestamp('2001')]) result = arr.astype(dtype) if np.dtype(dtype).kind == 'u': expected_dtype = np.dtype('uint64') else: expected_dtype = np.dtype('int64') expected = arr.astype(expected_dtype) assert result.dtype == expected_dtype tm.assert_numpy_array_equal(result, expected) def test_tz_setter_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(AttributeError, match='tz_localize'): arr.tz = 'UTC' def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(ValueError, match="None"): arr[0] = pd.Timestamp('2000') with pytest.raises(ValueError, match="US/Central"): arr[0] = pd.Timestamp('2000', tz="US/Eastern") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range('2000', periods=2, freq='D', tz='US/Central')) a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None def test_repeat_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti) repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti).repeat([4, 3]) result = arr.value_counts() # Note: not tm.assert_index_equal, since `freq`s do not match assert result.index.equals(dti) arr[-2] = pd.NaT result = arr.value_counts() expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('method', ['pad', 'backfill']) def test_fillna_preserves_tz(self, method): dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') arr = DatetimeArray(dti, copy=True) arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] expected = DatetimeArray._from_sequence( [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz='US/Central' ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) # assert that arr and dti were not modified in-place assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') def test_array_interface_tz(self): tz = "US/Central" data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) result = np.asarray(data) expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), pd.Timestamp('2017-01-02T00:00:00', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype='M8[ns]') expected = np.array(['2017-01-01T06:00:00', '2017-01-02T06:00:00'], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) def test_array_interface(self): data = DatetimeArray(pd.date_range('2017', periods=2)) expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], dtype='datetime64[ns]') result = np.asarray(data) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) expected = 
np.array([pd.Timestamp('2017-01-01T00:00:00'), pd.Timestamp('2017-01-02T00:00:00')], dtype=object) tm.assert_numpy_array_equal(result, expected) class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(TypeError, match='data is already tz-aware'): sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC")) def test_tz_dtype_matches(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result, _, _ = sequence_to_dt64ns( arr, dtype=DatetimeTZDtype(tz="US/Central")) tm.assert_numpy_array_equal(arr._data, result) class TestReductions: @pytest.mark.parametrize("tz", [None, "US/Central"]) def test_min_max(self, tz): arr = DatetimeArray._from_sequence([ '2000-01-03', '2000-01-03', 'NaT', '2000-01-02', '2000-01-05', '2000-01-04', ], tz=tz) result = arr.min() expected = pd.Timestamp('2000-01-02', tz=tz) assert result == expected result = arr.max() expected = pd.Timestamp('2000-01-05', tz=tz) assert result == expected result = arr.min(skipna=False) assert result is pd.NaT result = arr.max(skipna=False) assert result is pd.NaT @pytest.mark.parametrize("tz", [None, "US/Central"]) @pytest.mark.parametrize('skipna', [True, False]) def test_min_max_empty(self, skipna, tz): arr = DatetimeArray._from_sequence([], tz=tz) result = arr.min(skipna=skipna) assert result is pd.NaT result = arr.max(skipna=skipna) assert result is pd.NaT
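Most of the constructor tests above go through DatetimeArray._from_sequence; a short sketch of the tz-aware path they exercise (private API, mirrored here purely for illustration):

import pandas as pd
from pandas.core.arrays import DatetimeArray

arr = DatetimeArray._from_sequence(['2000-01-01', '2000-01-02'],
                                   tz='US/Central')
# values are stored as UTC nanoseconds; the timezone lives on the dtype
assert str(arr.dtype) == 'datetime64[ns, US/Central]'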
cbertinato/pandas
pandas/tests/arrays/test_datetimes.py
pandas/core/computation/pytables.py
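The Expr strings documented in pytables.py above are what the where argument of HDFStore selection accepts; a small usage sketch via the public API (assumes PyTables is installed; the file and column names are hypothetical):

import pandas as pd

df = pd.DataFrame({'A': range(10)},
                  index=pd.date_range('2000-01-01', periods=10))
df.to_hdf('store.h5', 'df', format='table', data_columns=True)

# the where string is parsed by Expr/ExprVisitor into a numexpr condition
result = pd.read_hdf('store.h5', 'df', where='index >= "2000-01-05" & A > 5')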
import numpy as np import pytest import pandas as pd from pandas import Index, MultiIndex @pytest.fixture def idx(): # a MultiIndex used to test the general functionality of this object major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_codes = np.array([0, 0, 1, 2, 3, 3]) minor_codes = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] mi = MultiIndex(levels=[major_axis, minor_axis], codes=[major_codes, minor_codes], names=index_names, verify_integrity=False) return mi @pytest.fixture def idx_dup(): # compare tests/indexes/multi/conftest.py major_axis = Index(['foo', 'bar', 'baz', 'qux']) minor_axis = Index(['one', 'two']) major_codes = np.array([0, 0, 1, 0, 1, 1]) minor_codes = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] mi = MultiIndex(levels=[major_axis, minor_axis], codes=[major_codes, minor_codes], names=index_names, verify_integrity=False) return mi @pytest.fixture def index_names(): # names that match those in the idx fixture for testing equality of # names assigned to the idx return ['first', 'second'] @pytest.fixture def holder(): # the MultiIndex constructor, used to test compatibility with pickle return MultiIndex @pytest.fixture def compat_props(): # a MultiIndex must have these properties associated with it return ['shape', 'ndim', 'size'] @pytest.fixture def narrow_multi_index(): """ Return a MultiIndex that is narrower than the display (<80 characters). """ n = 1000 ci = pd.CategoricalIndex(list('a' * n) + (['abc'] * n)) dti = pd.date_range('2000-01-01', freq='s', periods=n * 2) return pd.MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=['a', 'b', 'dti']) @pytest.fixture def wide_multi_index(): """ Return a MultiIndex that is wider than the display (>80 characters). """ n = 1000 ci = pd.CategoricalIndex(list('a' * n) + (['abc'] * n)) dti = pd.date_range('2000-01-01', freq='s', periods=n * 2) levels = [ci, ci.codes + 9, dti, dti, dti] names = ['a', 'b', 'dti_1', 'dti_2', 'dti_3'] return pd.MultiIndex.from_arrays(levels, names=names)
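For orientation, the codes in the idx fixture above decode to six (first, second) pairs; an equivalent construction through the public from_tuples constructor would be (a sketch for illustration, not part of the fixture file):

import pandas as pd

mi = pd.MultiIndex.from_tuples(
    [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
     ('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
    names=['first', 'second'])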
""" Tests for DatetimeArray """ import operator import numpy as np import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm class TestDatetimeArrayConstructor: def test_freq_validation(self): # GH#24623 check that invalid instances cannot be created with the # public constructor arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 msg = ("Inferred frequency H from passed values does not " "conform to passed frequency W-SUN") with pytest.raises(ValueError, match=msg): DatetimeArray(arr, freq="W") @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, pd.DatetimeIndex]) def test_mixing_naive_tzaware_raises(self, meth): # GH#24569 arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) msg = ('Cannot mix tz-aware with tz-naive values|' 'Tz-aware datetime.datetime cannot be converted ' 'to datetime64 unless utc=True') for obj in [arr, arr[::-1]]: # check that we raise regardless of whether naive is found # before aware or vice-versa with pytest.raises(ValueError, match=msg): meth(obj) def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): DatetimeArray([1, 2, 3]) def test_other_type_raises(self): with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): DatetimeArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') def test_freq_infer_raises(self): with pytest.raises(ValueError, match='Frequency inference'): DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) assert arr._data is data arr = DatetimeArray(data, copy=True) assert arr._data is not data class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is # sufficiently robust def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators): # arbitrary tz-naive DatetimeIndex opname = all_compare_operators.strip('_') op = getattr(operator, opname) dti = pd.date_range('2016-01-1', freq='MS', periods=9, tz=None) arr = DatetimeArray(dti) assert arr.freq == dti.freq assert arr.tz == dti.tz right = dti expected = np.ones(len(arr), dtype=bool) if opname in ['ne', 'gt', 'lt']: # for these the comparisons should be all-False expected = ~expected result = op(arr, arr) tm.assert_numpy_array_equal(result, expected) for other in [right, np.array(right)]: # TODO: add list and tuple, and object-dtype once those # are fixed in the constructor result = op(arr, other) tm.assert_numpy_array_equal(result, expected) result = op(other, arr) tm.assert_numpy_array_equal(result, expected) class TestDatetimeArray: def 
test_astype_to_same(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) assert result is arr @pytest.mark.parametrize("dtype", [ int, np.int32, np.int64, 'uint32', 'uint64', ]) def test_astype_int(self, dtype): arr = DatetimeArray._from_sequence([pd.Timestamp('2000'), pd.Timestamp('2001')]) result = arr.astype(dtype) if np.dtype(dtype).kind == 'u': expected_dtype = np.dtype('uint64') else: expected_dtype = np.dtype('int64') expected = arr.astype(expected_dtype) assert result.dtype == expected_dtype tm.assert_numpy_array_equal(result, expected) def test_tz_setter_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(AttributeError, match='tz_localize'): arr.tz = 'UTC' def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(ValueError, match="None"): arr[0] = pd.Timestamp('2000') with pytest.raises(ValueError, match="US/Central"): arr[0] = pd.Timestamp('2000', tz="US/Eastern") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range('2000', periods=2, freq='D', tz='US/Central')) a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None def test_repeat_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti) repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti).repeat([4, 3]) result = arr.value_counts() # Note: not tm.assert_index_equal, since `freq`s do not match assert result.index.equals(dti) arr[-2] = pd.NaT result = arr.value_counts() expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('method', ['pad', 'backfill']) def test_fillna_preserves_tz(self, method): dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') arr = DatetimeArray(dti, copy=True) arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] expected = DatetimeArray._from_sequence( [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz='US/Central' ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) # assert that arr and dti were not modified in-place assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') def test_array_interface_tz(self): tz = "US/Central" data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) result = np.asarray(data) expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), pd.Timestamp('2017-01-02T00:00:00', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype='M8[ns]') expected = np.array(['2017-01-01T06:00:00', '2017-01-02T06:00:00'], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) def test_array_interface(self): data = DatetimeArray(pd.date_range('2017', periods=2)) expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], dtype='datetime64[ns]') result = np.asarray(data) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) expected = 
np.array([pd.Timestamp('2017-01-01T00:00:00'), pd.Timestamp('2017-01-02T00:00:00')], dtype=object) tm.assert_numpy_array_equal(result, expected) class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(TypeError, match='data is already tz-aware'): sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC")) def test_tz_dtype_matches(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result, _, _ = sequence_to_dt64ns( arr, dtype=DatetimeTZDtype(tz="US/Central")) tm.assert_numpy_array_equal(arr._data, result) class TestReductions: @pytest.mark.parametrize("tz", [None, "US/Central"]) def test_min_max(self, tz): arr = DatetimeArray._from_sequence([ '2000-01-03', '2000-01-03', 'NaT', '2000-01-02', '2000-01-05', '2000-01-04', ], tz=tz) result = arr.min() expected = pd.Timestamp('2000-01-02', tz=tz) assert result == expected result = arr.max() expected = pd.Timestamp('2000-01-05', tz=tz) assert result == expected result = arr.min(skipna=False) assert result is pd.NaT result = arr.max(skipna=False) assert result is pd.NaT @pytest.mark.parametrize("tz", [None, "US/Central"]) @pytest.mark.parametrize('skipna', [True, False]) def test_min_max_empty(self, skipna, tz): arr = DatetimeArray._from_sequence([], tz=tz) result = arr.min(skipna=skipna) assert result is pd.NaT result = arr.max(skipna=skipna) assert result is pd.NaT
cbertinato/pandas
pandas/tests/arrays/test_datetimes.py
pandas/tests/indexes/multi/conftest.py
import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( CategoricalIndex, DatetimeIndex, Index, Int64Index, IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, isna) from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin import pandas.util.testing as tm class Base: """ base class for index sub-class tests """ _holder = None _compat_props = ['shape', 'ndim', 'size', 'nbytes'] def setup_indices(self): for name, idx in self.indices.items(): setattr(self, name, idx) def test_pickle_compat_construction(self): # need an object to create with msg = (r"Index\(\.\.\.\) must be called with a collection of some" r" kind, None was passed|" r"__new__\(\) missing 1 required positional argument: 'data'|" r"__new__\(\) takes at least 2 arguments \(1 given\)") with pytest.raises(TypeError, match=msg): self._holder() def test_to_series(self): # assert that we are creating a copy of the index idx = self.create_index() s = idx.to_series() assert s.values is not idx.values assert s.index is not idx assert s.name == idx.name def test_to_series_with_arguments(self): # GH18699 # index kwarg idx = self.create_index() s = idx.to_series(index=idx) assert s.values is not idx.values assert s.index is idx assert s.name == idx.name # name kwarg idx = self.create_index() s = idx.to_series(name='__test') assert s.values is not idx.values assert s.index is not idx assert s.name != idx.name @pytest.mark.parametrize("name", [None, "new_name"]) def test_to_frame(self, name): # see GH-15230, GH-22580 idx = self.create_index() if name: idx_name = name else: idx_name = idx.name or 0 df = idx.to_frame(name=idx_name) assert df.index is idx assert len(df.columns) == 1 assert df.columns[0] == idx_name assert df[idx_name].values is not idx.values df = idx.to_frame(index=False, name=idx_name) assert df.index is not idx def test_to_frame_datetime_tz(self): # GH 25809 idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D') idx = idx.tz_localize('UTC') result = idx.to_frame() expected = pd.DataFrame(idx, index=idx) tm.assert_frame_equal(result, expected) def test_shift(self): # GH8083 test the base class for shift idx = self.create_index() msg = "Not supported for type {}".format(type(idx).__name__) with pytest.raises(NotImplementedError, match=msg): idx.shift(1) with pytest.raises(NotImplementedError, match=msg): idx.shift(1, 2) def test_create_index_existing_name(self): # GH11193, when an existing index is passed, and a new name is not # specified, the new index should inherit the previous object name expected = self.create_index() if not isinstance(expected, MultiIndex): expected.name = 'foo' result = pd.Index(expected) tm.assert_index_equal(result, expected) result = pd.Index(expected, name='bar') expected.name = 'bar' tm.assert_index_equal(result, expected) else: expected.names = ['foo', 'bar'] result = pd.Index(expected) tm.assert_index_equal( result, Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object'), names=['foo', 'bar'])) result = pd.Index(expected, names=['A', 'B']) tm.assert_index_equal( result, Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], dtype='object'), names=['A', 'B'])) def test_numeric_compat(self): idx = self.create_index() with pytest.raises(TypeError, 
match="cannot perform __mul__"): idx * 1 with pytest.raises(TypeError, match="cannot perform __rmul__"): 1 * idx div_err = "cannot perform __truediv__" with pytest.raises(TypeError, match=div_err): idx / 1 div_err = div_err.replace(' __', ' __r') with pytest.raises(TypeError, match=div_err): 1 / idx with pytest.raises(TypeError, match="cannot perform __floordiv__"): idx // 1 with pytest.raises(TypeError, match="cannot perform __rfloordiv__"): 1 // idx def test_logical_compat(self): idx = self.create_index() with pytest.raises(TypeError, match='cannot perform all'): idx.all() with pytest.raises(TypeError, match='cannot perform any'): idx.any() def test_boolean_context_compat(self): # boolean context compat idx = self.create_index() with pytest.raises(ValueError, match='The truth value of a'): if idx: pass def test_reindex_base(self): idx = self.create_index() expected = np.arange(idx.size, dtype=np.intp) actual = idx.get_indexer(idx) tm.assert_numpy_array_equal(expected, actual) with pytest.raises(ValueError, match='Invalid fill method'): idx.get_indexer(idx, method='invalid') def test_get_indexer_consistency(self): # See GH 16819 for name, index in self.indices.items(): if isinstance(index, IntervalIndex): continue if index.is_unique or isinstance(index, CategoricalIndex): indexer = index.get_indexer(index[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp else: e = "Reindexing only valid with uniquely valued Index objects" with pytest.raises(InvalidIndexError, match=e): index.get_indexer(index[0:2]) indexer, _ = index.get_indexer_non_unique(index[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp def test_ndarray_compat_properties(self): idx = self.create_index() assert idx.T.equals(idx) assert idx.transpose().equals(idx) values = idx.values for prop in self._compat_props: assert getattr(idx, prop) == getattr(values, prop) # test for validity idx.nbytes idx.values.nbytes def test_repr_roundtrip(self): idx = self.create_index() tm.assert_index_equal(eval(repr(idx)), idx) def test_str(self): # test the string repr idx = self.create_index() idx.name = 'foo' assert "'foo'" in str(idx) assert idx.__class__.__name__ in str(idx) def test_repr_max_seq_item_setting(self): # GH10182 idx = self.create_index() idx = idx.repeat(50) with pd.option_context("display.max_seq_items", None): repr(idx) assert '...' not in str(idx) def test_copy_name(self): # gh-12309: Check that the "name" argument # passed at initialization is honored. for name, index in self.indices.items(): if isinstance(index, MultiIndex): continue first = index.__class__(index, copy=True, name='mario') second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. assert first is not second # Not using tm.assert_index_equal() since names differ. 
assert index.equals(first) assert first.name == 'mario' assert second.name == 'mario' s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) if not isinstance(index, CategoricalIndex): # See gh-13365 s3 = s1 * s2 assert s3.index.name == 'mario' def test_ensure_copied_data(self): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 for name, index in self.indices.items(): init_kwargs = {} if isinstance(index, PeriodIndex): # Needs "freq" specification: init_kwargs['freq'] = index.freq elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): # RangeIndex cannot be initialized from data # MultiIndex and CategoricalIndex are tested separately continue index_type = index.__class__ result = index_type(index.values, copy=True, **init_kwargs) tm.assert_index_equal(index, result) tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='copy') if isinstance(index, PeriodIndex): # .values an object array of Period, thus copied result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='same') elif isinstance(index, IntervalIndex): # checked in test_interval.py pass else: result = index_type(index.values, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index.values, result.values, check_same='same') tm.assert_numpy_array_equal(index._ndarray_values, result._ndarray_values, check_same='same') def test_memory_usage(self): for name, index in self.indices.items(): result = index.memory_usage() if len(index): index.get_loc(index[0]) result2 = index.memory_usage() result3 = index.memory_usage(deep=True) # RangeIndex, IntervalIndex # don't have engines if not isinstance(index, (RangeIndex, IntervalIndex)): assert result2 > result if index.inferred_type == 'object': assert result3 > result2 else: # we report 0 for no-length assert result == 0 def test_argsort(self): for k, ind in self.indices.items(): # separately tested if k in ['catIndex']: continue result = ind.argsort() expected = np.array(ind).argsort() tm.assert_numpy_array_equal(result, expected, check_dtype=False) def test_numpy_argsort(self): for k, ind in self.indices.items(): result = np.argsort(ind) expected = ind.argsort() tm.assert_numpy_array_equal(result, expected) # these are the only two types that perform # pandas compatibility input validation - the # rest already perform separate (or no) such # validation via their 'values' attribute as # defined in pandas/core/indexes/base.py - they # cannot be changed at the moment due to # backwards compatibility concerns if isinstance(ind, (CategoricalIndex, RangeIndex)): msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, axis=1) msg = "the 'kind' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, kind='mergesort') msg = "the 'order' parameter is not supported" with pytest.raises(ValueError, match=msg): np.argsort(ind, order=('a', 'b')) def test_take(self): indexer = [4, 3, 0, 2] for k, ind in self.indices.items(): # separate if k in ['boolIndex', 'tuples', 'empty']: continue result = ind.take(indexer) expected = ind[indexer] assert result.equals(expected) if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # GH 10791 with pytest.raises(AttributeError): ind.freq def test_take_invalid_kwargs(self): idx = self.create_index() indices = [1, 2] msg = r"take\(\) got an unexpected keyword argument 'foo'" with
pytest.raises(TypeError, match=msg): idx.take(indices, foo=2) msg = "the 'out' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, out=indices) msg = "the 'mode' parameter is not supported" with pytest.raises(ValueError, match=msg): idx.take(indices, mode='clip') def test_repeat(self): rep = 2 i = self.create_index() expected = pd.Index(i.values.repeat(rep), name=i.name) tm.assert_index_equal(i.repeat(rep), expected) i = self.create_index() rep = np.arange(len(i)) expected = pd.Index(i.values.repeat(rep), name=i.name) tm.assert_index_equal(i.repeat(rep), expected) def test_numpy_repeat(self): rep = 2 i = self.create_index() expected = i.repeat(rep) tm.assert_index_equal(np.repeat(i, rep), expected) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.repeat(i, rep, axis=0) @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) def test_where(self, klass): i = self.create_index() cond = [True] * len(i) result = i.where(klass(cond)) expected = i tm.assert_index_equal(result, expected) cond = [False] + [True] * len(i[1:]) expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype) result = i.where(klass(cond)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("case", [0.5, "xxx"]) @pytest.mark.parametrize("method", ["intersection", "union", "difference", "symmetric_difference"]) def test_set_ops_error_cases(self, case, method): for name, idx in self.indices.items(): # non-iterable input msg = "Input must be Index or array-like" with pytest.raises(TypeError, match=msg): getattr(idx, method)(case) def test_intersection_base(self): for name, idx in self.indices.items(): first = idx[:5] second = idx[:3] intersect = first.intersection(second) if isinstance(idx, CategoricalIndex): pass else: assert tm.equalContents(intersect, second) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.intersection(case) assert tm.equalContents(result, second) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.intersection([1, 2, 3]) def test_union_base(self): for name, idx in self.indices.items(): first = idx[3:] second = idx[:5] everything = idx union = first.union(second) assert tm.equalContents(union, everything) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.union(case) assert tm.equalContents(result, everything) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.union([1, 2, 3]) @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): for name, idx in self.indices.items(): first = idx[2:] second = idx[:4] answer = idx[4:] result = first.difference(second, sort) if isinstance(idx, CategoricalIndex): pass else: assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): assert result.__class__ == answer.__class__ tm.assert_numpy_array_equal(result.sort_values().asi8, answer.sort_values().asi8) else: result = first.difference(case, sort) assert tm.equalContents(result, answer) if isinstance(idx, MultiIndex): msg = 
"other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.difference([1, 2, 3], sort) def test_symmetric_difference(self): for name, idx in self.indices.items(): first = idx[1:] second = idx[:-1] if isinstance(idx, CategoricalIndex): pass else: answer = idx[[0, -1]] result = first.symmetric_difference(second) assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if isinstance(idx, CategoricalIndex): pass else: result = first.symmetric_difference(case) assert tm.equalContents(result, answer) if isinstance(idx, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" with pytest.raises(TypeError, match=msg): first.symmetric_difference([1, 2, 3]) def test_insert_base(self): for name, idx in self.indices.items(): result = idx[1:4] if not len(idx): continue # test 0th element assert idx[0:4].equals(result.insert(0, idx[0])) def test_delete_base(self): for name, idx in self.indices.items(): if not len(idx): continue if isinstance(idx, RangeIndex): # tested in class continue expected = idx[1:] result = idx.delete(0) assert result.equals(expected) assert result.name == expected.name expected = idx[:-1] result = idx.delete(-1) assert result.equals(expected) assert result.name == expected.name with pytest.raises((IndexError, ValueError)): # either depending on numpy version idx.delete(len(idx)) def test_equals(self): for name, idx in self.indices.items(): assert idx.equals(idx) assert idx.equals(idx.copy()) assert idx.equals(idx.astype(object)) assert not idx.equals(list(idx)) assert not idx.equals(np.array(idx)) # Cannot pass in non-int64 dtype to RangeIndex if not isinstance(idx, RangeIndex): same_values = Index(idx, dtype=object) assert idx.equals(same_values) assert same_values.equals(idx) if idx.nlevels == 1: # do not test MultiIndex assert not idx.equals(pd.Series(idx)) def test_equals_op(self): # GH9947, GH10637 index_a = self.create_index() if isinstance(index_a, PeriodIndex): pytest.skip('Skip check for PeriodIndex') n = len(index_a) index_b = index_a[0:-1] index_c = index_a[0:-1].append(index_a[-2:-1]) index_d = index_a[0:1] msg = "Lengths must match|could not be broadcast" with pytest.raises(ValueError, match=msg): index_a == index_b expected1 = np.array([True] * n) expected2 = np.array([True] * (n - 1) + [False]) tm.assert_numpy_array_equal(index_a == index_a, expected1) tm.assert_numpy_array_equal(index_a == index_c, expected2) # test comparisons with numpy arrays array_a = np.array(index_a) array_b = np.array(index_a[0:-1]) array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) array_d = np.array(index_a[0:1]) with pytest.raises(ValueError, match=msg): index_a == array_b tm.assert_numpy_array_equal(index_a == array_a, expected1) tm.assert_numpy_array_equal(index_a == array_c, expected2) # test comparisons with Series series_a = Series(array_a) series_b = Series(array_b) series_c = Series(array_c) series_d = Series(array_d) with pytest.raises(ValueError, match=msg): index_a == series_b tm.assert_numpy_array_equal(index_a == series_a, expected1) tm.assert_numpy_array_equal(index_a == series_c, expected2) # cases where length is 1 for one of them with pytest.raises(ValueError, match="Lengths must match"): index_a == index_d with pytest.raises(ValueError, match="Lengths must match"): index_a == series_d with pytest.raises(ValueError, match="Lengths must match"): index_a == array_d msg = "Can only compare identically-labeled Series objects" with 
pytest.raises(ValueError, match=msg): series_a == series_d with pytest.raises(ValueError, match="Lengths must match"): series_a == array_d # comparing with a scalar should broadcast; note that we are excluding # MultiIndex because in this case each item in the index is a tuple of # length 2, and therefore is considered an array of length 2 in the # comparison instead of a scalar if not isinstance(index_a, MultiIndex): expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) # assuming the 2nd to last item is unique in the data item = index_a[-2] tm.assert_numpy_array_equal(index_a == item, expected3) tm.assert_series_equal(series_a == item, Series(expected3)) def test_hasnans_isnans(self): # GH 11343, added tests for hasnans / isnans for name, index in self.indices.items(): if isinstance(index, MultiIndex): pass else: idx = index.copy() # cases in indices don't include NaN expected = np.array([False] * len(idx), dtype=bool) tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is False idx = index.copy() values = np.asarray(idx.values) if len(index) == 0: continue elif isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan if isinstance(index, PeriodIndex): idx = index.__class__(values, freq=index.freq) else: idx = index.__class__(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is True def test_fillna(self): # GH 11343 for name, index in self.indices.items(): if len(index) == 0: pass elif isinstance(index, MultiIndex): idx = index.copy() msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.fillna(idx[0]) else: idx = index.copy() result = idx.fillna(idx[0]) tm.assert_index_equal(result, idx) assert result is not idx msg = "'value' must be a scalar, passed: " with pytest.raises(TypeError, match=msg): idx.fillna([idx[0]]) idx = index.copy() values = np.asarray(idx.values) if isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue else: values[1] = np.nan if isinstance(index, PeriodIndex): idx = index.__class__(values, freq=index.freq) else: idx = index.__class__(values) expected = np.array([False] * len(idx), dtype=bool) expected[1] = True tm.assert_numpy_array_equal(idx._isnan, expected) assert idx.hasnans is True def test_nulls(self): # this is really a smoke test for the methods, # as their functionality is adequately tested elsewhere for name, index in self.indices.items(): if len(index) == 0: tm.assert_numpy_array_equal( index.isna(), np.array([], dtype=bool)) elif isinstance(index, MultiIndex): idx = index.copy() msg = "isna is not defined for MultiIndex" with pytest.raises(NotImplementedError, match=msg): idx.isna() else: if not index.hasnans: tm.assert_numpy_array_equal( index.isna(), np.zeros(len(index), dtype=bool)) tm.assert_numpy_array_equal( index.notna(), np.ones(len(index), dtype=bool)) else: result = isna(index) tm.assert_numpy_array_equal(index.isna(), result) tm.assert_numpy_array_equal(index.notna(), ~result) def test_empty(self): # GH 15270 index = self.create_index() assert not index.empty assert index[:0].empty def test_join_self_unique(self, join_type): index = self.create_index() if index.is_unique: joined = index.join(index, how=join_type) assert (index == joined).all() def test_map(self): # callable index = self.create_index() # we don't infer
UInt64 if isinstance(index, pd.UInt64Index): expected = index.astype('int64') else: expected = index result = index.map(lambda x: x) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "mapper", [ lambda values, index: {i: e for e, i in zip(values, index)}, lambda values, index: pd.Series(values, index)]) def test_map_dictlike(self, mapper): index = self.create_index() if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)): pytest.skip("skipping tests for {}".format(type(index))) identity = mapper(index.values, index) # we don't infer to UInt64 for a dict if isinstance(index, pd.UInt64Index) and isinstance(identity, dict): expected = index.astype('int64') else: expected = index result = index.map(identity) tm.assert_index_equal(result, expected) # empty mappable expected = pd.Index([np.nan] * len(index)) result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) def test_putmask_with_wrong_mask(self): # GH18368 index = self.create_index() with pytest.raises(ValueError): index.putmask(np.ones(len(index) + 1, np.bool), 1) with pytest.raises(ValueError): index.putmask(np.ones(len(index) - 1, np.bool), 1) with pytest.raises(ValueError): index.putmask('foo', 1) @pytest.mark.parametrize('copy', [True, False]) @pytest.mark.parametrize('name', [None, 'foo']) @pytest.mark.parametrize('ordered', [True, False]) def test_astype_category(self, copy, name, ordered): # GH 18630 index = self.create_index() if name: index = index.rename(name) # standard categories dtype = CategoricalDtype(ordered=ordered) result = index.astype(dtype, copy=copy) expected = CategoricalIndex(index.values, name=name, ordered=ordered) tm.assert_index_equal(result, expected) # non-standard categories dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered) result = index.astype(dtype, copy=copy) expected = CategoricalIndex(index.values, name=name, dtype=dtype) tm.assert_index_equal(result, expected) if ordered is False: # dtype='category' defaults to ordered=False, so only test once result = index.astype('category', copy=copy) expected = CategoricalIndex(index.values, name=name) tm.assert_index_equal(result, expected) def test_is_unique(self): # initialize a unique index index = self.create_index().drop_duplicates() assert index.is_unique is True # empty index should be unique index_empty = index[:0] assert index_empty.is_unique is True # test basic dupes index_dup = index.insert(0, index[0]) assert index_dup.is_unique is False # single NA should be unique index_na = index.insert(0, np.nan) assert index_na.is_unique is True # multiple NA should not be unique index_na_dup = index_na.insert(0, np.nan) assert index_na_dup.is_unique is False
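The Base class above assumes each concrete suite supplies _holder, an indices dict, and create_index(); a minimal hypothetical subclass showing that contract (the class name and index contents are illustrative):

from pandas import Index

class TestExampleIndex(Base):
    _holder = Index  # the index constructor under test

    def setup_method(self, method):
        # populate self.indices, then expose each entry as an attribute
        self.indices = dict(strIndex=Index(['a', 'b', 'c']))
        self.setup_indices()

    def create_index(self):
        return Index(list('abcde'))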
""" Tests for DatetimeArray """ import operator import numpy as np import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm class TestDatetimeArrayConstructor: def test_freq_validation(self): # GH#24623 check that invalid instances cannot be created with the # public constructor arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 msg = ("Inferred frequency H from passed values does not " "conform to passed frequency W-SUN") with pytest.raises(ValueError, match=msg): DatetimeArray(arr, freq="W") @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, pd.DatetimeIndex]) def test_mixing_naive_tzaware_raises(self, meth): # GH#24569 arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) msg = ('Cannot mix tz-aware with tz-naive values|' 'Tz-aware datetime.datetime cannot be converted ' 'to datetime64 unless utc=True') for obj in [arr, arr[::-1]]: # check that we raise regardless of whether naive is found # before aware or vice-versa with pytest.raises(ValueError, match=msg): meth(obj) def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): DatetimeArray([1, 2, 3]) def test_other_type_raises(self): with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): DatetimeArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') def test_freq_infer_raises(self): with pytest.raises(ValueError, match='Frequency inference'): DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) assert arr._data is data arr = DatetimeArray(data, copy=True) assert arr._data is not data class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is # sufficiently robust def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators): # arbitrary tz-naive DatetimeIndex opname = all_compare_operators.strip('_') op = getattr(operator, opname) dti = pd.date_range('2016-01-1', freq='MS', periods=9, tz=None) arr = DatetimeArray(dti) assert arr.freq == dti.freq assert arr.tz == dti.tz right = dti expected = np.ones(len(arr), dtype=bool) if opname in ['ne', 'gt', 'lt']: # for these the comparisons should be all-False expected = ~expected result = op(arr, arr) tm.assert_numpy_array_equal(result, expected) for other in [right, np.array(right)]: # TODO: add list and tuple, and object-dtype once those # are fixed in the constructor result = op(arr, other) tm.assert_numpy_array_equal(result, expected) result = op(other, arr) tm.assert_numpy_array_equal(result, expected) class TestDatetimeArray: def 
test_astype_to_same(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) assert result is arr @pytest.mark.parametrize("dtype", [ int, np.int32, np.int64, 'uint32', 'uint64', ]) def test_astype_int(self, dtype): arr = DatetimeArray._from_sequence([pd.Timestamp('2000'), pd.Timestamp('2001')]) result = arr.astype(dtype) if np.dtype(dtype).kind == 'u': expected_dtype = np.dtype('uint64') else: expected_dtype = np.dtype('int64') expected = arr.astype(expected_dtype) assert result.dtype == expected_dtype tm.assert_numpy_array_equal(result, expected) def test_tz_setter_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(AttributeError, match='tz_localize'): arr.tz = 'UTC' def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(ValueError, match="None"): arr[0] = pd.Timestamp('2000') with pytest.raises(ValueError, match="US/Central"): arr[0] = pd.Timestamp('2000', tz="US/Eastern") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range('2000', periods=2, freq='D', tz='US/Central')) a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None def test_repeat_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti) repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti).repeat([4, 3]) result = arr.value_counts() # Note: not tm.assert_index_equal, since `freq`s do not match assert result.index.equals(dti) arr[-2] = pd.NaT result = arr.value_counts() expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('method', ['pad', 'backfill']) def test_fillna_preserves_tz(self, method): dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') arr = DatetimeArray(dti, copy=True) arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] expected = DatetimeArray._from_sequence( [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz='US/Central' ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) # assert that arr and dti were not modified in-place assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') def test_array_interface_tz(self): tz = "US/Central" data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) result = np.asarray(data) expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), pd.Timestamp('2017-01-02T00:00:00', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype='M8[ns]') expected = np.array(['2017-01-01T06:00:00', '2017-01-02T06:00:00'], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) def test_array_interface(self): data = DatetimeArray(pd.date_range('2017', periods=2)) expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], dtype='datetime64[ns]') result = np.asarray(data) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) expected = 
np.array([pd.Timestamp('2017-01-01T00:00:00'), pd.Timestamp('2017-01-02T00:00:00')], dtype=object) tm.assert_numpy_array_equal(result, expected) class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(TypeError, match='data is already tz-aware'): sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC")) def test_tz_dtype_matches(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result, _, _ = sequence_to_dt64ns( arr, dtype=DatetimeTZDtype(tz="US/Central")) tm.assert_numpy_array_equal(arr._data, result) class TestReductions: @pytest.mark.parametrize("tz", [None, "US/Central"]) def test_min_max(self, tz): arr = DatetimeArray._from_sequence([ '2000-01-03', '2000-01-03', 'NaT', '2000-01-02', '2000-01-05', '2000-01-04', ], tz=tz) result = arr.min() expected = pd.Timestamp('2000-01-02', tz=tz) assert result == expected result = arr.max() expected = pd.Timestamp('2000-01-05', tz=tz) assert result == expected result = arr.min(skipna=False) assert result is pd.NaT result = arr.max(skipna=False) assert result is pd.NaT @pytest.mark.parametrize("tz", [None, "US/Central"]) @pytest.mark.parametrize('skipna', [True, False]) def test_min_max_empty(self, skipna, tz): arr = DatetimeArray._from_sequence([], tz=tz) result = arr.min(skipna=skipna) assert result is pd.NaT result = arr.max(skipna=skipna) assert result is pd.NaT
cbertinato/pandas
pandas/tests/arrays/test_datetimes.py
pandas/tests/indexes/common.py
import numpy as np from pandas._libs import algos as libalgos, index as libindex import pandas.util.testing as tm class TestNumericEngine: def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype num = 1000 arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) # monotonic increasing engine = engine_type(lambda: arr, len(arr)) assert engine.is_monotonic_increasing is True assert engine.is_monotonic_decreasing is False # monotonic decreasing engine = engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is True # neither monotonic increasing nor decreasing arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype) engine = engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is False def test_is_unique(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype # unique arr = np.array([1, 3, 2], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.is_unique is True # not unique arr = np.array([1, 2, 1], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.is_unique is False def test_get_loc(self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype # unique arr = np.array([1, 2, 3], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.get_loc(2) == 1 # monotonic num = 1000 arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) engine = engine_type(lambda: arr, len(arr)) assert engine.get_loc(2) == slice(1000, 2000) # not monotonic arr = np.array([1, 2, 3] * num, dtype=dtype) engine = engine_type(lambda: arr, len(arr)) expected = np.array([False, True, False] * num, dtype=bool) result = engine.get_loc(2) assert (result == expected).all() def test_get_backfill_indexer( self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype arr = np.array([1, 5, 10], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) new = np.arange(12, dtype=dtype) result = engine.get_backfill_indexer(new) expected = libalgos.backfill(arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer( self, numeric_indexing_engine_type_and_dtype): engine_type, dtype = numeric_indexing_engine_type_and_dtype arr = np.array([1, 5, 10], dtype=dtype) engine = engine_type(lambda: arr, len(arr)) new = np.arange(12, dtype=dtype) result = engine.get_pad_indexer(new) expected = libalgos.pad(arr, new) tm.assert_numpy_array_equal(result, expected) class TestObjectEngine: engine_type = libindex.ObjectEngine dtype = np.object_ values = list('abc') def test_is_monotonic(self): num = 1000 arr = np.array(['a'] * num + ['a'] * num + ['c'] * num, dtype=self.dtype) # monotonic increasing engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_monotonic_increasing is True assert engine.is_monotonic_decreasing is False # monotonic decreasing engine = self.engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is True # neither monotonic increasing nor decreasing arr = np.array(['a'] * num + ['b'] * num + ['a'] * num, dtype=self.dtype) engine = self.engine_type(lambda: arr[::-1], len(arr)) assert engine.is_monotonic_increasing is False assert engine.is_monotonic_decreasing is False def test_is_unique(self): # unique arr =
np.array(self.values, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_unique is True # not unique arr = np.array(['a', 'b', 'a'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.is_unique is False def test_get_loc(self): # unique arr = np.array(self.values, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.get_loc('b') == 1 # monotonic num = 1000 arr = np.array(['a'] * num + ['b'] * num + ['c'] * num, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) assert engine.get_loc('b') == slice(1000, 2000) # not monotonic arr = np.array(self.values * num, dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) expected = np.array([False, True, False] * num, dtype=bool) result = engine.get_loc('b') assert (result == expected).all() def test_get_backfill_indexer(self): arr = np.array(['a', 'e', 'j'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_backfill_indexer(new) expected = libalgos.backfill["object"](arr, new) tm.assert_numpy_array_equal(result, expected) def test_get_pad_indexer(self): arr = np.array(['a', 'e', 'j'], dtype=self.dtype) engine = self.engine_type(lambda: arr, len(arr)) new = np.array(list('abcdefghij'), dtype=self.dtype) result = engine.get_pad_indexer(new) expected = libalgos.pad["object"](arr, new) tm.assert_numpy_array_equal(result, expected)
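# Usage sketch for the engine convention exercised above: an engine is
# constructed from a zero-argument values-callable plus a length.  This
# relies on the private pandas._libs.index API of this era (Int64Engine is
# an assumed concrete engine here); illustrative only, not a stable
# interface.
import numpy as np
from pandas._libs import index as libindex

arr = np.array([1, 2, 2, 3], dtype=np.int64)
engine = libindex.Int64Engine(lambda: arr, len(arr))
assert engine.is_monotonic_increasing
assert engine.get_loc(2) == slice(1, 3)  # monotonic -> contiguous slice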
""" Tests for DatetimeArray """ import operator import numpy as np import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm class TestDatetimeArrayConstructor: def test_freq_validation(self): # GH#24623 check that invalid instances cannot be created with the # public constructor arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 msg = ("Inferred frequency H from passed values does not " "conform to passed frequency W-SUN") with pytest.raises(ValueError, match=msg): DatetimeArray(arr, freq="W") @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, pd.DatetimeIndex]) def test_mixing_naive_tzaware_raises(self, meth): # GH#24569 arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) msg = ('Cannot mix tz-aware with tz-naive values|' 'Tz-aware datetime.datetime cannot be converted ' 'to datetime64 unless utc=True') for obj in [arr, arr[::-1]]: # check that we raise regardless of whether naive is found # before aware or vice-versa with pytest.raises(ValueError, match=msg): meth(obj) def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): DatetimeArray([1, 2, 3]) def test_other_type_raises(self): with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): DatetimeArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') def test_freq_infer_raises(self): with pytest.raises(ValueError, match='Frequency inference'): DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) assert arr._data is data arr = DatetimeArray(data, copy=True) assert arr._data is not data class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is # sufficiently robust def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators): # arbitrary tz-naive DatetimeIndex opname = all_compare_operators.strip('_') op = getattr(operator, opname) dti = pd.date_range('2016-01-1', freq='MS', periods=9, tz=None) arr = DatetimeArray(dti) assert arr.freq == dti.freq assert arr.tz == dti.tz right = dti expected = np.ones(len(arr), dtype=bool) if opname in ['ne', 'gt', 'lt']: # for these the comparisons should be all-False expected = ~expected result = op(arr, arr) tm.assert_numpy_array_equal(result, expected) for other in [right, np.array(right)]: # TODO: add list and tuple, and object-dtype once those # are fixed in the constructor result = op(arr, other) tm.assert_numpy_array_equal(result, expected) result = op(other, arr) tm.assert_numpy_array_equal(result, expected) class TestDatetimeArray: def 
test_astype_to_same(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) assert result is arr @pytest.mark.parametrize("dtype", [ int, np.int32, np.int64, 'uint32', 'uint64', ]) def test_astype_int(self, dtype): arr = DatetimeArray._from_sequence([pd.Timestamp('2000'), pd.Timestamp('2001')]) result = arr.astype(dtype) if np.dtype(dtype).kind == 'u': expected_dtype = np.dtype('uint64') else: expected_dtype = np.dtype('int64') expected = arr.astype(expected_dtype) assert result.dtype == expected_dtype tm.assert_numpy_array_equal(result, expected) def test_tz_setter_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(AttributeError, match='tz_localize'): arr.tz = 'UTC' def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(ValueError, match="None"): arr[0] = pd.Timestamp('2000') with pytest.raises(ValueError, match="US/Central"): arr[0] = pd.Timestamp('2000', tz="US/Eastern") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range('2000', periods=2, freq='D', tz='US/Central')) a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None def test_repeat_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti) repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti).repeat([4, 3]) result = arr.value_counts() # Note: not tm.assert_index_equal, since `freq`s do not match assert result.index.equals(dti) arr[-2] = pd.NaT result = arr.value_counts() expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('method', ['pad', 'backfill']) def test_fillna_preserves_tz(self, method): dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') arr = DatetimeArray(dti, copy=True) arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] expected = DatetimeArray._from_sequence( [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz='US/Central' ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) # assert that arr and dti were not modified in-place assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') def test_array_interface_tz(self): tz = "US/Central" data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) result = np.asarray(data) expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), pd.Timestamp('2017-01-02T00:00:00', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype='M8[ns]') expected = np.array(['2017-01-01T06:00:00', '2017-01-02T06:00:00'], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) def test_array_interface(self): data = DatetimeArray(pd.date_range('2017', periods=2)) expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], dtype='datetime64[ns]') result = np.asarray(data) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) expected = 
np.array([pd.Timestamp('2017-01-01T00:00:00'), pd.Timestamp('2017-01-02T00:00:00')], dtype=object) tm.assert_numpy_array_equal(result, expected) class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(TypeError, match='data is already tz-aware'): sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC")) def test_tz_dtype_matches(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result, _, _ = sequence_to_dt64ns( arr, dtype=DatetimeTZDtype(tz="US/Central")) tm.assert_numpy_array_equal(arr._data, result) class TestReductions: @pytest.mark.parametrize("tz", [None, "US/Central"]) def test_min_max(self, tz): arr = DatetimeArray._from_sequence([ '2000-01-03', '2000-01-03', 'NaT', '2000-01-02', '2000-01-05', '2000-01-04', ], tz=tz) result = arr.min() expected = pd.Timestamp('2000-01-02', tz=tz) assert result == expected result = arr.max() expected = pd.Timestamp('2000-01-05', tz=tz) assert result == expected result = arr.min(skipna=False) assert result is pd.NaT result = arr.max(skipna=False) assert result is pd.NaT @pytest.mark.parametrize("tz", [None, "US/Central"]) @pytest.mark.parametrize('skipna', [True, False]) def test_min_max_empty(self, skipna, tz): arr = DatetimeArray._from_sequence([], tz=tz) result = arr.min(skipna=skipna) assert result is pd.NaT result = arr.max(skipna=skipna) assert result is pd.NaT
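# Usage sketch of the tz-aware array-interface behavior pinned down above
# (nothing beyond what the tests already assert): without a requested
# dtype, np.asarray yields object-dtype Timestamps with tz preserved, while
# 'M8[ns]' converts to naive UTC values.
import numpy as np
import pandas as pd
from pandas.core.arrays import DatetimeArray

arr = DatetimeArray(pd.date_range('2017', periods=2, tz='US/Central'))
assert np.asarray(arr).dtype == object
assert np.asarray(arr, dtype='M8[ns]').dtype == np.dtype('M8[ns]')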
cbertinato/pandas
pandas/tests/arrays/test_datetimes.py
pandas/tests/indexing/test_indexing_engines.py
""" Arithmetic operations for PandasObjects This is not a public API. """ import datetime import operator import textwrap from typing import Dict, Optional import warnings import numpy as np from pandas._libs import algos as libalgos, lib, ops as libops from pandas.errors import NullFrequencyError from pandas.util._decorators import Appender from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, find_common_type, maybe_upcast_putmask) from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike_v_numeric, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCIndexClass, ABCSeries, ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import isna, notna import pandas as pd import pandas.core.common as com import pandas.core.missing as missing # ----------------------------------------------------------------------------- # Ops Wrapping Utilities def get_op_result_name(left, right): """ Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string """ # `left` is always a pd.Series when called from within ops if isinstance(right, (ABCSeries, pd.Index)): name = _maybe_match_name(left, right) else: name = left.name return name def _maybe_match_name(a, b): """ Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match of None if they have different names. Parameters ---------- a : object b : object Returns ------- name : str or None See Also -------- pandas.core.common.consensus_name_attr """ a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') if a_has and b_has: if a.name == b.name: return a.name else: # TODO: what if they both have np.nan for their names? return None elif a_has: return a.name elif b_has: return b.name return None def maybe_upcast_for_op(obj): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """ if type(obj) is datetime.timedelta: # GH#22390 cast up to Timedelta to rely on Timedelta # implementation; otherwise operation against numeric-dtype # raises TypeError return pd.Timedelta(obj) elif isinstance(obj, np.timedelta64) and not isna(obj): # In particular non-nanosecond timedelta64 needs to be cast to # nanoseconds, or else we get undesired behavior like # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') # The isna check is to avoid casting timedelta64("NaT"), which would # return NaT and incorrectly be treated as a datetime-NaT. 
return pd.Timedelta(obj) elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj): # GH#22390 Unfortunately we need to special-case right-hand # timedelta64 dtypes because numpy casts integer dtypes to # timedelta64 when operating with timedelta64 return pd.TimedeltaIndex(obj) return obj # ----------------------------------------------------------------------------- # Reversed Operations not available in the stdlib operator module. # Defining these instead of using lambdas allows us to reference them by name. def radd(left, right): return right + left def rsub(left, right): return right - left def rmul(left, right): return right * left def rdiv(left, right): return right / left def rtruediv(left, right): return right / left def rfloordiv(left, right): return right // left def rmod(left, right): # check if right is a string as % is the string # formatting operation; this is a TypeError # otherwise perform the op if isinstance(right, str): raise TypeError("{typ} cannot perform the operation mod".format( typ=type(left).__name__)) return right % left def rdivmod(left, right): return divmod(right, left) def rpow(left, right): return right ** left def rand_(left, right): return operator.and_(right, left) def ror_(left, right): return operator.or_(right, left) def rxor(left, right): return operator.xor(right, left) # ----------------------------------------------------------------------------- def make_invalid_op(name): """ Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function """ def invalid_op(self, other=None): raise TypeError("cannot perform {name} with this index type: " "{typ}".format(name=name, typ=type(self).__name__)) invalid_op.__name__ = name return invalid_op def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. Parameters ---------- name : str Returns ------- eval_kwargs : dict Examples -------- >>> _gen_eval_kwargs("__add__") {} >>> _gen_eval_kwargs("rtruediv") {'reversed': True, 'truediv': True} """ kwargs = {} # Series appear to only pass __add__, __radd__, ... # but DataFrame gets both these dunder names _and_ non-dunder names # add, radd, ... name = name.replace('__', '') if name.startswith('r'): if name not in ['radd', 'rand', 'ror', 'rxor']: # Exclude commutative operations kwargs['reversed'] = True if name in ['truediv', 'rtruediv']: kwargs['truediv'] = True if name in ['ne']: kwargs['masker'] = True return kwargs def _gen_fill_zeros(name): """ Find the appropriate fill value to use when filling in undefined values in the results of the given operation caused by operating on (generally dividing by) zero. Parameters ---------- name : str Returns ------- fill_value : {None, np.nan, np.inf} """ name = name.strip('__') if 'div' in name: # truediv, floordiv, div, and reversed variants fill_value = np.inf elif 'mod' in name: # mod, rmod fill_value = np.nan else: fill_value = None return fill_value def _get_frame_op_default_axis(name): """ Only DataFrame cares about default_axis, specifically: special methods have default_axis=None and flex methods have default_axis='columns'. Parameters ---------- name : str Returns ------- default_axis: str or None """ if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']: # bool methods return 'columns' elif name.startswith('__'): # __add__, __mul__, ... return None else: # add, mul, ... return 'columns' def _get_opstr(op, cls): """ Find the operation string, if any, to pass to numexpr for this operation. 
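# Quick illustration of the reversed-operator helpers above (defined as
# named functions rather than lambdas so they can be referenced by name):
#
#   >>> operator.sub(10, 3)   # left - right
#   7
#   >>> rsub(10, 3)           # right - left
#   -7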
def _get_opstr(op, cls):
    """
    Find the operation string, if any, to pass to numexpr for this operation.

    Parameters
    ----------
    op : binary operator
    cls : class

    Returns
    -------
    op_str : string or None
    """
    # numexpr is available for non-sparse classes
    subtyp = getattr(cls, '_subtyp', '')
    use_numexpr = 'sparse' not in subtyp

    if not use_numexpr:
        # if we're not using numexpr, then don't pass a str_rep
        return None

    return {operator.add: '+',
            radd: '+',
            operator.mul: '*',
            rmul: '*',
            operator.sub: '-',
            rsub: '-',
            operator.truediv: '/',
            rtruediv: '/',
            operator.floordiv: '//',
            rfloordiv: '//',
            operator.mod: None,  # TODO: Why None for mod but '%' for rmod?
            rmod: '%',
            operator.pow: '**',
            rpow: '**',
            operator.eq: '==',
            operator.ne: '!=',
            operator.le: '<=',
            operator.lt: '<',
            operator.ge: '>=',
            operator.gt: '>',
            operator.and_: '&',
            rand_: '&',
            operator.or_: '|',
            ror_: '|',
            operator.xor: '^',
            rxor: '^',
            divmod: None,
            rdivmod: None}[op]


def _get_op_name(op, special):
    """
    Find the name to attach to this method according to conventions
    for special and non-special methods.

    Parameters
    ----------
    op : binary operator
    special : bool

    Returns
    -------
    op_name : str
    """
    opname = op.__name__.strip('_')
    if special:
        opname = '__{opname}__'.format(opname=opname)
    return opname


# -----------------------------------------------------------------------------
# Docstring Generation and Templates

_add_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.add(b, fill_value=0)
a    2.0
b    1.0
c    1.0
d    1.0
e    NaN
dtype: float64
"""

_sub_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.subtract(b, fill_value=0)
a    0.0
b    1.0
c    1.0
d   -1.0
e    NaN
dtype: float64
"""

_mul_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.multiply(b, fill_value=0)
a    1.0
b    0.0
c    0.0
d    0.0
e    NaN
dtype: float64
"""

_div_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.divide(b, fill_value=0)
a    1.0
b    inf
c    inf
d    0.0
e    NaN
dtype: float64
"""

_floordiv_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.floordiv(b, fill_value=0)
a    1.0
b    NaN
c    NaN
d    0.0
e    NaN
dtype: float64
"""

_mod_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.mod(b, fill_value=0)
a    0.0
b    NaN
c    NaN
d    0.0
e    NaN
dtype: float64
"""

_pow_example_SERIES = """
Examples
--------
>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
>>> a
a    1.0
b    1.0
c    1.0
d    NaN
dtype: float64
>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
>>> b
a    1.0
b    NaN
d    1.0
e    NaN
dtype: float64
>>> a.pow(b, fill_value=0)
a    1.0
b    1.0
c    1.0
d    0.0
e    NaN
dtype: float64
"""

_op_descriptions = {
    # Arithmetic Operators
    'add': {'op': '+',
            'desc': 'Addition',
            'reverse': 'radd',
            'series_examples': _add_example_SERIES},
    'sub': {'op': '-',
            'desc': 'Subtraction',
            'reverse': 'rsub',
            'series_examples': _sub_example_SERIES},
    'mul': {'op': '*',
            'desc': 'Multiplication',
            'reverse': 'rmul',
            'series_examples': _mul_example_SERIES,
            'df_examples': None},
    'mod': {'op': '%',
            'desc': 'Modulo',
            'reverse': 'rmod',
            'series_examples': _mod_example_SERIES},
    'pow': {'op': '**',
            'desc': 'Exponential power',
            'reverse': 'rpow',
            'series_examples': _pow_example_SERIES,
            'df_examples': None},
    'truediv': {'op': '/',
                'desc': 'Floating division',
                'reverse': 'rtruediv',
                'series_examples': _div_example_SERIES,
                'df_examples': None},
    'floordiv': {'op': '//',
                 'desc': 'Integer division',
                 'reverse': 'rfloordiv',
                 'series_examples': _floordiv_example_SERIES,
                 'df_examples': None},
    'divmod': {'op': 'divmod',
               'desc': 'Integer division and modulo',
               'reverse': 'rdivmod',
               'series_examples': None,
               'df_examples': None},

    # Comparison Operators
    'eq': {'op': '==',
           'desc': 'Equal to',
           'reverse': None,
           'series_examples': None},
    'ne': {'op': '!=',
           'desc': 'Not equal to',
           'reverse': None,
           'series_examples': None},
    'lt': {'op': '<',
           'desc': 'Less than',
           'reverse': None,
           'series_examples': None},
    'le': {'op': '<=',
           'desc': 'Less than or equal to',
           'reverse': None,
           'series_examples': None},
    'gt': {'op': '>',
           'desc': 'Greater than',
           'reverse': None,
           'series_examples': None},
    'ge': {'op': '>=',
           'desc': 'Greater than or equal to',
           'reverse': None,
           'series_examples': None}
}  # type: Dict[str, Dict[str, Optional[str]]]

_op_names = list(_op_descriptions.keys())
for key in _op_names:
    reverse_op = _op_descriptions[key]['reverse']
    if reverse_op is not None:
        _op_descriptions[reverse_op] = _op_descriptions[key].copy()
        _op_descriptions[reverse_op]['reverse'] = key

_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).

Equivalent to ``{equiv}``, but with support to substitute a fill_value for
missing data in one of the inputs.

Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
    Fill existing missing (NaN) values, and any new element needed for
    successful Series alignment, with this value before computation.
    If data in both corresponding Series locations is missing
    the result will be missing.
level : int or name
    Broadcast across a level, matching Index values on the
    passed MultiIndex level.

Returns
-------
Series
    The result of the operation.

See Also
--------
Series.{reverse}
"""

_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs

Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
    For Series input, axis to match Series index on
fill_value : None or float value, default None
    Fill existing missing (NaN) values, and any new element needed for
    successful DataFrame alignment, with this value before computation.
    If data in both corresponding DataFrame locations is missing
    the result will be missing
level : int or name
    Broadcast across a level, matching Index values on the
    passed MultiIndex level

Returns
-------
result : DataFrame

Notes
-----
Mismatched indices will be unioned together
"""

_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).

Equivalent to ``{equiv}``, but with support to substitute a fill_value
for missing data in one of the inputs. With reverse version, `{reverse}`.

Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.

Parameters
----------
other : scalar, sequence, Series, or DataFrame
    Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}
    Whether to compare by the index (0 or 'index') or columns
    (1 or 'columns'). For Series input, axis to match Series index on.
level : int or label
    Broadcast across a level, matching Index values on the
    passed MultiIndex level.
fill_value : float or None, default None
    Fill existing missing (NaN) values, and any new element needed for
    successful DataFrame alignment, with this value before computation.
    If data in both corresponding DataFrame locations is missing
    the result will be missing.

Returns
-------
DataFrame
    Result of the arithmetic operation.

See Also
--------
DataFrame.add : Add DataFrames.
DataFrame.sub : Subtract DataFrames.
DataFrame.mul : Multiply DataFrames.
DataFrame.div : Divide DataFrames (float division).
DataFrame.truediv : Divide DataFrames (float division).
DataFrame.floordiv : Divide DataFrames (integer division).
DataFrame.mod : Calculate modulo (remainder after division).
DataFrame.pow : Calculate exponential power.

Notes
-----
Mismatched indices will be unioned together.

Examples
--------
>>> df = pd.DataFrame({{'angles': [0, 3, 4],
...                    'degrees': [360, 180, 360]}},
...                   index=['circle', 'triangle', 'rectangle'])
>>> df
           angles  degrees
circle          0      360
triangle        3      180
rectangle       4      360

Add a scalar with the operator version, which returns the same results.

>>> df + 1
           angles  degrees
circle          1      361
triangle        4      181
rectangle       5      361

>>> df.add(1)
           angles  degrees
circle          1      361
triangle        4      181
rectangle       5      361

Divide by constant with reverse version.

>>> df.div(10)
           angles  degrees
circle        0.0     36.0
triangle      0.3     18.0
rectangle     0.4     36.0

>>> df.rdiv(10)
             angles   degrees
circle          inf  0.027778
triangle   3.333333  0.055556
rectangle  2.500000  0.027778

Subtract a list and Series by axis with operator version.

>>> df - [1, 2]
           angles  degrees
circle         -1      358
triangle        2      178
rectangle       3      358

>>> df.sub([1, 2], axis='columns')
           angles  degrees
circle         -1      358
triangle        2      178
rectangle       3      358

>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']),
...        axis='index')
           angles  degrees
circle         -1      359
triangle        2      179
rectangle       3      359

Multiply a DataFrame of different shape with operator version.

>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},
...                      index=['circle', 'triangle', 'rectangle'])
>>> other
           angles
circle          0
triangle        3
rectangle       4

>>> df * other
           angles  degrees
circle          0      NaN
triangle        9      NaN
rectangle      16      NaN

>>> df.mul(other, fill_value=0)
           angles  degrees
circle          0      0.0
triangle        9      0.0
rectangle      16      0.0

Divide by a MultiIndex by level.

>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6],
...                              'degrees': [360, 180, 360, 360, 540, 720]}},
...                             index=[['A', 'A', 'A', 'B', 'B', 'B'],
...                                    ['circle', 'triangle', 'rectangle',
...                                     'square', 'pentagon', 'hexagon']])
>>> df_multindex
             angles  degrees
A circle          0      360
  triangle        3      180
  rectangle       4      360
B square          4      360
  pentagon        5      540
  hexagon         6      720

>>> df.div(df_multindex, level=1, fill_value=0)
             angles  degrees
A circle        NaN      1.0
  triangle      1.0      1.0
  rectangle     1.0      1.0
B square        0.0      0.0
  pentagon      0.0      0.0
  hexagon       0.0      0.0
"""

_flex_comp_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).

Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison
operators.

Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis
(rows or columns) and level for comparison.

Parameters
----------
other : scalar, sequence, Series, or DataFrame
    Any single or multiple element data structure, or list-like object.
axis : {{0 or 'index', 1 or 'columns'}}, default 'columns'
    Whether to compare by the index (0 or 'index') or columns
    (1 or 'columns').
level : int or label
    Broadcast across a level, matching Index values on the passed
    MultiIndex level.

Returns
-------
DataFrame of bool
    Result of the comparison.

See Also
--------
DataFrame.eq : Compare DataFrames for equality elementwise.
DataFrame.ne : Compare DataFrames for inequality elementwise.
DataFrame.le : Compare DataFrames for less than inequality
    or equality elementwise.
DataFrame.lt : Compare DataFrames for strictly less than
    inequality elementwise.
DataFrame.ge : Compare DataFrames for greater than inequality
    or equality elementwise.
DataFrame.gt : Compare DataFrames for strictly greater than
    inequality elementwise.

Notes
-----
Mismatched indices will be unioned together.
`NaN` values are considered different (i.e. `NaN` != `NaN`).

Examples
--------
>>> df = pd.DataFrame({{'cost': [250, 150, 100],
...                    'revenue': [100, 250, 300]}},
...                   index=['A', 'B', 'C'])
>>> df
   cost  revenue
A   250      100
B   150      250
C   100      300

Comparison with a scalar, using either the operator or method:

>>> df == 100
    cost  revenue
A  False     True
B  False    False
C   True    False

>>> df.eq(100)
    cost  revenue
A  False     True
B  False    False
C   True    False

When `other` is a :class:`Series`, the columns of a DataFrame are aligned
with the index of `other` and broadcast:

>>> df != pd.Series([100, 250], index=["cost", "revenue"])
    cost  revenue
A   True     True
B   True    False
C  False     True

Use the method to control the broadcast axis:

>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index')
   cost  revenue
A  True    False
B  True     True
C  True     True
D  True     True

When comparing to an arbitrary sequence, the number of columns must
match the number of elements in `other`:

>>> df == [250, 100]
    cost  revenue
A   True     True
B  False    False
C  False    False

Use the method to control the axis:

>>> df.eq([250, 250, 100], axis='index')
    cost  revenue
A   True    False
B  False     True
C   True    False

Compare to a DataFrame of different shape.

>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}},
...                      index=['A', 'B', 'C', 'D'])
>>> other
   revenue
A      300
B      250
C      100
D      150

>>> df.gt(other)
    cost  revenue
A  False    False
B  False    False
C  False     True
D  False    False

Compare to a MultiIndex by level.

>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220],
...                              'revenue': [100, 250, 300, 200, 175, 225]}},
...                             index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'],
...                                    ['A', 'B', 'C', 'A', 'B', 'C']])
>>> df_multindex
      cost  revenue
Q1 A   250      100
   B   150      250
   C   100      300
Q2 A   150      200
   B   300      175
   C   220      225

>>> df.le(df_multindex, level=1)
       cost  revenue
Q1 A   True     True
   B   True     True
   C   True     True
Q2 A  False     True
   B   True    False
   C   True    False
"""


def _make_flex_doc(op_name, typ):
    """
    Make the appropriate substitutions for the given operation and class-typ
    into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
    to attach to a generated method.

    Parameters
    ----------
    op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
    typ : str {'series', 'dataframe'}

    Returns
    -------
    doc : str
    """
    op_name = op_name.replace('__', '')
    op_desc = _op_descriptions[op_name]

    if op_name.startswith('r'):
        equiv = 'other ' + op_desc['op'] + ' ' + typ
    else:
        equiv = typ + ' ' + op_desc['op'] + ' other'

    if typ == 'series':
        base_doc = _flex_doc_SERIES
        doc_no_examples = base_doc.format(
            desc=op_desc['desc'],
            op_name=op_name,
            equiv=equiv,
            reverse=op_desc['reverse']
        )
        if op_desc['series_examples']:
            doc = doc_no_examples + op_desc['series_examples']
        else:
            doc = doc_no_examples
    elif typ == 'dataframe':
        base_doc = _flex_doc_FRAME
        doc = base_doc.format(
            desc=op_desc['desc'],
            op_name=op_name,
            equiv=equiv,
            reverse=op_desc['reverse']
        )
    else:
        raise AssertionError('Invalid typ argument.')
    return doc


# -----------------------------------------------------------------------------
# Masking NA values and fallbacks for operations numpy does not support


def fill_binop(left, right, fill_value):
    """
    If a non-None fill_value is given, replace null entries in left and right
    with this value, but only in positions where _one_ of left/right is null,
    not both.

    Parameters
    ----------
    left : array-like
    right : array-like
    fill_value : object

    Returns
    -------
    left : array-like
    right : array-like

    Notes
    -----
    Makes copies if fill_value is not None
    """
    # TODO: can we make a no-copy implementation?
    if fill_value is not None:
        left_mask = isna(left)
        right_mask = isna(right)
        left = left.copy()
        right = right.copy()

        # one but not both
        mask = left_mask ^ right_mask
        left[left_mask & mask] = fill_value
        right[right_mask & mask] = fill_value
    return left, right


def mask_cmp_op(x, y, op):
    """
    Apply the function `op` to only non-null points in x and y.

    Parameters
    ----------
    x : array-like
    y : array-like
    op : binary operation

    Returns
    -------
    result : ndarray[bool]
    """
    xrav = x.ravel()
    result = np.empty(x.size, dtype=bool)
    if isinstance(y, (np.ndarray, ABCSeries)):
        yrav = y.ravel()
        mask = notna(xrav) & notna(yrav)
        result[mask] = op(np.array(list(xrav[mask])),
                          np.array(list(yrav[mask])))
    else:
        mask = notna(xrav)
        result[mask] = op(np.array(list(xrav[mask])), y)

    if op == operator.ne:  # pragma: no cover
        np.putmask(result, ~mask, True)
    else:
        np.putmask(result, ~mask, False)
    result = result.reshape(x.shape)
    return result
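# Worked example of the fill_binop contract above: only positions where
# exactly one of left/right is null are filled, so a both-null position
# stays null after the op.  (Series.add routes through this via fill_value.)
#
#   >>> a = pd.Series([1.0, np.nan, np.nan])
#   >>> b = pd.Series([np.nan, 2.0, np.nan])
#   >>> a.add(b, fill_value=0)
#   0    1.0
#   1    2.0
#   2    NaN
#   dtype: float64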
def masked_arith_op(x, y, op):
    """
    If the given arithmetic operation fails, attempt it again on
    only the non-null elements of the input array(s).

    Parameters
    ----------
    x : np.ndarray
    y : np.ndarray, Series, Index
    op : binary operator
    """
    # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
    # the logic valid for both Series and DataFrame ops.
    xrav = x.ravel()
    assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
    if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
        dtype = find_common_type([x.dtype, y.dtype])
        result = np.empty(x.size, dtype=dtype)

        # PeriodIndex.ravel() returns int64 dtype, so we have
        # to work around that case.  See GH#19956
        yrav = y if is_period_dtype(y) else y.ravel()
        mask = notna(xrav) & notna(yrav)

        if yrav.shape != mask.shape:
            # FIXME: GH#5284, GH#5035, GH#19448
            # Without specifically raising here we get mismatched
            # errors in Py3 (TypeError) vs Py2 (ValueError)
            # Note: Only an issue in the DataFrame case
            raise ValueError('Cannot broadcast operands together.')

        if mask.any():
            with np.errstate(all='ignore'):
                result[mask] = op(xrav[mask],
                                  com.values_from_object(yrav[mask]))

    else:
        assert is_scalar(y), type(y)
        assert isinstance(x, np.ndarray), type(x)
        # mask is only meaningful for x
        result = np.empty(x.size, dtype=x.dtype)
        mask = notna(xrav)

        # 1 ** np.nan is 1. So we have to unmask those.
        if op == pow:
            mask = np.where(x == 1, False, mask)
        elif op == rpow:
            mask = np.where(y == 1, False, mask)

        if mask.any():
            with np.errstate(all='ignore'):
                result[mask] = op(xrav[mask], y)

    result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
    result = result.reshape(x.shape)  # 2D compat
    return result


def invalid_comparison(left, right, op):
    """
    If a comparison has mismatched types and is not necessarily meaningful,
    follow python3 conventions by:

    - returning all-False for equality
    - returning all-True for inequality
    - raising TypeError otherwise

    Parameters
    ----------
    left : array-like
    right : scalar, array-like
    op : operator.{eq, ne, lt, le, gt}

    Raises
    ------
    TypeError : on inequality comparisons
    """
    if op is operator.eq:
        res_values = np.zeros(left.shape, dtype=bool)
    elif op is operator.ne:
        res_values = np.ones(left.shape, dtype=bool)
    else:
        raise TypeError("Invalid comparison between dtype={dtype} and {typ}"
                        .format(dtype=left.dtype, typ=type(right).__name__))
    return res_values


# -----------------------------------------------------------------------------
# Dispatch logic

def should_series_dispatch(left, right, op):
    """
    Identify cases where a DataFrame operation should dispatch to its
    Series counterpart.

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    op : binary operator

    Returns
    -------
    override : bool
    """
    if left._is_mixed_type or right._is_mixed_type:
        return True

    if not len(left.columns) or not len(right.columns):
        # ensure obj.dtypes[0] exists for each obj
        return False

    ldtype = left.dtypes.iloc[0]
    rdtype = right.dtypes.iloc[0]

    if ((is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or
            (is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype))):
        # numpy integer dtypes as timedelta64 dtypes in this scenario
        return True

    if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
        # in particular case where right is an array of DateOffsets
        return True

    return False


def dispatch_to_series(left, right, func, str_rep=None, axis=None):
    """
    Evaluate the frame operation func(left, right) by evaluating
    column-by-column, dispatching to the Series implementation.

    Parameters
    ----------
    left : DataFrame
    right : scalar or DataFrame
    func : arithmetic or comparison operator
    str_rep : str or None, default None
    axis : {None, 0, 1, "index", "columns"}

    Returns
    -------
    DataFrame
    """
    # Note: we use iloc to access columns for compat with cases
    #       with non-unique columns.
    import pandas.core.computation.expressions as expressions

    right = lib.item_from_zerodim(right)
    if lib.is_scalar(right) or np.ndim(right) == 0:

        def column_op(a, b):
            return {i: func(a.iloc[:, i], b)
                    for i in range(len(a.columns))}

    elif isinstance(right, ABCDataFrame):
        assert right._indexed_same(left)

        def column_op(a, b):
            return {i: func(a.iloc[:, i], b.iloc[:, i])
                    for i in range(len(a.columns))}

    elif isinstance(right, ABCSeries) and axis == "columns":
        # We only get here if called via left._combine_match_columns,
        # in which case we specifically want to operate row-by-row
        assert right.index.equals(left.columns)

        def column_op(a, b):
            return {i: func(a.iloc[:, i], b.iloc[i])
                    for i in range(len(a.columns))}

    elif isinstance(right, ABCSeries):
        assert right.index.equals(left.index)  # Handle other cases later

        def column_op(a, b):
            return {i: func(a.iloc[:, i], b)
                    for i in range(len(a.columns))}

    else:
        # Remaining cases have less-obvious dispatch rules
        raise NotImplementedError(right)

    new_data = expressions.evaluate(column_op, str_rep, left, right)

    result = left._constructor(new_data, index=left.index, copy=False)
    # Pin columns instead of passing to constructor for compat with
    # non-unique columns case
    result.columns = left.columns
    return result


def dispatch_to_index_op(op, left, right, index_class):
    """
    Wrap Series left in the given index_class to delegate the operation op
    to the index implementation.  DatetimeIndex and TimedeltaIndex perform
    type checking, timezone handling, overflow checks, etc.

    Parameters
    ----------
    op : binary operator (operator.add, operator.sub, ...)
    left : Series
    right : object
    index_class : DatetimeIndex or TimedeltaIndex

    Returns
    -------
    result : object, usually DatetimeIndex, TimedeltaIndex, or Series
    """
    left_idx = index_class(left)

    # avoid accidentally allowing integer add/sub.  For datetime64[tz] dtypes,
    # left_idx may inherit a freq from a cached DatetimeIndex.
    # See discussion in GH#19147.
    if getattr(left_idx, 'freq', None) is not None:
        left_idx = left_idx._shallow_copy(freq=None)
    try:
        result = op(left_idx, right)
    except NullFrequencyError:
        # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
        # on add/sub of integers (or int-like).  We re-raise as a TypeError.
        raise TypeError('incompatible type for a datetime/timedelta '
                        'operation [{name}]'.format(name=op.__name__))
    return result


def dispatch_to_extension_op(op, left, right):
    """
    Assume that left or right is a Series backed by an ExtensionArray,
    apply the operator defined by op.
    """

    # The op calls will raise TypeError if the op is not defined
    # on the ExtensionArray

    # unbox Series and Index to arrays
    if isinstance(left, (ABCSeries, ABCIndexClass)):
        new_left = left._values
    else:
        new_left = left

    if isinstance(right, (ABCSeries, ABCIndexClass)):
        new_right = right._values
    else:
        new_right = right

    res_values = op(new_left, new_right)
    res_name = get_op_result_name(left, right)

    if op.__name__ in ['divmod', 'rdivmod']:
        return _construct_divmod_result(
            left, res_values, left.index, res_name)

    return _construct_result(left, res_values, left.index, res_name)
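# The observable effect of dispatch_to_series above: a frame-level op
# matches applying the Series op column-by-column.  Pure-pandas sketch:
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   >>> (df * 10).equals(
#   ...     pd.DataFrame({col: df[col] * 10 for col in df.columns}))
#   True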
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods


def _get_method_wrappers(cls):
    """
    Find the appropriate operation-wrappers to use when defining flex/special
    arithmetic, boolean, and comparison operations with the given class.

    Parameters
    ----------
    cls : class

    Returns
    -------
    arith_flex : function or None
    comp_flex : function or None
    arith_special : function
    comp_special : function
    bool_special : function

    Notes
    -----
    None is only returned for SparseArray
    """
    if issubclass(cls, ABCSparseSeries):
        # Be sure to catch this before ABCSeries and ABCSparseArray,
        # as they will both come see SparseSeries as a subclass
        arith_flex = _flex_method_SERIES
        comp_flex = _flex_method_SERIES
        arith_special = _arith_method_SPARSE_SERIES
        comp_special = _arith_method_SPARSE_SERIES
        bool_special = _bool_method_SERIES
        # TODO: I don't think the functions defined by bool_method are tested
    elif issubclass(cls, ABCSeries):
        # Just Series; SparseSeries is caught above
        arith_flex = _flex_method_SERIES
        comp_flex = _flex_method_SERIES
        arith_special = _arith_method_SERIES
        comp_special = _comp_method_SERIES
        bool_special = _bool_method_SERIES
    elif issubclass(cls, ABCSparseArray):
        arith_flex = None
        comp_flex = None
        arith_special = _arith_method_SPARSE_ARRAY
        comp_special = _arith_method_SPARSE_ARRAY
        bool_special = _arith_method_SPARSE_ARRAY
    elif issubclass(cls, ABCDataFrame):
        # Same for DataFrame and SparseDataFrame
        arith_flex = _arith_method_FRAME
        comp_flex = _flex_comp_method_FRAME
        arith_special = _arith_method_FRAME
        comp_special = _comp_method_FRAME
        bool_special = _arith_method_FRAME
    return arith_flex, comp_flex, arith_special, comp_special, bool_special


def _create_methods(cls, arith_method, comp_method, bool_method, special):
    # creates actual methods based upon arithmetic, comp and bool method
    # constructors.

    have_divmod = issubclass(cls, ABCSeries)
    # divmod is available for Series and SparseSeries

    # yapf: disable
    new_methods = dict(
        add=arith_method(cls, operator.add, special),
        radd=arith_method(cls, radd, special),
        sub=arith_method(cls, operator.sub, special),
        mul=arith_method(cls, operator.mul, special),
        truediv=arith_method(cls, operator.truediv, special),
        floordiv=arith_method(cls, operator.floordiv, special),
        # Causes a floating point exception in the tests when numexpr enabled,
        # so for now no speedup
        mod=arith_method(cls, operator.mod, special),
        pow=arith_method(cls, operator.pow, special),
        # not entirely sure why this is necessary, but previously was included
        # so it's here to maintain compatibility
        rmul=arith_method(cls, rmul, special),
        rsub=arith_method(cls, rsub, special),
        rtruediv=arith_method(cls, rtruediv, special),
        rfloordiv=arith_method(cls, rfloordiv, special),
        rpow=arith_method(cls, rpow, special),
        rmod=arith_method(cls, rmod, special))
    # yapf: enable
    new_methods['div'] = new_methods['truediv']
    new_methods['rdiv'] = new_methods['rtruediv']
    if have_divmod:
        # divmod doesn't have an op that is supported by numexpr
        new_methods['divmod'] = arith_method(cls, divmod, special)
        new_methods['rdivmod'] = arith_method(cls, rdivmod, special)

    new_methods.update(dict(
        eq=comp_method(cls, operator.eq, special),
        ne=comp_method(cls, operator.ne, special),
        lt=comp_method(cls, operator.lt, special),
        gt=comp_method(cls, operator.gt, special),
        le=comp_method(cls, operator.le, special),
        ge=comp_method(cls, operator.ge, special)))

    if bool_method:
        new_methods.update(
            dict(and_=bool_method(cls, operator.and_, special),
                 or_=bool_method(cls, operator.or_, special),
                 # For some reason ``^`` wasn't used in original.
                 xor=bool_method(cls, operator.xor, special),
                 rand_=bool_method(cls, rand_, special),
                 ror_=bool_method(cls, ror_, special),
                 rxor=bool_method(cls, rxor, special)))

    if special:
        dunderize = lambda x: '__{name}__'.format(name=x.strip('_'))
    else:
        dunderize = lambda x: x
    new_methods = {dunderize(k): v for k, v in new_methods.items()}

    return new_methods


def add_methods(cls, new_methods):
    for name, method in new_methods.items():
        # For most methods, if we find that the class already has a method
        # of the same name, it is OK to over-write it.  The exception is
        # inplace methods (__iadd__, __isub__, ...) for SparseArray, which
        # retain the np.ndarray versions.
        force = not (issubclass(cls, ABCSparseArray) and
                     name.startswith('__i'))
        if force or name not in cls.__dict__:
            setattr(cls, name, method)


# ----------------------------------------------------------------------
# Arithmetic

def add_special_arithmetic_methods(cls):
    """
    Adds the full suite of special arithmetic methods (``__add__``,
    ``__sub__``, etc.) to the class.

    Parameters
    ----------
    cls : class
        special methods will be defined and pinned to this class
    """
    _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)
    new_methods = _create_methods(cls, arith_method, comp_method, bool_method,
                                  special=True)
    # inplace operators (I feel like these should get passed an `inplace=True`
    # or just be removed)

    def _wrap_inplace_method(method):
        """
        return an inplace wrapper for this method
        """

        def f(self, other):
            result = method(self, other)

            # this makes sure that we are aligned like the input
            # we are updating inplace so we want to ignore is_copy
            self._update_inplace(result.reindex_like(self, copy=False)._data,
                                 verify_is_copy=False)

            return self

        f.__name__ = "__i{name}__".format(name=method.__name__.strip("__"))
        return f

    new_methods.update(
        dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]),
             __isub__=_wrap_inplace_method(new_methods["__sub__"]),
             __imul__=_wrap_inplace_method(new_methods["__mul__"]),
             __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
             __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
             __imod__=_wrap_inplace_method(new_methods["__mod__"]),
             __ipow__=_wrap_inplace_method(new_methods["__pow__"])))

    new_methods.update(
        dict(__iand__=_wrap_inplace_method(new_methods["__and__"]),
             __ior__=_wrap_inplace_method(new_methods["__or__"]),
             __ixor__=_wrap_inplace_method(new_methods["__xor__"])))

    add_methods(cls, new_methods=new_methods)
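# Observable consequence of _wrap_inplace_method above: the in-place
# dunders compute the result out-of-place and then update the existing
# object, so object identity is preserved (a sketch; alignment details
# elided):
#
#   >>> s = pd.Series([1, 2, 3]); alias = s
#   >>> s += 1
#   >>> s is alias
#   True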
def add_flex_arithmetic_methods(cls):
    """
    Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
    to the class.

    Parameters
    ----------
    cls : class
        flex methods will be defined and pinned to this class
    """
    flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls)
    new_methods = _create_methods(cls, flex_arith_method,
                                  flex_comp_method, bool_method=None,
                                  special=False)
    new_methods.update(dict(multiply=new_methods['mul'],
                            subtract=new_methods['sub'],
                            divide=new_methods['div']))
    # opt out of bool flex methods for now
    assert not any(kname in new_methods for kname in
                   ('ror_', 'rxor', 'rand_'))

    add_methods(cls, new_methods=new_methods)


# -----------------------------------------------------------------------------
# Series


def _align_method_SERIES(left, right, align_asobject=False):
    """ align lhs and rhs Series """

    # ToDo: Different from _align_method_FRAME, list, tuple and ndarray
    # are not coerced here
    # because Series has inconsistencies described in #13637

    if isinstance(right, ABCSeries):
        # avoid repeated alignment
        if not left.index.equals(right.index):

            if align_asobject:
                # to keep original value's dtype for bool ops
                left = left.astype(object)
                right = right.astype(object)

            left, right = left.align(right, copy=False)

    return left, right


def _construct_result(left, result, index, name, dtype=None):
    """
    If the raw op result has a non-None name (e.g. it is an Index object) and
    the name argument is None, then passing name to the constructor will
    not be enough; we still need to override the name attribute.
    """
    out = left._constructor(result, index=index, dtype=dtype)

    out = out.__finalize__(left)
    out.name = name
    return out


def _construct_divmod_result(left, result, index, name, dtype=None):
    """divmod returns a tuple of like indexed series instead of a single
    series.
    """
    return (
        _construct_result(left, result[0], index=index, name=name,
                          dtype=dtype),
        _construct_result(left, result[1], index=index, name=name,
                          dtype=dtype),
    )


def _arith_method_SERIES(cls, op, special):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    str_rep = _get_opstr(op, cls)
    op_name = _get_op_name(op, special)
    eval_kwargs = _gen_eval_kwargs(op_name)
    fill_zeros = _gen_fill_zeros(op_name)
    construct_result = (_construct_divmod_result
                        if op in [divmod, rdivmod] else _construct_result)

    def na_op(x, y):
        """
        Return the result of evaluating op on the passed in values.

        If native types are not compatible, try coercion to object dtype.

        Parameters
        ----------
        x : array-like
        y : array-like or scalar

        Returns
        -------
        array-like

        Raises
        ------
        TypeError : invalid operation
        """
        import pandas.core.computation.expressions as expressions
        try:
            result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
        except TypeError:
            result = masked_arith_op(x, y, op)
        except Exception:  # TODO: more specific?
            if is_object_dtype(x):
                return libalgos.arrmap_object(x,
                                              lambda val: op(val, y))
            raise

        result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
        return result

    def wrapper(left, right):
        if isinstance(right, ABCDataFrame):
            return NotImplemented

        left, right = _align_method_SERIES(left, right)
        res_name = get_op_result_name(left, right)
        right = maybe_upcast_for_op(right)

        if is_categorical_dtype(left):
            raise TypeError("{typ} cannot perform the operation "
                            "{op}".format(typ=type(left).__name__,
                                          op=str_rep))

        elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
            # Give dispatch_to_index_op a chance for tests like
            # test_dt64_series_add_intlike, which the index dispatching
            # handles specifically.
            result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
            return construct_result(left, result,
                                    index=left.index, name=res_name,
                                    dtype=result.dtype)

        elif (is_extension_array_dtype(left) or
                (is_extension_array_dtype(right) and not is_scalar(right))):
            # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
            return dispatch_to_extension_op(op, left, right)

        elif is_timedelta64_dtype(left):
            result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
            return construct_result(left, result,
                                    index=left.index, name=res_name)

        elif is_timedelta64_dtype(right):
            # We should only get here with non-scalar or timedelta64('NaT')
            # values for right
            # Note: we cannot use dispatch_to_index_op because
            # that may incorrectly raise TypeError when we
            # should get NullFrequencyError
            result = op(pd.Index(left), right)
            return construct_result(left, result,
                                    index=left.index, name=res_name,
                                    dtype=result.dtype)

        lvalues = left.values
        rvalues = right
        if isinstance(rvalues, ABCSeries):
            rvalues = rvalues.values

        with np.errstate(all='ignore'):
            result = na_op(lvalues, rvalues)
        return construct_result(left, result,
                                index=left.index, name=res_name, dtype=None)

    wrapper.__name__ = op_name
    return wrapper


def _comp_method_OBJECT_ARRAY(op, x, y):
    if isinstance(y, list):
        y = construct_1d_object_array_from_listlike(y)
    if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
        if not is_object_dtype(y.dtype):
            y = y.astype(np.object_)

        if isinstance(y, (ABCSeries, ABCIndex)):
            y = y.values

        result = libops.vec_compare(x, y, op)
    else:
        result = libops.scalar_compare(x, y, op)
    return result
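# Element-wise semantics of the object-dtype comparison fallback above
# (the real work happens in Cython via libops.vec_compare/scalar_compare;
# this pure-Python sketch only shows the intended result):
#
#   >>> x = np.array(['a', 'b', 'c'], dtype=object)
#   >>> [operator.gt(val, 'a') for val in x]
#   [False, True, True]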
def _comp_method_SERIES(cls, op, special):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    op_name = _get_op_name(op, special)
    masker = _gen_eval_kwargs(op_name).get('masker', False)

    def na_op(x, y):
        # TODO:
        # should have guarantees on what x, y can be type-wise
        # Extension Dtypes are not called here

        # Checking that cases that were once handled here are no longer
        # reachable.
        assert not (is_categorical_dtype(y) and not is_scalar(y))

        if is_object_dtype(x.dtype):
            result = _comp_method_OBJECT_ARRAY(op, x, y)

        elif is_datetimelike_v_numeric(x, y):
            return invalid_comparison(x, y, op)

        else:

            # we want to compare like types
            # we only want to convert to integer like if
            # we are not NotImplemented, otherwise
            # we would allow datetime64 (but viewed as i8) against
            # integer comparisons

            # we have a datetime/timedelta and may need to convert
            assert not needs_i8_conversion(x)
            mask = None
            if not is_scalar(y) and needs_i8_conversion(y):
                mask = isna(x) | isna(y)
                y = y.view('i8')
                x = x.view('i8')

            method = getattr(x, op_name, None)
            if method is not None:
                with np.errstate(all='ignore'):
                    result = method(y)
                if result is NotImplemented:
                    return invalid_comparison(x, y, op)
            else:
                result = op(x, y)

            if mask is not None and mask.any():
                result[mask] = masker

        return result

    def wrapper(self, other, axis=None):
        # Validate the axis parameter
        if axis is not None:
            self._get_axis_number(axis)

        res_name = get_op_result_name(self, other)

        if isinstance(other, list):
            # TODO: same for tuples?
            other = np.asarray(other)

        if isinstance(other, ABCDataFrame):  # pragma: no cover
            # Defer to DataFrame implementation; fail early
            return NotImplemented

        elif isinstance(other, ABCSeries) and not self._indexed_same(other):
            raise ValueError("Can only compare identically-labeled "
                             "Series objects")

        elif is_categorical_dtype(self):
            # Dispatch to Categorical implementation; pd.CategoricalIndex
            # behavior is non-canonical GH#19513
            res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
            return self._constructor(res_values, index=self.index,
                                     name=res_name)

        elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
            # Dispatch to DatetimeIndex to ensure identical
            # Series/Index behavior
            if (isinstance(other, datetime.date) and
                    not isinstance(other, datetime.datetime)):
                # https://github.com/pandas-dev/pandas/issues/21152
                # Compatibility for difference between Series comparison w/
                # datetime and date
                msg = (
                    "Comparing Series of datetimes with 'datetime.date'.  "
                    "Currently, the 'datetime.date' is coerced to a "
                    "datetime. In the future pandas will not coerce, "
                    "and {future}. "
                    "To retain the current behavior, "
                    "convert the 'datetime.date' to a datetime with "
                    "'pd.Timestamp'."
                )
                if op in {operator.lt, operator.le,
                          operator.gt, operator.ge}:
                    future = "a TypeError will be raised"
                else:
                    future = ("the values will not compare equal to the "
                              "'datetime.date'")
                msg = '\n'.join(textwrap.wrap(msg.format(future=future)))
                warnings.warn(msg, FutureWarning, stacklevel=2)
                other = pd.Timestamp(other)

            res_values = dispatch_to_index_op(op, self, other,
                                              pd.DatetimeIndex)

            return self._constructor(res_values, index=self.index,
                                     name=res_name)

        elif is_timedelta64_dtype(self):
            res_values = dispatch_to_index_op(op, self, other,
                                              pd.TimedeltaIndex)
            return self._constructor(res_values, index=self.index,
                                     name=res_name)

        elif (is_extension_array_dtype(self) or
              (is_extension_array_dtype(other) and not is_scalar(other))):
            # Note: the `not is_scalar(other)` condition rules out
            # e.g. other == "category"
            return dispatch_to_extension_op(op, self, other)

        elif isinstance(other, ABCSeries):
            # By this point we have checked that self._indexed_same(other)
            res_values = na_op(self.values, other.values)
            # rename is needed in case res_name is None and res_values.name
            # is not.
            return self._constructor(res_values, index=self.index,
                                     name=res_name).rename(res_name)

        elif isinstance(other, (np.ndarray, pd.Index)):
            # do not check length of zerodim array
            # as it will broadcast
            if other.ndim != 0 and len(self) != len(other):
                raise ValueError('Lengths must match to compare')

            res_values = na_op(self.values, np.asarray(other))
            result = self._constructor(res_values, index=self.index)
            # rename is needed in case res_name is None and self.name
            # is not.
            return result.__finalize__(self).rename(res_name)

        elif is_scalar(other) and isna(other):
            # numpy does not like comparisons vs None
            if op is operator.ne:
                res_values = np.ones(len(self), dtype=bool)
            else:
                res_values = np.zeros(len(self), dtype=bool)
            return self._constructor(res_values, index=self.index,
                                     name=res_name, dtype='bool')

        else:
            values = self.get_values()

            with np.errstate(all='ignore'):
                res = na_op(values, other)
            if is_scalar(res):
                raise TypeError('Could not compare {typ} type with Series'
                                .format(typ=type(other)))

            # always return a full value series here
            res_values = com.values_from_object(res)
            return self._constructor(res_values, index=self.index,
                                     name=res_name, dtype='bool')

    wrapper.__name__ = op_name
    return wrapper
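# The isna(other) branch above implements the usual null semantics for
# comparisons against a null scalar: all-False, except ``ne`` which is
# all-True:
#
#   >>> s = pd.Series([1.0, 2.0])
#   >>> (s == np.nan).any(), (s != np.nan).all()
#   (False, True)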
Otherwise these are boolean ops filler = (fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool) res_values = na_op(self.values, ovalues) unfilled = self._constructor(res_values, index=self.index, name=res_name) filled = filler(unfilled) return finalizer(filled) wrapper.__name__ = op_name return wrapper def _flex_method_SERIES(cls, op, special): name = _get_op_name(op, special) doc = _make_flex_doc(name, 'series') @Appender(doc) def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # validate axis if axis is not None: self._get_axis_number(axis) if isinstance(other, ABCSeries): return self._binop(other, op, level=level, fill_value=fill_value) elif isinstance(other, (np.ndarray, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') other = self._constructor(other, self.index) return self._binop(other, op, level=level, fill_value=fill_value) else: if fill_value is not None: self = self.fillna(fill_value) return self._constructor(op(self, other), self.index).__finalize__(self) flex_wrapper.__name__ = name return flex_wrapper # ----------------------------------------------------------------------------- # DataFrame def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None): """ Apply binary operator `func` to self, other using alignment and fill conventions determined by the fill_value, axis, and level kwargs. Parameters ---------- self : DataFrame other : Series func : binary operator fill_value : object, default None axis : {0, 1, 'columns', 'index', None}, default None level : int or None, default None Returns ------- result : DataFrame """ if fill_value is not None: raise NotImplementedError("fill_value {fill} not supported." .format(fill=fill_value)) if axis is not None: axis = self._get_axis_number(axis) if axis == 0: return self._combine_match_index(other, func, level=level) else: return self._combine_match_columns(other, func, level=level) else: if not len(other): return self * np.nan if not len(self): # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) # default axis is columns return self._combine_match_columns(other, func, level=level) def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ def to_series(right): msg = ('Unable to coerce to Series, length must be {req_len}: ' 'given {given_len}') if axis is not None and left._get_axis_name(axis) == 'index': if len(left.index) != len(right): raise ValueError(msg.format(req_len=len(left.index), given_len=len(right))) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError(msg.format(req_len=len(left.columns), given_len=len(right))) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError("Unable to coerce to DataFrame, shape " "must be {req_shape}: given {given_shape}" 
.format(req_shape=left.shape, given_shape=right.shape)) elif right.ndim > 2: raise ValueError('Unable to coerce to Series/DataFrame, dim ' 'must be <= 2: {dim}'.format(dim=right.shape)) elif (is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame))): # GH17901 right = to_series(right) return right def _arith_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) fill_zeros = _gen_fill_zeros(op_name) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): import pandas.core.computation.expressions as expressions try: result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) except TypeError: result = masked_arith_op(x, y, op) result = missing.fill_zeros(result, x, y, op_name, fill_zeros) return result if op_name in _op_descriptions: # i.e. include "add" but not "__add__" doc = _make_flex_doc(op_name, 'dataframe') else: doc = _arith_doc_FRAME % op_name @Appender(doc) def f(self, other, axis=default_axis, level=None, fill_value=None): other = _align_method_FRAME(self, other, axis) if isinstance(other, ABCDataFrame): # Another DataFrame pass_op = op if should_series_dispatch(self, other, op) else na_op return self._combine_frame(other, pass_op, fill_value, level) elif isinstance(other, ABCSeries): # For these values of `axis`, we end up dispatching to Series op, # so do not want the masked op. pass_op = op if axis in [0, "columns", None] else na_op return _combine_series_frame(self, other, pass_op, fill_value=fill_value, axis=axis, level=level) else: if fill_value is not None: self = self.fillna(fill_value) assert np.ndim(other) == 0 return self._combine_const(other, op) f.__name__ = op_name return f def _flex_comp_method_FRAME(cls, op, special): str_rep = _get_opstr(op, cls) op_name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(op_name) def na_op(x, y): try: with np.errstate(invalid='ignore'): result = op(x, y) except TypeError: result = mask_cmp_op(x, y, op) return result doc = _flex_comp_doc_FRAME.format(op_name=op_name, desc=_op_descriptions[op_name]['desc']) @Appender(doc) def f(self, other, axis=default_axis, level=None): other = _align_method_FRAME(self, other, axis) if isinstance(other, ABCDataFrame): # Another DataFrame if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) return dispatch_to_series(self, other, na_op, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, na_op, fill_value=None, axis=axis, level=level) else: assert np.ndim(other) == 0, other return self._combine_const(other, na_op) f.__name__ = op_name return f def _comp_method_FRAME(cls, func, special): str_rep = _get_opstr(func, cls) op_name = _get_op_name(func, special) @Appender('Wrapper for comparison method {name}'.format(name=op_name)) def f(self, other): other = _align_method_FRAME(self, other, axis=None) if isinstance(other, ABCDataFrame): # Another DataFrame if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') return dispatch_to_series(self, other, func, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None) else: # straight boolean comparisons we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. 
res = self._combine_const(other, func) return res.fillna(True).astype(bool) f.__name__ = op_name return f # ----------------------------------------------------------------------------- # Sparse def _cast_sparse_series_op(left, right, opname): """ For SparseSeries operation, coerce to float64 if the result is expected to have NaN or inf values Parameters ---------- left : SparseArray right : SparseArray opname : str Returns ------- left : SparseArray right : SparseArray """ from pandas.core.sparse.api import SparseDtype opname = opname.strip('_') # TODO: This should be moved to the array? if is_integer_dtype(left) and is_integer_dtype(right): # series coerces to float64 if result should have NaN/inf if opname in ('floordiv', 'mod') and (right.to_dense() == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) elif opname in ('rfloordiv', 'rmod') and (left.to_dense() == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) return left, right def _arith_method_SPARSE_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def wrapper(self, other): if isinstance(other, ABCDataFrame): return NotImplemented elif isinstance(other, ABCSeries): if not isinstance(other, ABCSparseSeries): other = other.to_sparse(fill_value=self.fill_value) return _sparse_series_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): new_values = op(self.values, other) return self._constructor(new_values, index=self.index, name=self.name) else: # pragma: no cover raise TypeError('operation with {other} not supported' .format(other=type(other))) wrapper.__name__ = op_name return wrapper def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index new_name = get_op_result_name(left, right) from pandas.core.arrays.sparse import _sparse_array_op lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name) result = _sparse_array_op(lvalues, rvalues, op, name) return left._constructor(result, index=new_index, name=new_name) def _arith_method_SPARSE_ARRAY(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ op_name = _get_op_name(op, special) def wrapper(self, other): from pandas.core.arrays.sparse.array import ( SparseArray, _sparse_array_op, _wrap_result, _get_fill) if isinstance(other, np.ndarray): if len(self) != len(other): raise AssertionError("length mismatch: {self} vs. {other}" .format(self=len(self), other=len(other))) if not isinstance(other, SparseArray): dtype = getattr(other, 'dtype', None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) return _sparse_array_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): fill = op(_get_fill(self), np.asarray(other)) result = op(self.sp_values, other) return _wrap_result(op_name, result, self.sp_index, fill) else: # pragma: no cover raise TypeError('operation with {other} not supported' .format(other=type(other))) wrapper.__name__ = op_name return wrapper
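# --- Editor's illustration (not part of the original file): the Series
# comparison wrapper above enforces label alignment and has a dedicated
# scalar-NaN branch. A minimal sketch of the observable behavior, assuming
# a pandas build of this era; names and values here are illustrative only.
def _illustrate_series_comparison():
    import numpy as np
    import pandas as pd

    s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
    # Reordered labels are not "identically labeled": the wrapper raises
    # instead of silently aligning.
    try:
        s1 == s1.reindex(['c', 'b', 'a'])
    except ValueError as exc:
        assert 'identically-labeled' in str(exc)
    # Comparing against a scalar NaN hits the `is_scalar(other) and
    # isna(other)` branch above: all-False, except `!=` which is all-True.
    assert not (s1 == np.nan).any()
    assert (s1 != np.nan).all()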
""" Tests for DatetimeArray """ import operator import numpy as np import pytest from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import sequence_to_dt64ns import pandas.util.testing as tm class TestDatetimeArrayConstructor: def test_freq_validation(self): # GH#24623 check that invalid instances cannot be created with the # public constructor arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 msg = ("Inferred frequency H from passed values does not " "conform to passed frequency W-SUN") with pytest.raises(ValueError, match=msg): DatetimeArray(arr, freq="W") @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime, pd.DatetimeIndex]) def test_mixing_naive_tzaware_raises(self, meth): # GH#24569 arr = np.array([pd.Timestamp('2000'), pd.Timestamp('2000', tz='CET')]) msg = ('Cannot mix tz-aware with tz-naive values|' 'Tz-aware datetime.datetime cannot be converted ' 'to datetime64 unless utc=True') for obj in [arr, arr[::-1]]: # check that we raise regardless of whether naive is found # before aware or vice-versa with pytest.raises(ValueError, match=msg): meth(obj) def test_from_pandas_array(self): arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 result = DatetimeArray._from_sequence(arr, freq='infer') expected = pd.date_range('1970-01-01', periods=5, freq='H')._data tm.assert_datetime_array_equal(result, expected) def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) def test_non_array_raises(self): with pytest.raises(ValueError, match='list'): DatetimeArray([1, 2, 3]) def test_other_type_raises(self): with pytest.raises(ValueError, match="The dtype of 'values' is incorrect.*bool"): DatetimeArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') def test_freq_infer_raises(self): with pytest.raises(ValueError, match='Frequency inference'): DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) assert arr._data is data arr = DatetimeArray(data, copy=True) assert arr._data is not data class TestDatetimeArrayComparisons: # TODO: merge this into tests/arithmetic/test_datetime64 once it is # sufficiently robust def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators): # arbitrary tz-naive DatetimeIndex opname = all_compare_operators.strip('_') op = getattr(operator, opname) dti = pd.date_range('2016-01-1', freq='MS', periods=9, tz=None) arr = DatetimeArray(dti) assert arr.freq == dti.freq assert arr.tz == dti.tz right = dti expected = np.ones(len(arr), dtype=bool) if opname in ['ne', 'gt', 'lt']: # for these the comparisons should be all-False expected = ~expected result = op(arr, arr) tm.assert_numpy_array_equal(result, expected) for other in [right, np.array(right)]: # TODO: add list and tuple, and object-dtype once those # are fixed in the constructor result = op(arr, other) tm.assert_numpy_array_equal(result, expected) result = op(other, arr) tm.assert_numpy_array_equal(result, expected) class TestDatetimeArray: def 
test_astype_to_same(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False) assert result is arr @pytest.mark.parametrize("dtype", [ int, np.int32, np.int64, 'uint32', 'uint64', ]) def test_astype_int(self, dtype): arr = DatetimeArray._from_sequence([pd.Timestamp('2000'), pd.Timestamp('2001')]) result = arr.astype(dtype) if np.dtype(dtype).kind == 'u': expected_dtype = np.dtype('uint64') else: expected_dtype = np.dtype('int64') expected = arr.astype(expected_dtype) assert result.dtype == expected_dtype tm.assert_numpy_array_equal(result, expected) def test_tz_setter_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(AttributeError, match='tz_localize'): arr.tz = 'UTC' def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) with pytest.raises(ValueError, match="None"): arr[0] = pd.Timestamp('2000') with pytest.raises(ValueError, match="US/Central"): arr[0] = pd.Timestamp('2000', tz="US/Eastern") def test_setitem_clears_freq(self): a = DatetimeArray(pd.date_range('2000', periods=2, freq='D', tz='US/Central')) a[0] = pd.Timestamp("2000", tz="US/Central") assert a.freq is None def test_repeat_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti) repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): dti = pd.date_range('2000', periods=2, freq='D', tz='US/Central') arr = DatetimeArray(dti).repeat([4, 3]) result = arr.value_counts() # Note: not tm.assert_index_equal, since `freq`s do not match assert result.index.equals(dti) arr[-2] = pd.NaT result = arr.value_counts() expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('method', ['pad', 'backfill']) def test_fillna_preserves_tz(self, method): dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central') arr = DatetimeArray(dti, copy=True) arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] expected = DatetimeArray._from_sequence( [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz='US/Central' ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) # assert that arr and dti were not modified in-place assert arr[2] is pd.NaT assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central') def test_array_interface_tz(self): tz = "US/Central" data = DatetimeArray(pd.date_range('2017', periods=2, tz=tz)) result = np.asarray(data) expected = np.array([pd.Timestamp('2017-01-01T00:00:00', tz=tz), pd.Timestamp('2017-01-02T00:00:00', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype='M8[ns]') expected = np.array(['2017-01-01T06:00:00', '2017-01-02T06:00:00'], dtype="M8[ns]") tm.assert_numpy_array_equal(result, expected) def test_array_interface(self): data = DatetimeArray(pd.date_range('2017', periods=2)) expected = np.array(['2017-01-01T00:00:00', '2017-01-02T00:00:00'], dtype='datetime64[ns]') result = np.asarray(data) tm.assert_numpy_array_equal(result, expected) result = np.asarray(data, dtype=object) expected = 
np.array([pd.Timestamp('2017-01-01T00:00:00'), pd.Timestamp('2017-01-02T00:00:00')], dtype=object) tm.assert_numpy_array_equal(result, expected) class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') with pytest.raises(TypeError, match='data is already tz-aware'): sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC")) def test_tz_dtype_matches(self): arr = DatetimeArray._from_sequence(['2000'], tz='US/Central') result, _, _ = sequence_to_dt64ns( arr, dtype=DatetimeTZDtype(tz="US/Central")) tm.assert_numpy_array_equal(arr._data, result) class TestReductions: @pytest.mark.parametrize("tz", [None, "US/Central"]) def test_min_max(self, tz): arr = DatetimeArray._from_sequence([ '2000-01-03', '2000-01-03', 'NaT', '2000-01-02', '2000-01-05', '2000-01-04', ], tz=tz) result = arr.min() expected = pd.Timestamp('2000-01-02', tz=tz) assert result == expected result = arr.max() expected = pd.Timestamp('2000-01-05', tz=tz) assert result == expected result = arr.min(skipna=False) assert result is pd.NaT result = arr.max(skipna=False) assert result is pd.NaT @pytest.mark.parametrize("tz", [None, "US/Central"]) @pytest.mark.parametrize('skipna', [True, False]) def test_min_max_empty(self, skipna, tz): arr = DatetimeArray._from_sequence([], tz=tz) result = arr.min(skipna=skipna) assert result is pd.NaT result = arr.max(skipna=skipna) assert result is pd.NaT
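# --- Editor's illustration (not part of the original file): the skipna
# semantics exercised by TestReductions above, restated through the public
# Series API; assumes a pandas build of this era.
def _illustrate_datetime_min_max():
    import pandas as pd

    s = pd.Series(pd.to_datetime(['2000-01-03', 'NaT', '2000-01-02']))
    assert s.min() == pd.Timestamp('2000-01-02')  # NaT skipped by default
    assert s.min(skipna=False) is pd.NaT          # NaT propagates otherwise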
cbertinato/pandas
pandas/tests/arrays/test_datetimes.py
pandas/core/ops.py
''' Jottings to work out format for __function_workspace__ matrix at end
of mat file.
'''
import os.path
import io

from numpy.compat import asstr

from scipy.io.matlab.mio5 import MatFile5Reader

test_data_path = os.path.join(os.path.dirname(__file__), 'data')


def read_minimat_vars(rdr):
    rdr.initialize_read()
    mdict = {'__globals__': []}
    i = 0
    while not rdr.end_of_stream():
        hdr, next_position = rdr.read_var_header()
        name = asstr(hdr.name)
        if name == '':
            name = 'var_%d' % i
            i += 1
        res = rdr.read_var_array(hdr, process=False)
        rdr.mat_stream.seek(next_position)
        mdict[name] = res
        if hdr.is_global:
            mdict['__globals__'].append(name)
    return mdict


def read_workspace_vars(fname):
    fp = open(fname, 'rb')
    rdr = MatFile5Reader(fp, struct_as_record=True)
    vars = rdr.get_variables()
    fws = vars['__function_workspace__']
    ws_bs = io.BytesIO(fws.tostring())
    ws_bs.seek(2)
    rdr.mat_stream = ws_bs
    # Guess byte order.
    mi = rdr.mat_stream.read(2)
    rdr.byte_order = '<' if mi == b'IM' else '>'
    rdr.mat_stream.read(4)  # presumably byte padding
    mdict = read_minimat_vars(rdr)
    fp.close()
    return mdict


def test_jottings():
    # example
    fname = os.path.join(test_data_path, 'parabola.mat')
    read_workspace_vars(fname)
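# --- Editor's note (not part of the original file): the two bytes read in
# read_workspace_vars above are the MAT-5 endianness indicator, which reads
# b'IM' for little-endian files and b'MI' for big-endian ones. A standalone
# restatement of the same guess, using numpy-style byte-order codes:
def _guess_byte_order(indicator):
    # '<' means little-endian, '>' big-endian
    return '<' if indicator == b'IM' else '>'

assert _guess_byte_order(b'IM') == '<'
assert _guess_byte_order(b'MI') == '>'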
import numpy.testing as npt import numpy as np import pytest from scipy import stats from .common_tests import (check_normalization, check_moment, check_mean_expect, check_var_expect, check_skew_expect, check_kurt_expect, check_entropy, check_private_entropy, check_edge_support, check_named_args, check_random_state_property, check_pickling, check_rvs_broadcast, check_freezing) from scipy.stats._distr_params import distdiscrete vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) distdiscrete += [[stats.rv_discrete(values=vals), ()]] def cases_test_discrete_basic(): seen = set() for distname, arg in distdiscrete: yield distname, arg, distname not in seen seen.add(distname) @pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic()) def test_discrete_basic(distname, arg, first_case): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' np.random.seed(9765456) rvs = distfn.rvs(size=2000, *arg) supp = np.unique(rvs) m, v = distfn.stats(*arg) check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf') check_pmf_cdf(distfn, arg, distname) check_oth(distfn, arg, supp, distname + ' oth') check_edge_support(distfn, arg) alpha = 0.01 check_discrete_chisquare(distfn, arg, rvs, alpha, distname + ' chisquare') if first_case: locscale_defaults = (0,) meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf, distfn.logsf] # make sure arguments are within support spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, } k = spec_k.get(distname, 1) check_named_args(distfn, k, arg, locscale_defaults, meths) if distname != 'sample distribution': check_scale_docstring(distfn) check_random_state_property(distfn, arg) check_pickling(distfn, arg) check_freezing(distfn, arg) # Entropy check_entropy(distfn, arg, distname) if distfn.__class__._entropy != stats.rv_discrete._entropy: check_private_entropy(distfn, arg, stats.rv_discrete) @pytest.mark.parametrize('distname,arg', distdiscrete) def test_moments(distname, arg): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' m, v, s, k = distfn.stats(*arg, moments='mvsk') check_normalization(distfn, arg, distname) # compare `stats` and `moment` methods check_moment(distfn, arg, m, v, distname) check_mean_expect(distfn, arg, m, distname) check_var_expect(distfn, arg, m, v, distname) check_skew_expect(distfn, arg, m, v, s, distname) if distname not in ['zipf', 'yulesimon']: check_kurt_expect(distfn, arg, m, v, k, distname) # frozen distr moments check_moment_frozen(distfn, arg, m, 1) check_moment_frozen(distfn, arg, v+m*m, 2) @pytest.mark.parametrize('dist,shape_args', distdiscrete) def test_rvs_broadcast(dist, shape_args): # If shape_only is True, it means the _rvs method of the # distribution uses more than one random number to generate a random # variate. That means the result of using rvs with broadcasting or # with a nontrivial size will not necessarily be the same as using the # numpy.vectorize'd version of rvs(), so we can only compare the shapes # of the results, not the values. # Whether or not a distribution is in the following list is an # implementation detail of the distribution, not a requirement. If # the implementation the rvs() method of a distribution changes, this # test might also have to be changed. 
shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace']

    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
    loc = np.zeros(2)
    nargs = distfunc.numargs
    allargs = []
    bshape = []
    # Generate shape parameter arguments...
    for k in range(nargs):
        shp = (k + 3,) + (1,)*(k + 1)
        param_val = shape_args[k]
        allargs.append(np.full(shp, param_val))
        bshape.insert(0, shp[0])
    allargs.append(loc)
    bshape.append(loc.size)
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.
    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only,
                        [np.int_])


@pytest.mark.parametrize('dist,args', distdiscrete)
def test_ppf_with_loc(dist, args):
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        distfn = dist
    # check with a negative, zero and positive relocation
    np.random.seed(1942349)
    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    _a, _b = distfn.support(*args)
    for loc in re_locs:
        npt.assert_array_equal(
            [_a-1+loc, _b+loc],
            [distfn.ppf(0.0, *args, loc=loc),
             distfn.ppf(1.0, *args, loc=loc)])


def check_cdf_ppf(distfn, arg, supp, msg):
    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg),
                           supp, msg + '-roundtrip')
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
                           supp, msg + '-roundtrip')
    if not hasattr(distfn, 'xk'):
        _a, _b = distfn.support(*arg)
        supp1 = supp[supp < _b]
        npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
                               supp1 + distfn.inc, msg + ' ppf-cdf-next')
        # -1e-8 could cause an error if pmf < 1e-8


def check_pmf_cdf(distfn, arg, distname):
    if hasattr(distfn, 'xk'):
        index = distfn.xk
    else:
        startind = int(distfn.ppf(0.01, *arg) - 1)
        index = list(range(startind, startind + 10))
    cdfs = distfn.cdf(index, *arg)
    pmfs_cum = distfn.pmf(index, *arg).cumsum()

    atol, rtol = 1e-10, 1e-10
    if distname == 'skellam':    # ncx2 accuracy
        atol, rtol = 1e-5, 1e-5
    npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
                        atol=atol, rtol=rtol)


def check_moment_frozen(distfn, arg, m, k):
    npt.assert_allclose(distfn(*arg).moment(k), m,
                        atol=1e-10, rtol=1e-10)


def check_oth(distfn, arg, supp, msg):
    # checking other methods of distfn
    npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
                        atol=1e-10, rtol=1e-10)

    q = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg),
                        atol=1e-10, rtol=1e-10)

    median_sf = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)


def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    """Perform a chisquare test on a random sample of a discrete distribution.

    Parameters
    ----------
    distfn : rv_discrete instance
        discrete distribution to test
    arg : sequence
        parameters of the distribution
    rvs : ndarray
        random sample to test against the distribution
    alpha : float
        significance level, threshold for p-value
    msg : str
        label used in the assertion message

    Asserts that the chisquare test does not reject the sample at
    significance level `alpha`.
    """
    wsupp = 0.05

    # construct intervals with minimum mass `wsupp`.
# intervals are left-half-open as in a cdf difference _a, _b = distfn.support(*arg) lo = int(max(_a, -1000)) high = int(min(_b, 1000)) + 1 distsupport = range(lo, high) last = 0 distsupp = [lo] distmass = [] for ii in distsupport: current = distfn.cdf(ii, *arg) if current - last >= wsupp - 1e-14: distsupp.append(ii) distmass.append(current - last) last = current if current > (1 - wsupp): break if distsupp[-1] < _b: distsupp.append(_b) distmass.append(1 - last) distsupp = np.array(distsupp) distmass = np.array(distmass) # convert intervals to right-half-open as required by histogram histsupp = distsupp + 1e-8 histsupp[0] = _a # find sample frequencies and perform chisquare test freq, hsupp = np.histogram(rvs, histsupp) chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass) npt.assert_(pval > alpha, 'chisquare - test for %s at arg = %s with pval = %s' % (msg, str(arg), str(pval))) def check_scale_docstring(distfn): if distfn.__doc__ is not None: # Docstrings can be stripped if interpreter is run with -OO npt.assert_('scale' not in distfn.__doc__)
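# --- Editor's illustration (not part of the original file): the `values=`
# construction used at the top of this module, and the ppf definition that
# check_cdf_ppf verifies, ppf(q) = min{k : cdf(k) >= q, k integer}.
def _illustrate_rv_discrete_values():
    from scipy import stats

    dist = stats.rv_discrete(values=([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]))
    assert abs(dist.pmf(2) - 0.2) < 1e-12
    assert abs(dist.cdf(3) - 0.6) < 1e-12
    assert dist.ppf(0.6) == 3  # smallest support point with cdf >= 0.6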
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/io/matlab/tests/test_mio_funcs.py
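# --- Editor's illustration (not part of the original files): the in-memory
# write-then-read pattern the next test module uses throughout (compare
# test_read_opts below); the explicit seek(0) is defensive and the variable
# names are ours.
def _illustrate_reader_roundtrip():
    from io import BytesIO

    import numpy as np
    from scipy.io.matlab.mio import savemat
    from scipy.io.matlab.mio5 import MatFile5Reader

    stream = BytesIO()
    savemat(stream, {'a': np.arange(6).reshape(1, 6)})
    stream.seek(0)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    out = rdr.get_variables()
    assert out['a'].shape == (6,)  # squeeze_me drops the singleton dimension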
# -*- coding: latin-1 -*- ''' Nose test generators Need function load / save / roundtrip tests ''' import os from collections import OrderedDict from os.path import join as pjoin, dirname from glob import glob from io import BytesIO from tempfile import mkdtemp import warnings import shutil import gzip from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_equal, assert_) from pytest import raises as assert_raises import numpy as np from numpy import array import scipy.sparse as SP import scipy.io.matlab.byteordercodes as boc from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat) from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader, MatlabFunction, varmats_from_mat, to_writeable, EmptyStructMarker) from scipy.io.matlab import mio5_params as mio5p test_data_path = pjoin(dirname(__file__), 'data') def mlarr(*args, **kwargs): """Convenience function to return matlab-compatible 2-D array.""" arr = np.array(*args, **kwargs) arr.shape = matdims(arr) return arr # Define cases to test theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9) case_table4 = [ {'name': 'double', 'classes': {'testdouble': 'double'}, 'expected': {'testdouble': theta} }] case_table4.append( {'name': 'string', 'classes': {'teststring': 'char'}, 'expected': {'teststring': array(['"Do nine men interpret?" "Nine men," I nod.'])} }) case_table4.append( {'name': 'complex', 'classes': {'testcomplex': 'double'}, 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} }) A = np.zeros((3,5)) A[0] = list(range(1,6)) A[:,0] = list(range(1,4)) case_table4.append( {'name': 'matrix', 'classes': {'testmatrix': 'double'}, 'expected': {'testmatrix': A}, }) case_table4.append( {'name': 'sparse', 'classes': {'testsparse': 'sparse'}, 'expected': {'testsparse': SP.coo_matrix(A)}, }) B = A.astype(complex) B[0,0] += 1j case_table4.append( {'name': 'sparsecomplex', 'classes': {'testsparsecomplex': 'sparse'}, 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, }) case_table4.append( {'name': 'multi', 'classes': {'theta': 'double', 'a': 'double'}, 'expected': {'theta': theta, 'a': A}, }) case_table4.append( {'name': 'minus', 'classes': {'testminus': 'double'}, 'expected': {'testminus': mlarr(-1)}, }) case_table4.append( {'name': 'onechar', 'classes': {'testonechar': 'char'}, 'expected': {'testonechar': array(['r'])}, }) # Cell arrays stored as object arrays CA = mlarr(( # tuple for object array creation [], mlarr([1]), mlarr([[1,2]]), mlarr([[1,2,3]])), dtype=object).reshape(1,-1) CA[0,0] = array( ['This cell contains this string and 3 arrays of increasing length']) case_table5 = [ {'name': 'cell', 'classes': {'testcell': 'cell'}, 'expected': {'testcell': CA}}] CAE = mlarr(( # tuple for object array creation mlarr(1), mlarr(2), mlarr([]), mlarr([]), mlarr(3)), dtype=object).reshape(1,-1) objarr = np.empty((1,1),dtype=object) objarr[0,0] = mlarr(1) case_table5.append( {'name': 'scalarcell', 'classes': {'testscalarcell': 'cell'}, 'expected': {'testscalarcell': objarr} }) case_table5.append( {'name': 'emptycell', 'classes': {'testemptycell': 'cell'}, 'expected': {'testemptycell': CAE}}) case_table5.append( {'name': 'stringarray', 'classes': {'teststringarray': 'char'}, 'expected': {'teststringarray': array( ['one ', 'two ', 'three'])}, }) case_table5.append( {'name': '3dmatrix', 'classes': {'test3dmatrix': 'double'}, 'expected': { 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} 
}) st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] st1 = np.zeros((1,1), dtype) st1['stringfield'][0,0] = array(['Rats live on no evil star.']) st1['doublefield'][0,0] = st_sub_arr st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) case_table5.append( {'name': 'struct', 'classes': {'teststruct': 'struct'}, 'expected': {'teststruct': st1} }) CN = np.zeros((1,2), dtype=object) CN[0,0] = mlarr(1) CN[0,1] = np.zeros((1,3), dtype=object) CN[0,1][0,0] = mlarr(2, dtype=np.uint8) CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8) CN[0,1][0,2] = np.zeros((1,2), dtype=object) CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8) CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8) case_table5.append( {'name': 'cellnest', 'classes': {'testcellnest': 'cell'}, 'expected': {'testcellnest': CN}, }) st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) st2[0,0]['one'] = mlarr(1) st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) st2[0,0]['two'][0,0]['three'] = array(['number 3']) case_table5.append( {'name': 'structnest', 'classes': {'teststructnest': 'struct'}, 'expected': {'teststructnest': st2} }) a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) a[0,0]['one'] = mlarr(1) a[0,0]['two'] = mlarr(2) a[0,1]['one'] = array(['number 1']) a[0,1]['two'] = array(['number 2']) case_table5.append( {'name': 'structarr', 'classes': {'teststructarr': 'struct'}, 'expected': {'teststructarr': a} }) ODT = np.dtype([(n, object) for n in ['expr', 'inputExpr', 'args', 'isEmpty', 'numArgs', 'version']]) MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') m0 = MO[0,0] m0['expr'] = array(['x']) m0['inputExpr'] = array([' x = INLINE_INPUTS_{1};']) m0['args'] = array(['x']) m0['isEmpty'] = mlarr(0) m0['numArgs'] = mlarr(1) m0['version'] = mlarr(1) case_table5.append( {'name': 'object', 'classes': {'testobject': 'object'}, 'expected': {'testobject': MO} }) fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb') u_str = fp_u_str.read().decode('utf-8') fp_u_str.close() case_table5.append( {'name': 'unicode', 'classes': {'testunicode': 'char'}, 'expected': {'testunicode': array([u_str])} }) case_table5.append( {'name': 'sparse', 'classes': {'testsparse': 'sparse'}, 'expected': {'testsparse': SP.coo_matrix(A)}, }) case_table5.append( {'name': 'sparsecomplex', 'classes': {'testsparsecomplex': 'sparse'}, 'expected': {'testsparsecomplex': SP.coo_matrix(B)}, }) case_table5.append( {'name': 'bool', 'classes': {'testbools': 'logical'}, 'expected': {'testbools': array([[True], [False]])}, }) case_table5_rt = case_table5[:] # Inline functions can't be concatenated in matlab, so RT only case_table5_rt.append( {'name': 'objectarray', 'classes': {'testobjectarray': 'object'}, 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}}) def types_compatible(var1, var2): """Check if types are same or compatible. 0-D numpy scalars are compatible with bare python scalars. 
""" type1 = type(var1) type2 = type(var2) if type1 is type2: return True if type1 is np.ndarray and var1.shape == (): return type(var1.item()) is type2 if type2 is np.ndarray and var2.shape == (): return type(var2.item()) is type1 return False def _check_level(label, expected, actual): """ Check one level of a potentially nested array """ if SP.issparse(expected): # allow different types of sparse matrices assert_(SP.issparse(actual)) assert_array_almost_equal(actual.todense(), expected.todense(), err_msg=label, decimal=5) return # Check types are as expected assert_(types_compatible(expected, actual), "Expected type %s, got %s at %s" % (type(expected), type(actual), label)) # A field in a record array may not be an ndarray # A scalar from a record array will be type np.void if not isinstance(expected, (np.void, np.ndarray, MatlabObject)): assert_equal(expected, actual) return # This is an ndarray-like thing assert_(expected.shape == actual.shape, msg='Expected shape %s, got %s at %s' % (expected.shape, actual.shape, label)) ex_dtype = expected.dtype if ex_dtype.hasobject: # array of objects if isinstance(expected, MatlabObject): assert_equal(expected.classname, actual.classname) for i, ev in enumerate(expected): level_label = "%s, [%d], " % (label, i) _check_level(level_label, ev, actual[i]) return if ex_dtype.fields: # probably recarray for fn in ex_dtype.fields: level_label = "%s, field %s, " % (label, fn) _check_level(level_label, expected[fn], actual[fn]) return if ex_dtype.type in (str, # string or bool np.unicode_, np.bool_): assert_equal(actual, expected, err_msg=label) return # Something numeric assert_array_almost_equal(actual, expected, err_msg=label, decimal=5) def _load_check_case(name, files, case): for file_name in files: matdict = loadmat(file_name, struct_as_record=True) label = "test %s; file %s" % (name, file_name) for k, expected in case.items(): k_label = "%s, variable %s" % (label, k) assert_(k in matdict, "Missing key at %s" % k_label) _check_level(k_label, expected, matdict[k]) def _whos_check_case(name, files, case, classes): for file_name in files: label = "test %s; file %s" % (name, file_name) whos = whosmat(file_name) expected_whos = [ (k, expected.shape, classes[k]) for k, expected in case.items()] whos.sort() expected_whos.sort() assert_equal(whos, expected_whos, "%s: %r != %r" % (label, whos, expected_whos) ) # Round trip tests def _rt_check_case(name, expected, format): mat_stream = BytesIO() savemat(mat_stream, expected, format=format) mat_stream.seek(0) _load_check_case(name, [mat_stream], expected) # generator for load tests def test_load(): for case in case_table4 + case_table5: name = case['name'] expected = case['expected'] filt = pjoin(test_data_path, 'test%s_*.mat' % name) files = glob(filt) assert_(len(files) > 0, "No files for test %s using filter %s" % (name, filt)) _load_check_case(name, files, expected) # generator for whos tests def test_whos(): for case in case_table4 + case_table5: name = case['name'] expected = case['expected'] classes = case['classes'] filt = pjoin(test_data_path, 'test%s_*.mat' % name) files = glob(filt) assert_(len(files) > 0, "No files for test %s using filter %s" % (name, filt)) _whos_check_case(name, files, expected, classes) # generator for round trip tests def test_round_trip(): for case in case_table4 + case_table5_rt: case_table4_names = [case['name'] for case in case_table4] name = case['name'] + '_round_trip' expected = case['expected'] for format in (['4', '5'] if case['name'] in case_table4_names else ['5']): 
_rt_check_case(name, expected, format) def test_gzip_simple(): xdense = np.zeros((20,20)) xdense[2,3] = 2.3 xdense[4,5] = 4.5 x = SP.csc_matrix(xdense) name = 'gzip_test' expected = {'x':x} format = '4' tmpdir = mkdtemp() try: fname = pjoin(tmpdir,name) mat_stream = gzip.open(fname, mode='wb') savemat(mat_stream, expected, format=format) mat_stream.close() mat_stream = gzip.open(fname, mode='rb') actual = loadmat(mat_stream, struct_as_record=True) mat_stream.close() finally: shutil.rmtree(tmpdir) assert_array_almost_equal(actual['x'].todense(), expected['x'].todense(), err_msg=repr(actual)) def test_multiple_open(): # Ticket #1039, on Windows: check that files are not left open tmpdir = mkdtemp() try: x = dict(x=np.zeros((2, 2))) fname = pjoin(tmpdir, "a.mat") # Check that file is not left open savemat(fname, x) os.unlink(fname) savemat(fname, x) loadmat(fname) os.unlink(fname) # Check that stream is left open f = open(fname, 'wb') savemat(f, x) f.seek(0) f.close() f = open(fname, 'rb') loadmat(f) f.seek(0) f.close() finally: shutil.rmtree(tmpdir) def test_mat73(): # Check any hdf5 files raise an error filenames = glob( pjoin(test_data_path, 'testhdf5*.mat')) assert_(len(filenames) > 0) for filename in filenames: fp = open(filename, 'rb') assert_raises(NotImplementedError, loadmat, fp, struct_as_record=True) fp.close() def test_warnings(): # This test is an echo of the previous behavior, which was to raise a # warning if the user triggered a search for mat files on the Python system # path. We can remove the test in the next version after upcoming (0.13). fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat') with warnings.catch_warnings(): warnings.simplefilter('error') # This should not generate a warning loadmat(fname, struct_as_record=True) # This neither loadmat(fname, struct_as_record=False) def test_regression_653(): # Saving a dictionary with only invalid keys used to raise an error. Now we # save this as an empty struct in matlab space. 
sio = BytesIO() savemat(sio, {'d':{1:2}}, format='5') back = loadmat(sio)['d'] # Check we got an empty struct equivalent assert_equal(back.shape, (1,1)) assert_equal(back.dtype, np.dtype(object)) assert_(back[0,0] is None) def test_structname_len(): # Test limit for length of field names in structs lim = 31 fldname = 'a' * lim st1 = np.zeros((1,1), dtype=[(fldname, object)]) savemat(BytesIO(), {'longstruct': st1}, format='5') fldname = 'a' * (lim+1) st1 = np.zeros((1,1), dtype=[(fldname, object)]) assert_raises(ValueError, savemat, BytesIO(), {'longstruct': st1}, format='5') def test_4_and_long_field_names_incompatible(): # Long field names option not supported in 4 my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)]) assert_raises(ValueError, savemat, BytesIO(), {'my_struct':my_struct}, format='4', long_field_names=True) def test_long_field_names(): # Test limit for length of field names in structs lim = 63 fldname = 'a' * lim st1 = np.zeros((1,1), dtype=[(fldname, object)]) savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) fldname = 'a' * (lim+1) st1 = np.zeros((1,1), dtype=[(fldname, object)]) assert_raises(ValueError, savemat, BytesIO(), {'longstruct': st1}, format='5',long_field_names=True) def test_long_field_names_in_struct(): # Regression test - long_field_names was erased if you passed a struct # within a struct lim = 63 fldname = 'a' * lim cell = np.ndarray((1,2),dtype=object) st1 = np.zeros((1,1), dtype=[(fldname, object)]) cell[0,0] = st1 cell[0,1] = st1 savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True) # # Check to make sure it fails with long field names off # assert_raises(ValueError, savemat, BytesIO(), {'longstruct': cell}, format='5', long_field_names=False) def test_cell_with_one_thing_in_it(): # Regression test - make a cell array that's 1 x 2 and put two # strings in it. It works. Make a cell array that's 1 x 1 and put # a string in it. It should work but, in the old days, it didn't. 
    cells = np.ndarray((1, 2), dtype=object)
    cells[0, 0] = 'Hello'
    cells[0, 1] = 'World'
    savemat(BytesIO(), {'x': cells}, format='5')

    cells = np.ndarray((1, 1), dtype=object)
    cells[0, 0] = 'Hello, world'
    savemat(BytesIO(), {'x': cells}, format='5')


def test_writer_properties():
    # Tests getting, setting of properties of matrix writer
    mfw = MatFile5Writer(BytesIO())
    assert_equal(mfw.global_vars, [])
    mfw.global_vars = ['avar']
    assert_equal(mfw.global_vars, ['avar'])
    assert_equal(mfw.unicode_strings, False)
    mfw.unicode_strings = True
    assert_equal(mfw.unicode_strings, True)
    assert_equal(mfw.long_field_names, False)
    mfw.long_field_names = True
    assert_equal(mfw.long_field_names, True)


def test_use_small_element():
    # Test whether we're using small data element or not
    sio = BytesIO()
    wtr = MatFile5Writer(sio)
    # First check size for a name that does not fit a small data element (sde)
    arr = np.zeros(10)
    wtr.put_variables({'aaaaa': arr})
    w_sz = len(sio.getvalue())
    # Check small name results in largish difference in size
    sio.truncate(0)
    sio.seek(0)
    wtr.put_variables({'aaaa': arr})
    assert_(w_sz - len(sio.getvalue()) > 4)
    # Whereas increasing name size makes less difference
    sio.truncate(0)
    sio.seek(0)
    wtr.put_variables({'aaaaaa': arr})
    assert_(len(sio.getvalue()) - w_sz < 4)


def test_save_dict():
    # Test that dict can be saved (as recarray), loaded as matstruct
    dict_types = ((dict, False), (OrderedDict, True),)
    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
    for dict_type, is_ordered in dict_types:
        # Initialize with tuples to keep order for OrderedDict
        d = dict_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': d})
        stream.seek(0)
        vals = loadmat(stream)['dict']
        assert_equal(set(vals.dtype.names), set(['a', 'b']))
        if is_ordered:
            # Input was ordered, output in ab order
            assert_array_equal(vals, ab_exp)
        else:
            # Not ordered input, either order output
            if vals.dtype.names[0] == 'a':
                assert_array_equal(vals, ab_exp)
            else:
                assert_array_equal(vals, ba_exp)


def test_1d_shape():
    # New 5 behavior is 1D -> row vector
    arr = np.arange(5)
    for format in ('4', '5'):
        # Default oned_as is 'row'
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format)
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))
        # can be explicitly 'column' for oned_as
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format, oned_as='column')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (5, 1))
        # but different from 'row'
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format, oned_as='row')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))


def test_compression():
    arr = np.zeros(100).reshape((5, 20))
    arr[2, 10] = 1
    stream = BytesIO()
    savemat(stream, {'arr': arr})
    raw_len = len(stream.getvalue())
    vals = loadmat(stream)
    assert_array_equal(vals['arr'], arr)
    stream = BytesIO()
    savemat(stream, {'arr': arr}, do_compression=True)
    compressed_len = len(stream.getvalue())
    vals = loadmat(stream)
    assert_array_equal(vals['arr'], arr)
    assert_(raw_len > compressed_len)
    # Save two variables together; check the second also round-trips,
    # with and without compression
    arr2 = arr.copy()
    arr2[0, 0] = 1
    stream = BytesIO()
    savemat(stream, {'arr': arr, 'arr2': arr2}, do_compression=False)
    vals = loadmat(stream)
    assert_array_equal(vals['arr2'], arr2)
    stream = BytesIO()
    savemat(stream, {'arr': arr, 'arr2': arr2}, do_compression=True)
    vals = loadmat(stream)
    assert_array_equal(vals['arr2'], arr2)


def test_single_object():
    stream = BytesIO()
    savemat(stream, {'A': np.array(1, dtype=object)})


def test_skip_variable():
    # Test skipping over the first of two variables in a MAT file
    # using mat_reader_factory and put_variables to read them in.
    #
    # This is a regression test of a problem that's caused by
    # using the compressed file reader seek instead of the raw file
    # I/O seek when skipping over a compressed chunk.
    #
    # The problem arises when the chunk is large: this file has
    # a 256x256 array of random (uncompressible) doubles.
    #
    filename = pjoin(test_data_path, 'test_skip_variable.mat')
    #
    # Prove that it loads with loadmat
    #
    d = loadmat(filename, struct_as_record=True)
    assert_('first' in d)
    assert_('second' in d)
    #
    # Make the factory
    #
    factory, file_opened = mat_reader_factory(filename, struct_as_record=True)
    #
    # This is where the factory breaks with an error in MatMatrixGetter.to_next
    #
    d = factory.get_variables('second')
    assert_('second' in d)
    factory.mat_stream.close()


def test_empty_struct():
    # ticket 885
    filename = pjoin(test_data_path, 'test_empty_struct.mat')
    # before ticket fix, this would crash with ValueError, empty data
    # type
    d = loadmat(filename, struct_as_record=True)
    a = d['a']
    assert_equal(a.shape, (1, 1))
    assert_equal(a.dtype, np.dtype(object))
    assert_(a[0, 0] is None)
    stream = BytesIO()
    arr = np.array((), dtype='U')
    # before ticket fix, this used to give data type not understood
    savemat(stream, {'arr': arr})
    d = loadmat(stream)
    a2 = d['arr']
    assert_array_equal(a2, arr)


def test_save_empty_dict():
    # saving empty dict also gives empty struct
    stream = BytesIO()
    savemat(stream, {'arr': {}})
    d = loadmat(stream)
    a = d['arr']
    assert_equal(a.shape, (1, 1))
    assert_equal(a.dtype, np.dtype(object))
    assert_(a[0, 0] is None)


def assert_any_equal(output, alternatives):
    """ Assert `output` is equal to at least one element in `alternatives`
    """
    one_equal = False
    for expected in alternatives:
        if np.all(output == expected):
            one_equal = True
            break
    assert_(one_equal)


def test_to_writeable():
    # Test to_writeable function
    res = to_writeable(np.array([1]))  # pass through ndarrays
    assert_equal(res.shape, (1,))
    assert_array_equal(res, 1)
    # Dict fields can be written in any order
    expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])
    expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])
    alternatives = (expected1, expected2)
    assert_any_equal(to_writeable({'a': 1, 'b': 2}), alternatives)
    # Fields with underscores discarded
    assert_any_equal(to_writeable({'a': 1, 'b': 2, '_c': 3}), alternatives)
    # Not-string fields discarded
    assert_any_equal(to_writeable({'a': 1, 'b': 2, 100: 3}), alternatives)
    # String fields that are not valid Python identifiers discarded
    assert_any_equal(to_writeable({'a': 1, 'b': 2, '99': 3}), alternatives)
    # Object with field names is equivalent
    class klass(object):
        pass
    c = klass
    c.a = 1
    c.b = 2
    assert_any_equal(to_writeable(c), alternatives)
    # empty list and tuple go to empty array
    res = to_writeable([])
    assert_equal(res.shape, (0,))
    assert_equal(res.dtype.type, np.float64)
    res = to_writeable(())
    assert_equal(res.shape, (0,))
    assert_equal(res.dtype.type, np.float64)
    # None -> None
    assert_(to_writeable(None) is None)
    # Strings convert to arrays of str
    assert_equal(to_writeable('a string').dtype.type, np.str_)
    # Python scalars convert to NumPy scalars
    res = to_writeable(1)
    assert_equal(res.shape, ())
    assert_equal(res.dtype.type, np.array(1).dtype.type)
    assert_array_equal(res, 1)
    # Empty dict returns EmptyStructMarker
    assert_(to_writeable({}) is EmptyStructMarker)
    # Object does not have (even empty) __dict__
    assert_(to_writeable(object()) is None)
    # Custom object does have empty __dict__, returns EmptyStructMarker
    class C(object):
        pass

    assert_(to_writeable(C()) is EmptyStructMarker)
    # dict keys with legal characters are convertible
    res = to_writeable({'a': 1})['a']
    assert_equal(res.shape, (1,))
    assert_equal(res.dtype.type, np.object_)
    # If every field name is illegal, falls back to EmptyStructMarker
    assert_(to_writeable({'1': 1}) is EmptyStructMarker)
    assert_(to_writeable({'_a': 1}) is EmptyStructMarker)
    # Unless there are valid fields, in which case structured array
    assert_equal(to_writeable({'1': 1, 'f': 2}),
                 np.array([(2,)], dtype=[('f', '|O8')]))


def test_recarray():
    # check roundtrip of structured array
    dt = [('f1', 'f8'), ('f2', 'S10')]
    arr = np.zeros((2,), dtype=dt)
    arr[0]['f1'] = 0.5
    arr[0]['f2'] = 'python'
    arr[1]['f1'] = 99
    arr[1]['f2'] = 'not perl'
    stream = BytesIO()
    savemat(stream, {'arr': arr})
    d = loadmat(stream, struct_as_record=False)
    a20 = d['arr'][0, 0]
    assert_equal(a20.f1, 0.5)
    assert_equal(a20.f2, 'python')
    d = loadmat(stream, struct_as_record=True)
    a20 = d['arr'][0, 0]
    assert_equal(a20['f1'], 0.5)
    assert_equal(a20['f2'], 'python')
    # structs always come back as object types
    assert_equal(a20.dtype, np.dtype([('f1', 'O'), ('f2', 'O')]))
    a21 = d['arr'].flat[1]
    assert_equal(a21['f1'], 99)
    assert_equal(a21['f2'], 'not perl')


def test_save_object():
    class C(object):
        pass
    c = C()
    c.field1 = 1
    c.field2 = 'a string'
    stream = BytesIO()
    savemat(stream, {'c': c})
    d = loadmat(stream, struct_as_record=False)
    c2 = d['c'][0, 0]
    assert_equal(c2.field1, 1)
    assert_equal(c2.field2, 'a string')
    d = loadmat(stream, struct_as_record=True)
    c2 = d['c'][0, 0]
    assert_equal(c2['field1'], 1)
    assert_equal(c2['field2'], 'a string')


def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1, 6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc.
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)


def test_empty_string():
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # Empty string round trip. Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char. There is no way of having an array of char that
    # is not empty, but contains an empty string.
stream = BytesIO() savemat(stream, {'a': np.array([''])}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['a'], np.array([], dtype='U1')) stream.truncate(0) stream.seek(0) savemat(stream, {'a': np.array([], dtype='U1')}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['a'], np.array([], dtype='U1')) stream.close() def test_corrupted_data(): import zlib for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'), (zlib.error, 'corrupted_zlib_checksum.mat')]: with open(pjoin(test_data_path, fname), 'rb') as fp: rdr = MatFile5Reader(fp) assert_raises(exc, rdr.get_variables) def test_corrupted_data_check_can_be_disabled(): with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp: rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False) rdr.get_variables() def test_read_both_endian(): # make sure big- and little- endian data is read correctly for fname in ('big_endian.mat', 'little_endian.mat'): fp = open(pjoin(test_data_path, fname), 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_array_equal(d['strings'], np.array([['hello'], ['world']], dtype=object)) assert_array_equal(d['floats'], np.array([[2., 3.], [3., 4.]], dtype=np.float32)) def test_write_opposite_endian(): # We don't support writing opposite endian .mat files, but we need to behave # correctly if the user supplies an other-endian NumPy array to write out. float_arr = np.array([[2., 3.], [3., 4.]]) int_arr = np.arange(6).reshape((2, 3)) uni_arr = np.array(['hello', 'world'], dtype='U') stream = BytesIO() savemat(stream, {'floats': float_arr.byteswap().newbyteorder(), 'ints': int_arr.byteswap().newbyteorder(), 'uni_arr': uni_arr.byteswap().newbyteorder()}) rdr = MatFile5Reader(stream) d = rdr.get_variables() assert_array_equal(d['floats'], float_arr) assert_array_equal(d['ints'], int_arr) assert_array_equal(d['uni_arr'], uni_arr) stream.close() def test_logical_array(): # The roundtrip test doesn't verify that we load the data up with the # correct (bool) dtype with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj: rdr = MatFile5Reader(fobj, mat_dtype=True) d = rdr.get_variables() x = np.array([[True], [False]], dtype=np.bool_) assert_array_equal(d['testbools'], x) assert_equal(d['testbools'].dtype, x.dtype) def test_logical_out_type(): # Confirm that bool type written as uint8, uint8 class # See gh-4022 stream = BytesIO() barr = np.array([False, True, False]) savemat(stream, {'barray': barr}) stream.seek(0) reader = MatFile5Reader(stream) reader.initialize_read() reader.read_file_header() hdr, _ = reader.read_var_header() assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS) assert_equal(hdr.is_logical, True) var = reader.read_var_array(hdr, False) assert_equal(var.dtype.type, np.uint8) def test_mat4_3d(): # test behavior when writing 3-D arrays to matlab 4 files stream = BytesIO() arr = np.arange(24).reshape((2,3,4)) assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4') def test_func_read(): func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat') fp = open(func_eg, 'rb') rdr = MatFile5Reader(fp) d = rdr.get_variables() fp.close() assert_(isinstance(d['testfunc'], MatlabFunction)) stream = BytesIO() wtr = MatFile5Writer(stream) assert_raises(MatWriteError, wtr.put_variables, d) def test_mat_dtype(): double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat') fp = open(double_eg, 'rb') rdr = MatFile5Reader(fp, mat_dtype=False) d = rdr.get_variables() fp.close() assert_equal(d['testmatrix'].dtype.kind, 'u') fp = 
open(double_eg, 'rb')
    rdr = MatFile5Reader(fp, mat_dtype=True)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['testmatrix'].dtype.kind, 'f')


def test_sparse_in_struct():
    # reproduces bug found by DC where Cython code was insisting on
    # ndarray return type, but getting sparse matrix
    st = {'sparsefield': SP.coo_matrix(np.eye(4))}
    stream = BytesIO()
    savemat(stream, {'a': st})
    d = loadmat(stream, struct_as_record=True)
    assert_array_equal(d['a'][0, 0]['sparsefield'].todense(), np.eye(4))


def test_mat_struct_squeeze():
    stream = BytesIO()
    in_d = {'st': {'one': 1, 'two': 2}}
    savemat(stream, in_d)
    # no error without squeeze
    loadmat(stream, struct_as_record=False)
    # previous error was with squeeze, with mat_struct
    loadmat(stream, struct_as_record=False, squeeze_me=True)


def test_scalar_squeeze():
    stream = BytesIO()
    in_d = {'scalar': [[0.1]], 'string': 'my name', 'st': {'one': 1, 'two': 2}}
    savemat(stream, in_d)
    out_d = loadmat(stream, squeeze_me=True)
    assert_(isinstance(out_d['scalar'], float))
    assert_(isinstance(out_d['string'], str))
    assert_(isinstance(out_d['st'], np.ndarray))


def test_str_round():
    # from report by Angus McMorland on mailing list 3 May 2010
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)


def test_fieldnames():
    # Check that field names are as expected
    stream = BytesIO()
    savemat(stream, {'a': {'a': 1, 'b': 2}})
    res = loadmat(stream)
    field_names = res['a'].dtype.names
    assert_equal(set(field_names), set(('a', 'b')))


def test_loadmat_varnames():
    # Test that we can get just one variable from a mat file using loadmat
    mat5_sys_names = ['__globals__', '__header__', '__version__']
    for eg_file, sys_v_names in (
            (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []),
            (pjoin(test_data_path, 'testmulti_7.4_GLNX86.mat'),
             mat5_sys_names)):
        vars = loadmat(eg_file)
        assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names='a')
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['a'])
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['theta'])
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=('theta',))
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=[])
        assert_equal(set(vars.keys()), set(sys_v_names))
        vnames = ['theta']
        vars = loadmat(eg_file, variable_names=vnames)
        assert_equal(vnames, ['theta'])


def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ('f8', 'f4', 'i8', 'i4', 'i2', 'i1',
                'u8', 'u4', 'u2', 'u1', 'c16', 'c8'):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in Python 3
        savemat(stream, {'arr': arr.astype(dts)})
        vars = loadmat(stream)
        assert_equal(np.dtype(dts), vars['arr'].dtype)


def test_varmats_from_mat():
    # Make a mat file with several variables, write it, read it back
    names_vars = (('arr', mlarr(np.arange(10))),
                  ('mystr', mlarr('a string')),
                  ('mynum', mlarr(10)))

    # Dict like thing to give variables in defined order
    class C(object):
        def items(self):
            return names_vars
    stream = BytesIO()
    savemat(stream, C())
    varmats = varmats_from_mat(stream)
    assert_equal(len(varmats), 3)
    for i in range(3):
        name, var_stream = varmats[i]
        exp_name, exp_res = names_vars[i]
        assert_equal(name, exp_name)
        res = loadmat(var_stream)
        assert_array_equal(res[name], exp_res)


def test_one_by_zero():
    # Test 1x0 chars get read correctly
    func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
    fp = open(func_eg, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['var'].shape, (0,))


def test_load_mat4_le():
    # We were getting byte order wrong when reading little-endian float64
    # dense matrices on big-endian platforms
    mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
    vars = loadmat(mat4_fname)
    assert_array_equal(vars['a'], [[0.1, 1.2]])


def test_unicode_mat4():
    # Mat4 should save unicode as latin1
    bio = BytesIO()
    var = {'second_cat': 'Schrödinger'}
    savemat(bio, var, format='4')
    var_back = loadmat(bio)
    assert_equal(var_back['second_cat'], var['second_cat'])


def test_logical_sparse():
    # Test we can read logical sparse stored in mat file as bytes.
    # See https://github.com/scipy/scipy/issues/3539.
    # In some files saved by MATLAB, the sparse data elements (Real Part
    # Subelement in MATLAB speak) are stored with apparent type double
    # (miDOUBLE) but are in fact single bytes.
    filename = pjoin(test_data_path, 'logical_sparse.mat')
    # Before fix, this would crash with:
    # ValueError: indices and data should have the same size
    d = loadmat(filename, struct_as_record=True)
    log_sp = d['sp_log_5_4']
    assert_(isinstance(log_sp, SP.csc_matrix))
    assert_equal(log_sp.dtype.type, np.bool_)
    assert_array_equal(log_sp.toarray(),
                       [[True, True, True, False],
                        [False, False, True, False],
                        [False, False, True, False],
                        [False, False, False, False],
                        [False, False, False, False]])


def test_empty_sparse():
    # Can we read empty sparse matrices?
    sio = BytesIO()
    import scipy.sparse
    empty_sparse = scipy.sparse.csr_matrix([[0, 0], [0, 0]])
    savemat(sio, dict(x=empty_sparse))
    sio.seek(0)
    res = loadmat(sio)
    assert_array_equal(res['x'].shape, empty_sparse.shape)
    assert_array_equal(res['x'].todense(), 0)
    # Do empty sparse matrices get written with max nnz 1?
    # See https://github.com/scipy/scipy/issues/4208
    sio.seek(0)
    reader = MatFile5Reader(sio)
    reader.initialize_read()
    reader.read_file_header()
    hdr, _ = reader.read_var_header()
    assert_equal(hdr.nzmax, 1)


def test_empty_mat_error():
    # Test we get a specific error for an empty mat file
    sio = BytesIO()
    assert_raises(MatReadError, loadmat, sio)


def test_miuint32_compromise():
    # Reader should accept miUINT32 for miINT32, but check signs
    # mat file with miUINT32 for miINT32, but OK values
    filename = pjoin(test_data_path, 'miuint32_for_miint32.mat')
    res = loadmat(filename)
    assert_equal(res['an_array'], np.arange(10)[None, :])
    # mat file with miUINT32 for miINT32, with negative value
    filename = pjoin(test_data_path, 'bad_miuint32.mat')
    with assert_raises(ValueError):
        loadmat(filename)


def test_miutf8_for_miint8_compromise():
    # Check reader accepts ascii as miUTF8 for array names
    filename = pjoin(test_data_path, 'miutf8_array_name.mat')
    res = loadmat(filename)
    assert_equal(res['array_name'], [[1]])
    # mat file with non-ascii utf8 name raises error
    filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')
    with assert_raises(ValueError):
        loadmat(filename)


def test_bad_utf8():
    # Check that reader reads bad UTF with 'replace' option
    filename = pjoin(test_data_path, 'broken_utf8.mat')
    res = loadmat(filename)
    assert_equal(res['bad_string'],
                 b'\x80 am broken'.decode('utf8', 'replace'))


def test_save_unicode_field(tmpdir):
    filename = os.path.join(str(tmpdir), 'test.mat')
    test_dict = {u'a': {u'b': 1, u'c': 'test_str'}}
    savemat(filename, test_dict)


def test_filenotfound():
    # Check the correct error is thrown
    assert_raises(IOError, loadmat, "NotExistentFile00.mat")
    assert_raises(IOError, loadmat, "NotExistentFile00")
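# A minimal standalone sketch of the savemat/loadmat round-trip pattern the
# tests above rely on; the helper and variable names here are illustrative,
# not part of the suite.
def _roundtrip_sketch():
    bio = BytesIO()
    savemat(bio, {'arr': np.arange(6).reshape(2, 3)})
    bio.seek(0)
    back = loadmat(bio)
    assert_array_equal(back['arr'], np.arange(6).reshape(2, 3))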
import numpy.testing as npt
import numpy as np
import pytest

from scipy import stats
from .common_tests import (check_normalization, check_moment,
                           check_mean_expect, check_var_expect,
                           check_skew_expect, check_kurt_expect,
                           check_entropy, check_private_entropy,
                           check_edge_support, check_named_args,
                           check_random_state_property, check_pickling,
                           check_rvs_broadcast, check_freezing)
from scipy.stats._distr_params import distdiscrete

vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]


def cases_test_discrete_basic():
    seen = set()
    for distname, arg in distdiscrete:
        yield distname, arg, distname not in seen
        seen.add(distname)


@pytest.mark.parametrize('distname,arg,first_case',
                         cases_test_discrete_basic())
def test_discrete_basic(distname, arg, first_case):
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    np.random.seed(9765456)
    rvs = distfn.rvs(size=2000, *arg)
    supp = np.unique(rvs)
    m, v = distfn.stats(*arg)
    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')
    check_pmf_cdf(distfn, arg, distname)
    check_oth(distfn, arg, supp, distname + ' oth')
    check_edge_support(distfn, arg)

    alpha = 0.01
    check_discrete_chisquare(distfn, arg, rvs, alpha,
                             distname + ' chisquare')

    if first_case:
        locscale_defaults = (0,)
        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, }
        k = spec_k.get(distname, 1)
        check_named_args(distfn, k, arg, locscale_defaults, meths)
        if distname != 'sample distribution':
            check_scale_docstring(distfn)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)
        check_freezing(distfn, arg)

        # Entropy
        check_entropy(distfn, arg, distname)
        if distfn.__class__._entropy != stats.rv_discrete._entropy:
            check_private_entropy(distfn, arg, stats.rv_discrete)


@pytest.mark.parametrize('distname,arg', distdiscrete)
def test_moments(distname, arg):
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    m, v, s, k = distfn.stats(*arg, moments='mvsk')
    check_normalization(distfn, arg, distname)

    # compare `stats` and `moment` methods
    check_moment(distfn, arg, m, v, distname)
    check_mean_expect(distfn, arg, m, distname)
    check_var_expect(distfn, arg, m, v, distname)
    check_skew_expect(distfn, arg, m, v, s, distname)
    if distname not in ['zipf', 'yulesimon']:
        check_kurt_expect(distfn, arg, m, v, k, distname)

    # frozen distr moments
    check_moment_frozen(distfn, arg, m, 1)
    check_moment_frozen(distfn, arg, v + m*m, 2)


@pytest.mark.parametrize('dist,shape_args', distdiscrete)
def test_rvs_broadcast(dist, shape_args):
    # If shape_only is True, it means the _rvs method of the
    # distribution uses more than one random number to generate a random
    # variate. That means the result of using rvs with broadcasting or
    # with a nontrivial size will not necessarily be the same as using the
    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
    # of the results, not the values.
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement. If
    # the implementation of the rvs() method of a distribution changes,
    # this test might also have to be changed.
    shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace']

    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
    loc = np.zeros(2)
    nargs = distfunc.numargs
    allargs = []
    bshape = []
    # Generate shape parameter arguments...
    for k in range(nargs):
        shp = (k + 3,) + (1,)*(k + 1)
        param_val = shape_args[k]
        allargs.append(np.full(shp, param_val))
        bshape.insert(0, shp[0])
    allargs.append(loc)
    bshape.append(loc.size)
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.
    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only,
                        [np.int_])


@pytest.mark.parametrize('dist,args', distdiscrete)
def test_ppf_with_loc(dist, args):
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        distfn = dist
    # check with negative, zero and positive relocation
    np.random.seed(1942349)
    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    _a, _b = distfn.support(*args)
    for loc in re_locs:
        npt.assert_array_equal(
            [_a - 1 + loc, _b + loc],
            [distfn.ppf(0.0, *args, loc=loc),
             distfn.ppf(1.0, *args, loc=loc)])


def check_cdf_ppf(distfn, arg, supp, msg):
    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg),
                           supp, msg + '-roundtrip')
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
                           supp, msg + '-roundtrip')
    if not hasattr(distfn, 'xk'):
        _a, _b = distfn.support(*arg)
        supp1 = supp[supp < _b]
        npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
                               supp1 + distfn.inc, msg + ' ppf-cdf-next')
        # -1e-8 could cause an error if pmf < 1e-8


def check_pmf_cdf(distfn, arg, distname):
    if hasattr(distfn, 'xk'):
        index = distfn.xk
    else:
        startind = int(distfn.ppf(0.01, *arg) - 1)
        index = list(range(startind, startind + 10))
    cdfs = distfn.cdf(index, *arg)
    pmfs_cum = distfn.pmf(index, *arg).cumsum()

    atol, rtol = 1e-10, 1e-10
    if distname == 'skellam':    # ncx2 accuracy
        atol, rtol = 1e-5, 1e-5
    npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
                        atol=atol, rtol=rtol)


def check_moment_frozen(distfn, arg, m, k):
    npt.assert_allclose(distfn(*arg).moment(k), m,
                        atol=1e-10, rtol=1e-10)


def check_oth(distfn, arg, supp, msg):
    # checking other methods of distfn
    npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
                        atol=1e-10, rtol=1e-10)

    q = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg),
                        atol=1e-10, rtol=1e-10)

    median_sf = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)


def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    """Perform chisquare test for random sample of a discrete distribution

    Parameters
    ----------
    distfn : rv_discrete
        the discrete distribution to test against
    arg : sequence
        parameters of the distribution
    rvs : ndarray
        random sample drawn from the distribution
    alpha : float
        significance level, threshold for p-value
    msg : string
        identifier used in the failure message

    Raises
    ------
    AssertionError
        if the p-value of the chisquare test is below `alpha`
    """
    wsupp = 0.05

    # construct intervals with minimum mass `wsupp`.
    # intervals are left-half-open as in a cdf difference
    _a, _b = distfn.support(*arg)
    lo = int(max(_a, -1000))
    high = int(min(_b, 1000)) + 1
    distsupport = range(lo, high)
    last = 0
    distsupp = [lo]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii, *arg)
        if current - last >= wsupp - 1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            if current > (1 - wsupp):
                break
    if distsupp[-1] < _b:
        distsupp.append(_b)
        distmass.append(1 - last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)

    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp + 1e-8
    histsupp[0] = _a

    # find sample frequencies and perform chisquare test
    freq, hsupp = np.histogram(rvs, histsupp)
    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)

    npt.assert_(pval > alpha,
                'chisquare - test for %s at arg = %s with pval = %s' %
                (msg, str(arg), str(pval)))


def check_scale_docstring(distfn):
    if distfn.__doc__ is not None:
        # Docstrings can be stripped if interpreter is run with -OO
        npt.assert_('scale' not in distfn.__doc__)
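# A standalone sketch of the binned chi-square idea that
# check_discrete_chisquare implements above, on a Poisson(3) sample; the
# bin edges, sample size and seed here are illustrative assumptions.
def _chisquare_sketch():
    rvs = stats.poisson.rvs(3.0, size=2000, random_state=1234)
    edges = np.concatenate([np.arange(-0.5, 9.0), [np.inf]])  # lump the tail
    probs = np.diff(stats.poisson.cdf(edges, 3.0))  # expected bin masses
    freq, _ = np.histogram(rvs, edges)
    _, pval = stats.chisquare(freq, len(rvs) * probs)
    npt.assert_(pval > 0.01)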
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/io/matlab/tests/test_mio.py
import numpy as np

import scipy.special as sc
from scipy.special._testutils import FuncData


def test_sici_consistency():
    # Make sure the implementation of sici for real arguments agrees
    # with the implementation of sici for complex arguments.

    # On the negative real axis Cephes drops the imaginary part in ci
    def sici(x):
        si, ci = sc.sici(x + 0j)
        return si.real, ci.real

    x = np.r_[-np.logspace(8, -30, 200), 0, np.logspace(-30, 8, 200)]
    si, ci = sc.sici(x)
    dataset = np.column_stack((x, si, ci))
    FuncData(sici, dataset, 0, (1, 2), rtol=1e-12).check()


def test_shichi_consistency():
    # Make sure the implementation of shichi for real arguments agrees
    # with the implementation of shichi for complex arguments.

    # On the negative real axis Cephes drops the imaginary part in chi
    def shichi(x):
        shi, chi = sc.shichi(x + 0j)
        return shi.real, chi.real

    # Overflow happens quickly, so limit range
    x = np.r_[-np.logspace(np.log10(700), -30, 200), 0,
              np.logspace(-30, np.log10(700), 200)]
    shi, chi = sc.shichi(x)
    dataset = np.column_stack((x, shi, chi))
    FuncData(shichi, dataset, 0, (1, 2), rtol=1e-14).check()
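# A quick point-check of the same real/complex agreement at a single
# argument, with an assumed (looser) tolerance; the FuncData runs above
# remain the authoritative tests.
def _sici_point_check():
    si_r, ci_r = sc.sici(2.0)
    si_c, ci_c = sc.sici(2.0 + 0j)
    assert np.allclose([si_r, ci_r], [si_c.real, ci_c.real], rtol=1e-10)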
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/special/tests/test_sici.py
""" ================================================= Orthogonal distance regression (:mod:`scipy.odr`) ================================================= .. currentmodule:: scipy.odr Package Content =============== .. autosummary:: :toctree: generated/ Data -- The data to fit. RealData -- Data with weights as actual std. dev.s and/or covariances. Model -- Stores information about the function to be fit. ODR -- Gathers all info & manages the main fitting routine. Output -- Result from the fit. odr -- Low-level function for ODR. OdrWarning -- Warning about potential problems when running ODR. OdrError -- Error exception. OdrStop -- Stop exception. Prebuilt models: .. autosummary:: polynomial .. data:: exponential .. data:: multilinear .. data:: unilinear .. data:: quadratic .. data:: polynomial Usage information ================= Introduction ------------ Why Orthogonal Distance Regression (ODR)? Sometimes one has measurement errors in the explanatory (a.k.a., "independent") variable(s), not just the response (a.k.a., "dependent") variable(s). Ordinary Least Squares (OLS) fitting procedures treat the data for explanatory variables as fixed, i.e., not subject to error of any kind. Furthermore, OLS procedures require that the response variables be an explicit function of the explanatory variables; sometimes making the equation explicit is impractical and/or introduces errors. ODR can handle both of these cases with ease, and can even reduce to the OLS case if that is sufficient for the problem. ODRPACK is a FORTRAN-77 library for performing ODR with possibly non-linear fitting functions. It uses a modified trust-region Levenberg-Marquardt-type algorithm [1]_ to estimate the function parameters. The fitting functions are provided by Python functions operating on NumPy arrays. The required derivatives may be provided by Python functions as well, or may be estimated numerically. ODRPACK can do explicit or implicit ODR fits, or it can do OLS. Input and output variables may be multidimensional. Weights can be provided to account for different variances of the observations, and even covariances between dimensions of the variables. The `scipy.odr` package offers an object-oriented interface to ODRPACK, in addition to the low-level `odr` function. Additional background information about ODRPACK can be found in the `ODRPACK User's Guide <https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading which is recommended. Basic usage ----------- 1. Define the function you want to fit against.:: def f(B, x): '''Linear function y = m*x + b''' # B is a vector of the parameters. # x is an array of the current x values. # x is in the same format as the x passed to Data or RealData. # # Return an array in the same format as y passed to Data or RealData. return B[0]*x + B[1] 2. Create a Model.:: linear = Model(f) 3. Create a Data or RealData instance.:: mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) or, when the actual covariances are known:: mydata = RealData(x, y, sx=sx, sy=sy) 4. Instantiate ODR with your data, model and initial parameter estimate.:: myodr = ODR(mydata, linear, beta0=[1., 2.]) 5. Run the fit.:: myoutput = myodr.run() 6. Examine output.:: myoutput.pprint() References ---------- .. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," in "Statistical analysis of measurement error models and applications: proceedings of the AMS-IMS-SIAM joint summer research conference held June 10-16, 1989," Contemporary Mathematics, vol. 112, pg. 186, 1990. 
""" # version: 0.7 # author: Robert Kern <robert.kern@gmail.com> # date: 2006-09-21 from .odrpack import * from .models import * __all__ = [s for s in dir() if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/odr/__init__.py
import threading

import scipy._lib.decorator


__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']


class ReentrancyError(RuntimeError):
    pass


class ReentrancyLock(object):
    """
    Threading lock that raises an exception for reentrant calls.

    Calls from different threads are serialized, and nested calls from the
    same thread result in an error.

    The object can be used as a context manager or to decorate functions
    via the decorate() method.

    """

    def __init__(self, err_msg):
        self._rlock = threading.RLock()
        self._entered = False
        self._err_msg = err_msg

    def __enter__(self):
        self._rlock.acquire()
        if self._entered:
            self._rlock.release()
            raise ReentrancyError(self._err_msg)
        self._entered = True

    def __exit__(self, type, value, traceback):
        self._entered = False
        self._rlock.release()

    def decorate(self, func):
        def caller(func, *a, **kw):
            with self:
                return func(*a, **kw)
        return scipy._lib.decorator.decorate(func, caller)


def non_reentrant(err_msg=None):
    """
    Decorate a function with a threading lock and prevent reentrant calls.
    """
    def decorator(func):
        msg = err_msg
        if msg is None:
            msg = "%s is not re-entrant" % func.__name__
        lock = ReentrancyLock(msg)
        return lock.decorate(func)
    return decorator
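# A minimal usage sketch for non_reentrant above ('solve' is a hypothetical
# example name): a nested call on the same thread raises ReentrancyError
# rather than being allowed through the RLock.
def _non_reentrant_demo():
    @non_reentrant()
    def solve(depth=0):
        if depth == 0:
            return solve(depth + 1)  # re-entry raises ReentrancyError
        return depth

    try:
        solve()
    except ReentrancyError as exc:
        return str(exc)  # "solve is not re-entrant"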
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/_lib/_threadsafety.py
import numpy as np from numpy import cos, sin, pi from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose, assert_, suppress_warnings) from scipy.integrate import (quadrature, romberg, romb, newton_cotes, cumtrapz, quad, simps, fixed_quad) from scipy.integrate.quadrature import AccuracyWarning class TestFixedQuad(object): def test_scalar(self): n = 4 func = lambda x: x**(2*n - 1) expected = 1/(2*n) got, _ = fixed_quad(func, 0, 1, n=n) # quadrature exact for this input assert_allclose(got, expected, rtol=1e-12) def test_vector(self): n = 4 p = np.arange(1, 2*n) func = lambda x: x**p[:,None] expected = 1/(p + 1) got, _ = fixed_quad(func, 0, 1, n=n) assert_allclose(got, expected, rtol=1e-12) class TestQuadrature(object): def quad(self, x, a, b, args): raise NotImplementedError def test_quadrature(self): # Typical function with two extra arguments: def myfunc(x, n, z): # Bessel function integrand return cos(n*x-z*sin(x))/pi val, err = quadrature(myfunc, 0, pi, (2, 1.8)) table_val = 0.30614353532540296487 assert_almost_equal(val, table_val, decimal=7) def test_quadrature_rtol(self): def myfunc(x, n, z): # Bessel function integrand return 1e90 * cos(n*x-z*sin(x))/pi val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10) table_val = 1e90 * 0.30614353532540296487 assert_allclose(val, table_val, rtol=1e-10) def test_quadrature_miniter(self): # Typical function with two extra arguments: def myfunc(x, n, z): # Bessel function integrand return cos(n*x-z*sin(x))/pi table_val = 0.30614353532540296487 for miniter in [5, 52]: val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter) assert_almost_equal(val, table_val, decimal=7) assert_(err < 1.0) def test_quadrature_single_args(self): def myfunc(x, n): return 1e90 * cos(n*x-1.8*sin(x))/pi val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10) table_val = 1e90 * 0.30614353532540296487 assert_allclose(val, table_val, rtol=1e-10) def test_romberg(self): # Typical function with two extra arguments: def myfunc(x, n, z): # Bessel function integrand return cos(n*x-z*sin(x))/pi val = romberg(myfunc, 0, pi, args=(2, 1.8)) table_val = 0.30614353532540296487 assert_almost_equal(val, table_val, decimal=7) def test_romberg_rtol(self): # Typical function with two extra arguments: def myfunc(x, n, z): # Bessel function integrand return 1e19*cos(n*x-z*sin(x))/pi val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10) table_val = 1e19*0.30614353532540296487 assert_allclose(val, table_val, rtol=1e-10) def test_romb(self): assert_equal(romb(np.arange(17)), 128) def test_romb_gh_3731(self): # Check that romb makes maximal use of data points x = np.arange(2**4+1) y = np.cos(0.2*x) val = romb(y) val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max()) assert_allclose(val, val2, rtol=1e-8, atol=0) # should be equal to romb with 2**k+1 samples with suppress_warnings() as sup: sup.filter(AccuracyWarning, "divmax .4. 
exceeded") val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4) assert_allclose(val, val3, rtol=1e-12, atol=0) def test_non_dtype(self): # Check that we work fine with functions returning float import math valmath = romberg(math.sin, 0, 1) expected_val = 0.45969769413185085 assert_almost_equal(valmath, expected_val, decimal=7) def test_newton_cotes(self): """Test the first few degrees, for evenly spaced points.""" n = 1 wts, errcoff = newton_cotes(n, 1) assert_equal(wts, n*np.array([0.5, 0.5])) assert_almost_equal(errcoff, -n**3/12.0) n = 2 wts, errcoff = newton_cotes(n, 1) assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0) assert_almost_equal(errcoff, -n**5/2880.0) n = 3 wts, errcoff = newton_cotes(n, 1) assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0) assert_almost_equal(errcoff, -n**5/6480.0) n = 4 wts, errcoff = newton_cotes(n, 1) assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0) assert_almost_equal(errcoff, -n**7/1935360.0) def test_newton_cotes2(self): """Test newton_cotes with points that are not evenly spaced.""" x = np.array([0.0, 1.5, 2.0]) y = x**2 wts, errcoff = newton_cotes(x) exact_integral = 8.0/3 numeric_integral = np.dot(wts, y) assert_almost_equal(numeric_integral, exact_integral) x = np.array([0.0, 1.4, 2.1, 3.0]) y = x**2 wts, errcoff = newton_cotes(x) exact_integral = 9.0 numeric_integral = np.dot(wts, y) assert_almost_equal(numeric_integral, exact_integral) def test_simps(self): y = np.arange(17) assert_equal(simps(y), 128) assert_equal(simps(y, dx=0.5), 64) assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32) y = np.arange(4) x = 2**y assert_equal(simps(y, x=x, even='avg'), 13.875) assert_equal(simps(y, x=x, even='first'), 13.75) assert_equal(simps(y, x=x, even='last'), 14) class TestCumtrapz(object): def test_1d(self): x = np.linspace(-2, 2, num=5) y = x y_int = cumtrapz(y, x, initial=0) y_expected = [0., -1.5, -2., -1.5, 0.] assert_allclose(y_int, y_expected) y_int = cumtrapz(y, x, initial=None) assert_allclose(y_int, y_expected[1:]) def test_y_nd_x_nd(self): x = np.arange(3 * 2 * 4).reshape(3, 2, 4) y = x y_int = cumtrapz(y, x, initial=0) y_expected = np.array([[[0., 0.5, 2., 4.5], [0., 4.5, 10., 16.5]], [[0., 8.5, 18., 28.5], [0., 12.5, 26., 40.5]], [[0., 16.5, 34., 52.5], [0., 20.5, 42., 64.5]]]) assert_allclose(y_int, y_expected) # Try with all axes shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)] for axis, shape in zip([0, 1, 2], shapes): y_int = cumtrapz(y, x, initial=3.45, axis=axis) assert_equal(y_int.shape, (3, 2, 4)) y_int = cumtrapz(y, x, initial=None, axis=axis) assert_equal(y_int.shape, shape) def test_y_nd_x_1d(self): y = np.arange(3 * 2 * 4).reshape(3, 2, 4) x = np.arange(4)**2 # Try with all axes ys_expected = ( np.array([[[4., 5., 6., 7.], [8., 9., 10., 11.]], [[40., 44., 48., 52.], [56., 60., 64., 68.]]]), np.array([[[2., 3., 4., 5.]], [[10., 11., 12., 13.]], [[18., 19., 20., 21.]]]), np.array([[[0.5, 5., 17.5], [4.5, 21., 53.5]], [[8.5, 37., 89.5], [12.5, 53., 125.5]], [[16.5, 69., 161.5], [20.5, 85., 197.5]]])) for axis, y_expected in zip([0, 1, 2], ys_expected): y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None) assert_allclose(y_int, y_expected) def test_x_none(self): y = np.linspace(-2, 2, num=5) y_int = cumtrapz(y) y_expected = [-1.5, -2., -1.5, 0.] assert_allclose(y_int, y_expected) y_int = cumtrapz(y, initial=1.23) y_expected = [1.23, -1.5, -2., -1.5, 0.] assert_allclose(y_int, y_expected) y_int = cumtrapz(y, dx=3) y_expected = [-4.5, -6., -4.5, 0.] 
assert_allclose(y_int, y_expected) y_int = cumtrapz(y, dx=3, initial=1.23) y_expected = [1.23, -4.5, -6., -4.5, 0.] assert_allclose(y_int, y_expected)
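# A small worked example of the newton_cotes convention the tests above
# rely on: with sample points passed directly and unit mean spacing,
# np.dot(weights, y) gives the integral, and the 3-point rule is exact for
# quadratics. The sample points here are illustrative.
def _newton_cotes_sketch():
    x = np.array([0.0, 1.0, 2.0])
    wts, _ = newton_cotes(x)
    approx = np.dot(wts, x**2)  # Simpson's rule over [0, 2]
    assert_allclose(approx, 8.0/3.0)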
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/integrate/tests/test_quadrature.py
""" Functions that operate on sparse matrices """ __all__ = ['count_blocks','estimate_blocksize'] from .csr import isspmatrix_csr, csr_matrix from .csc import isspmatrix_csc from ._sparsetools import csr_count_blocks def extract_diagonal(A): raise NotImplementedError('use .diagonal() instead') #def extract_diagonal(A): # """extract_diagonal(A) returns the main diagonal of A.""" # #TODO extract kth diagonal # if isspmatrix_csr(A) or isspmatrix_csc(A): # fn = getattr(sparsetools, A.format + "_diagonal") # y = empty( min(A.shape), dtype=upcast(A.dtype) ) # fn(A.shape[0],A.shape[1],A.indptr,A.indices,A.data,y) # return y # elif isspmatrix_bsr(A): # M,N = A.shape # R,C = A.blocksize # y = empty( min(M,N), dtype=upcast(A.dtype) ) # fn = sparsetools.bsr_diagonal(M//R, N//C, R, C, \ # A.indptr, A.indices, ravel(A.data), y) # return y # else: # return extract_diagonal(csr_matrix(A)) def estimate_blocksize(A,efficiency=0.7): """Attempt to determine the blocksize of a sparse matrix Returns a blocksize=(r,c) such that - A.nnz / A.tobsr( (r,c) ).nnz > efficiency """ if not (isspmatrix_csr(A) or isspmatrix_csc(A)): A = csr_matrix(A) if A.nnz == 0: return (1,1) if not 0 < efficiency < 1.0: raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0') high_efficiency = (1.0 + efficiency) / 2.0 nnz = float(A.nnz) M,N = A.shape if M % 2 == 0 and N % 2 == 0: e22 = nnz / (4 * count_blocks(A,(2,2))) else: e22 = 0.0 if M % 3 == 0 and N % 3 == 0: e33 = nnz / (9 * count_blocks(A,(3,3))) else: e33 = 0.0 if e22 > high_efficiency and e33 > high_efficiency: e66 = nnz / (36 * count_blocks(A,(6,6))) if e66 > efficiency: return (6,6) else: return (3,3) else: if M % 4 == 0 and N % 4 == 0: e44 = nnz / (16 * count_blocks(A,(4,4))) else: e44 = 0.0 if e44 > efficiency: return (4,4) elif e33 > efficiency: return (3,3) elif e22 > efficiency: return (2,2) else: return (1,1) def count_blocks(A,blocksize): """For a given blocksize=(r,c) count the number of occupied blocks in a sparse matrix A """ r,c = blocksize if r < 1 or c < 1: raise ValueError('r and c must be positive') if isspmatrix_csr(A): M,N = A.shape return csr_count_blocks(M,N,r,c,A.indptr,A.indices) elif isspmatrix_csc(A): return count_blocks(A.T,(c,r)) else: return count_blocks(csr_matrix(A),blocksize)
import numpy.testing as npt import numpy as np import pytest from scipy import stats from .common_tests import (check_normalization, check_moment, check_mean_expect, check_var_expect, check_skew_expect, check_kurt_expect, check_entropy, check_private_entropy, check_edge_support, check_named_args, check_random_state_property, check_pickling, check_rvs_broadcast, check_freezing) from scipy.stats._distr_params import distdiscrete vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) distdiscrete += [[stats.rv_discrete(values=vals), ()]] def cases_test_discrete_basic(): seen = set() for distname, arg in distdiscrete: yield distname, arg, distname not in seen seen.add(distname) @pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic()) def test_discrete_basic(distname, arg, first_case): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' np.random.seed(9765456) rvs = distfn.rvs(size=2000, *arg) supp = np.unique(rvs) m, v = distfn.stats(*arg) check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf') check_pmf_cdf(distfn, arg, distname) check_oth(distfn, arg, supp, distname + ' oth') check_edge_support(distfn, arg) alpha = 0.01 check_discrete_chisquare(distfn, arg, rvs, alpha, distname + ' chisquare') if first_case: locscale_defaults = (0,) meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf, distfn.logsf] # make sure arguments are within support spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, } k = spec_k.get(distname, 1) check_named_args(distfn, k, arg, locscale_defaults, meths) if distname != 'sample distribution': check_scale_docstring(distfn) check_random_state_property(distfn, arg) check_pickling(distfn, arg) check_freezing(distfn, arg) # Entropy check_entropy(distfn, arg, distname) if distfn.__class__._entropy != stats.rv_discrete._entropy: check_private_entropy(distfn, arg, stats.rv_discrete) @pytest.mark.parametrize('distname,arg', distdiscrete) def test_moments(distname, arg): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' m, v, s, k = distfn.stats(*arg, moments='mvsk') check_normalization(distfn, arg, distname) # compare `stats` and `moment` methods check_moment(distfn, arg, m, v, distname) check_mean_expect(distfn, arg, m, distname) check_var_expect(distfn, arg, m, v, distname) check_skew_expect(distfn, arg, m, v, s, distname) if distname not in ['zipf', 'yulesimon']: check_kurt_expect(distfn, arg, m, v, k, distname) # frozen distr moments check_moment_frozen(distfn, arg, m, 1) check_moment_frozen(distfn, arg, v+m*m, 2) @pytest.mark.parametrize('dist,shape_args', distdiscrete) def test_rvs_broadcast(dist, shape_args): # If shape_only is True, it means the _rvs method of the # distribution uses more than one random number to generate a random # variate. That means the result of using rvs with broadcasting or # with a nontrivial size will not necessarily be the same as using the # numpy.vectorize'd version of rvs(), so we can only compare the shapes # of the results, not the values. # Whether or not a distribution is in the following list is an # implementation detail of the distribution, not a requirement. If # the implementation the rvs() method of a distribution changes, this # test might also have to be changed. 
shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace'] try: distfunc = getattr(stats, dist) except TypeError: distfunc = dist dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk) loc = np.zeros(2) nargs = distfunc.numargs allargs = [] bshape = [] # Generate shape parameter arguments... for k in range(nargs): shp = (k + 3,) + (1,)*(k + 1) param_val = shape_args[k] allargs.append(np.full(shp, param_val)) bshape.insert(0, shp[0]) allargs.append(loc) bshape.append(loc.size) # bshape holds the expected shape when loc, scale, and the shape # parameters are all broadcast together. check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_]) @pytest.mark.parametrize('dist,args', distdiscrete) def test_ppf_with_loc(dist, args): try: distfn = getattr(stats, dist) except TypeError: distfn = dist #check with a negative, no and positive relocation. np.random.seed(1942349) re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)] _a, _b = distfn.support(*args) for loc in re_locs: npt.assert_array_equal( [_a-1+loc, _b+loc], [distfn.ppf(0.0, *args, loc=loc), distfn.ppf(1.0, *args, loc=loc)] ) def check_cdf_ppf(distfn, arg, supp, msg): # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer} npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg), supp, msg + '-roundtrip') npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg), supp, msg + '-roundtrip') if not hasattr(distfn, 'xk'): _a, _b = distfn.support(*arg) supp1 = supp[supp < _b] npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg), supp1 + distfn.inc, msg + ' ppf-cdf-next') # -1e-8 could cause an error if pmf < 1e-8 def check_pmf_cdf(distfn, arg, distname): if hasattr(distfn, 'xk'): index = distfn.xk else: startind = int(distfn.ppf(0.01, *arg) - 1) index = list(range(startind, startind + 10)) cdfs = distfn.cdf(index, *arg) pmfs_cum = distfn.pmf(index, *arg).cumsum() atol, rtol = 1e-10, 1e-10 if distname == 'skellam': # ncx2 accuracy atol, rtol = 1e-5, 1e-5 npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0], atol=atol, rtol=rtol) def check_moment_frozen(distfn, arg, m, k): npt.assert_allclose(distfn(*arg).moment(k), m, atol=1e-10, rtol=1e-10) def check_oth(distfn, arg, supp, msg): # checking other methods of distfn npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg), atol=1e-10, rtol=1e-10) q = np.linspace(0.01, 0.99, 20) npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg), atol=1e-10, rtol=1e-10) median_sf = distfn.isf(0.5, *arg) npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5) npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5) def check_discrete_chisquare(distfn, arg, rvs, alpha, msg): """Perform chisquare test for random sample of a discrete distribution Parameters ---------- distname : string name of distribution function arg : sequence parameters of distribution alpha : float significance level, threshold for p-value Returns ------- result : bool 0 if test passes, 1 if test fails """ wsupp = 0.05 # construct intervals with minimum mass `wsupp`. 
# intervals are left-half-open as in a cdf difference _a, _b = distfn.support(*arg) lo = int(max(_a, -1000)) high = int(min(_b, 1000)) + 1 distsupport = range(lo, high) last = 0 distsupp = [lo] distmass = [] for ii in distsupport: current = distfn.cdf(ii, *arg) if current - last >= wsupp - 1e-14: distsupp.append(ii) distmass.append(current - last) last = current if current > (1 - wsupp): break if distsupp[-1] < _b: distsupp.append(_b) distmass.append(1 - last) distsupp = np.array(distsupp) distmass = np.array(distmass) # convert intervals to right-half-open as required by histogram histsupp = distsupp + 1e-8 histsupp[0] = _a # find sample frequencies and perform chisquare test freq, hsupp = np.histogram(rvs, histsupp) chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass) npt.assert_(pval > alpha, 'chisquare - test for %s at arg = %s with pval = %s' % (msg, str(arg), str(pval))) def check_scale_docstring(distfn): if distfn.__doc__ is not None: # Docstrings can be stripped if interpreter is run with -OO npt.assert_('scale' not in distfn.__doc__)
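check_cdf_ppf above leans on the discrete inverse-cdf definition ppf(q) = min{k : cdf(k) >= q}: the ppf inverts the cdf exactly on the support, and nudging q just past cdf(k) lands on the next support point. A tiny illustration of both identities, assuming scipy is available; binom(10, 0.4) is an arbitrary choice.

import numpy as np
from scipy import stats

dist = stats.binom(10, 0.4)
k = np.arange(10)
assert np.all(dist.ppf(dist.cdf(k)) == k)             # exact roundtrip on the support
assert np.all(dist.ppf(dist.cdf(k) + 1e-8) == k + 1)  # one step to the next support point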
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/sparse/spfuncs.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from collections.abc import Iterable import warnings import numpy import operator from . import _ni_support from . import _nd_image from . import _ni_docstrings __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter'] def _invalid_origin(origin, lenw): return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) @_ni_docstrings.docfiller def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D correlation along the given axis. The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array 1-D sequence of numbers. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Examples -------- >>> from scipy.ndimage import correlate1d >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([ 8, 26, 8, 12, 7, 28, 36, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) weights = numpy.asarray(weights, dtype=numpy.float64) if weights.ndim != 1 or weights.shape[0] < 1: raise RuntimeError('no filter weights given') if not weights.flags.contiguous: weights = weights.copy() axis = _ni_support._check_axis(axis, input.ndim) if _invalid_origin(origin, len(weights)): raise ValueError('Invalid origin; origin must satisfy ' '-(len(weights) // 2) <= origin <= ' '(len(weights)-1) // 2') mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin) return output @_ni_docstrings.docfiller def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D convolution along the given axis. 
The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray 1-D sequence of numbers. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- convolve1d : ndarray Convolved array with same shape as input Examples -------- >>> from scipy.ndimage import convolve1d >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([14, 24, 4, 13, 12, 36, 27, 0]) """ weights = weights[::-1] origin = -origin if not len(weights) & 1: origin -= 1 return correlate1d(input, weights, axis, output, mode, cval, origin) def _gaussian_kernel1d(sigma, order, radius): """ Computes a 1-D Gaussian convolution kernel. """ if order < 0: raise ValueError('order must be non-negative') exponent_range = numpy.arange(order + 1) sigma2 = sigma * sigma x = numpy.arange(-radius, radius+1) phi_x = numpy.exp(-0.5 / sigma2 * x ** 2) phi_x = phi_x / phi_x.sum() if order == 0: return phi_x else: # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) # p'(x) = -1 / sigma ** 2 # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the # coefficients of q(x) q = numpy.zeros(order + 1) q[0] = 1 D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x) P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x) Q_deriv = D + P for _ in range(order): q = Q_deriv.dot(q) q = (x[:, None] ** exponent_range).dot(q) return q * phi_x @_ni_docstrings.docfiller def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0): """1-D Gaussian filter. Parameters ---------- %(input)s sigma : scalar standard deviation for Gaussian kernel %(axis)s order : int, optional An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode)s %(cval)s truncate : float, optional Truncate the filter at this many standard deviations. Default is 4.0. Returns ------- gaussian_filter1d : ndarray Examples -------- >>> from scipy.ndimage import gaussian_filter1d >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) >>> import matplotlib.pyplot as plt >>> np.random.seed(280490) >>> x = np.random.randn(101).cumsum() >>> y3 = gaussian_filter1d(x, 3) >>> y6 = gaussian_filter1d(x, 6) >>> plt.plot(x, 'k', label='original data') >>> plt.plot(y3, '--', label='filtered, sigma=3') >>> plt.plot(y6, ':', label='filtered, sigma=6') >>> plt.legend() >>> plt.grid() >>> plt.show() """ sd = float(sigma) # make the radius of the filter equal to truncate standard deviations lw = int(truncate * sd + 0.5) # Since we are calling correlate, not convolve, revert the kernel weights = _gaussian_kernel1d(sigma, order, lw)[::-1] return correlate1d(input, weights, axis, output, mode, cval, 0) @_ni_docstrings.docfiller def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0): """Multidimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : int or sequence of ints, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. 
An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode_multiple)s %(cval)s truncate : float Truncate the filter at this many standard deviations. Default is 4.0. Returns ------- gaussian_filter : ndarray Returned array of same shape as `input`. Notes ----- The multidimensional filter is implemented as a sequence of 1-D convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. Examples -------- >>> from scipy.ndimage import gaussian_filter >>> a = np.arange(50, step=2).reshape((5,5)) >>> a array([[ 0, 2, 4, 6, 8], [10, 12, 14, 16, 18], [20, 22, 24, 26, 28], [30, 32, 34, 36, 38], [40, 42, 44, 46, 48]]) >>> gaussian_filter(a, sigma=1) array([[ 4, 6, 8, 9, 11], [10, 12, 14, 15, 17], [20, 22, 24, 25, 27], [29, 31, 33, 34, 36], [35, 37, 39, 40, 42]]) >>> from scipy import misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = gaussian_filter(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output = _ni_support._get_output(output, input) orders = _ni_support._normalize_sequence(order, input.ndim) sigmas = _ni_support._normalize_sequence(sigma, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma, order, mode in axes: gaussian_filter1d(input, sigma, axis, order, output, mode, cval, truncate) input = output else: output[...] = input[...] return output @_ni_docstrings.docfiller def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Prewitt filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.prewitt(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) return output @_ni_docstrings.docfiller def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Sobel filter. 
Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.sobel(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) axis = _ni_support._check_axis(axis, input.ndim) output = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) return output @_ni_docstrings.docfiller def generic_laplace(input, derivative2, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """ N-D Laplace filter using a provided second derivative function. Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative2(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) for ii in range(1, len(axes)): tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) output += tmp else: output[...] = input[...] return output @_ni_docstrings.docfiller def laplace(input, output=None, mode="reflect", cval=0.0): """N-D Laplace filter based on approximate second derivatives. Parameters ---------- %(input)s %(output)s %(mode_multiple)s %(cval)s Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.laplace(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ def derivative2(input, axis, output, mode, cval): return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) return generic_laplace(input, derivative2, output, mode, cval) @_ni_docstrings.docfiller def gaussian_laplace(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional Laplace filter using Gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). 
Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> ascent = misc.ascent() >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> result = ndimage.gaussian_laplace(ascent, sigma=1) >>> ax1.imshow(result) >>> result = ndimage.gaussian_laplace(ascent, sigma=3) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative2(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_laplace(input, derivative2, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) @_ni_docstrings.docfiller def generic_gradient_magnitude(input, derivative, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """Gradient magnitude using a provided gradient function. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. `derivative` can assume that `input` and `output` are ndarrays. Note that the output from `derivative` is modified inplace; be careful to copy important inputs before returning them. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) numpy.multiply(output, output, output) for ii in range(1, len(axes)): tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) numpy.multiply(tmp, tmp, tmp) output += tmp # This allows the sqrt to work with a different default casting numpy.sqrt(output, output, casting='unsafe') else: output[...] = input[...] return output @_ni_docstrings.docfiller def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional gradient magnitude using Gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_gradient_magnitude : ndarray Filtered array. Has the same shape as `input`. 
Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) def _correlate_or_convolve(input, weights, output, mode, cval, origin, convolution): input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) weights = numpy.asarray(weights, dtype=numpy.float64) wshape = [ii for ii in weights.shape if ii > 0] if len(wshape) != input.ndim: raise RuntimeError('filter weights array has incorrect shape.') if convolution: weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] for ii in range(len(origins)): origins[ii] = -origins[ii] if not weights.shape[ii] & 1: origins[ii] -= 1 for origin, lenw in zip(origins, wshape): if _invalid_origin(origin, lenw): raise ValueError('Invalid origin; origin must satisfy ' '-(weights.shape[k] // 2) <= origin[k] <= ' '(weights.shape[k]-1) // 2') if not weights.flags.contiguous: weights = weights.copy() output = _ni_support._get_output(output, input) if not isinstance(mode, str) and isinstance(mode, Iterable): raise RuntimeError("A sequence of modes is not supported") mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate(input, weights, output, mode, cval, origins) return output @_ni_docstrings.docfiller def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multidimensional correlation. The array is correlated with the given kernel. Parameters ---------- %(input)s weights : ndarray array of weights, same number of dimensions as input %(output)s %(mode)s %(cval)s %(origin_multiple)s See Also -------- convolve : Convolve an image with a kernel. """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, False) @_ni_docstrings.docfiller def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multidimensional convolution. The array is convolved with the given kernel. Parameters ---------- %(input)s weights : array_like Array of weights, same number of dimensions as input %(output)s %(mode)s cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 %(origin_multiple)s Returns ------- result : ndarray The result of convolution of `input` with `weights`. See Also -------- correlate : Correlate an image with a kernel. Notes ----- Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where W is the `weights` kernel, j is the N-D spatial index over :math:`W`, I is the `input` and k is the coordinate of the center of W, specified by `origin` in the input parameters. Examples -------- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, because in this case borders (i.e., where the `weights` kernel, centered on any one value, extends beyond an edge of `input`) are treated as zeros. >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... 
[0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) >>> from scipy import ndimage >>> ndimage.convolve(a, k, mode='constant', cval=0.0) array([[11, 10, 7, 4], [10, 3, 11, 11], [15, 12, 14, 7], [12, 3, 7, 0]]) Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` with 1.0's (and then extracting only the original region of the result). >>> ndimage.convolve(a, k, mode='constant', cval=1.0) array([[13, 11, 8, 7], [11, 3, 11, 14], [16, 12, 14, 10], [15, 6, 10, 5]]) With ``mode='reflect'`` (the default), outer values are reflected at the edge of `input` to fill in missing values. >>> b = np.array([[2, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]]) >>> ndimage.convolve(b, k, mode='reflect') array([[5, 0, 0], [3, 0, 0], [1, 0, 0]]) This includes diagonally at the corners. >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) >>> ndimage.convolve(b, k) array([[4, 2, 0], [3, 2, 0], [1, 1, 0]]) With ``mode='nearest'``, the single nearest value in to an edge in `input` is repeated as many times as needed to match the overlapping `weights`. >>> c = np.array([[2, 0, 1], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, True) @_ni_docstrings.docfiller def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. Parameters ---------- %(input)s size : int length of uniform filter %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Examples -------- >>> from scipy.ndimage import uniform_filter1d >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([4, 3, 4, 1, 4, 6, 6, 3]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin) return output @_ni_docstrings.docfiller def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, origin=0): """Multidimensional uniform filter. Parameters ---------- %(input)s size : int or sequence of ints, optional The sizes of the uniform filter are given for each axis as a sequence, or as a single number, in which case the size is equal for all axes. %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- uniform_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- The multidimensional filter is implemented as a sequence of 1-D uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. 
Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.uniform_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output = _ni_support._get_output(output, input) sizes = _ni_support._normalize_sequence(size, input.ndim) origins = _ni_support._normalize_sequence(origin, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if len(axes) > 0: for axis, size, origin, mode in axes: uniform_filter1d(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] return output @_ni_docstrings.docfiller def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Notes ----- This function implements the MINLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import minimum_filter1d >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([2, 0, 0, 0, 1, 1, 0, 0]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1) return output @_ni_docstrings.docfiller def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. Parameters ---------- %(input)s size : int Length along which to calculate the 1-D maximum. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s Returns ------- maximum1d : ndarray, None Maximum-filtered array with same shape as input. None if `output` is not None Notes ----- This function implements the MAXLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. 
[2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import maximum_filter1d >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([8, 8, 8, 4, 9, 9, 9, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = _ni_support._check_axis(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0) return output def _min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, minimum): if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) if structure is None: if footprint is None: if size is None: raise RuntimeError("no footprint provided") separable = True else: footprint = numpy.asarray(footprint, dtype=bool) if not footprint.any(): raise ValueError("All-zero footprint is not supported.") if footprint.all(): size = footprint.shape footprint = None separable = True else: separable = False else: structure = numpy.asarray(structure, dtype=numpy.float64) separable = False if footprint is None: footprint = numpy.ones(structure.shape, bool) else: footprint = numpy.asarray(footprint, dtype=bool) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) origins = _ni_support._normalize_sequence(origin, input.ndim) if separable: sizes = _ni_support._normalize_sequence(size, input.ndim) modes = _ni_support._normalize_sequence(mode, input.ndim) axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: filter_ = minimum_filter1d else: filter_ = maximum_filter1d if len(axes) > 0: for axis, size, origin, mode in axes: filter_(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] else: fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() if structure is not None: if len(structure.shape) != input.ndim: raise RuntimeError('structure array has incorrect shape') if not structure.flags.contiguous: structure = structure.copy() if not isinstance(mode, str) and isinstance(mode, Iterable): raise RuntimeError( "A sequence of modes is not supported for non-separable " "footprints") mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter(input, footprint, structure, output, mode, cval, origins, minimum) return output @_ni_docstrings.docfiller def minimum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multidimensional minimum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- minimum_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- A sequence of modes (one per axis) is only supported when the footprint is separable. 
Otherwise, a single mode string must be provided. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.minimum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 1) @_ni_docstrings.docfiller def maximum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multidimensional maximum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- maximum_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- A sequence of modes (one per axis) is only supported when the footprint is separable. Otherwise, a single mode string must be provided. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.maximum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0) @_ni_docstrings.docfiller def _rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, operation='rank'): if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() filter_size = numpy.where(footprint, 1, 0).sum() if operation == 'median': rank = filter_size // 2 elif operation == 'percentile': percentile = rank if percentile < 0.0: percentile += 100.0 if percentile < 0 or percentile > 100: raise RuntimeError('invalid percentile') if percentile == 100.0: rank = filter_size - 1 else: rank = int(float(filter_size) * percentile / 100.0) if rank < 0: rank += filter_size if rank < 0 or rank >= filter_size: raise RuntimeError('rank not within filter footprint size') if rank == 0: return minimum_filter(input, None, footprint, output, mode, cval, origins) elif rank == filter_size - 1: return maximum_filter(input, None, footprint, output, mode, cval, origins) else: output = _ni_support._get_output(output, input) if not isinstance(mode, str) and isinstance(mode, Iterable): raise RuntimeError( "A sequence of modes is not supported by non-separable rank " "filters") mode = _ni_support._extend_mode_to_code(mode) 
_nd_image.rank_filter(input, rank, footprint, output, mode, cval, origins) return output @_ni_docstrings.docfiller def rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multidimensional rank filter. Parameters ---------- %(input)s rank : int The rank parameter may be less then zero, i.e., rank = -1 indicates the largest element. %(size_foot)s %(output)s %(mode)s %(cval)s %(origin_multiple)s Returns ------- rank_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.rank_filter(ascent, rank=42, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ rank = operator.index(rank) return _rank_filter(input, rank, size, footprint, output, mode, cval, origin, 'rank') @_ni_docstrings.docfiller def median_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a multidimensional median filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode)s %(cval)s %(origin_multiple)s Returns ------- median_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.median_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, 0, size, footprint, output, mode, cval, origin, 'median') @_ni_docstrings.docfiller def percentile_filter(input, percentile, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a multidimensional percentile filter. Parameters ---------- %(input)s percentile : scalar The percentile parameter may be less then zero, i.e., percentile = -20 equals percentile = 80 %(size_foot)s %(output)s %(mode)s %(cval)s %(origin_multiple)s Returns ------- percentile_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, percentile, size, footprint, output, mode, cval, origin, 'percentile') @_ni_docstrings.docfiller def generic_filter1d(input, function, filter_size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a 1-D filter along the given axis. `generic_filter1d` iterates over the lines of the array, calling the given function at each line. The arguments of the line are the input line, and the output line. The input and output lines are 1-D double arrays. The input line is extended appropriately according to the filter size and origin. The output line must be modified in-place with the result. 
Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply along given axis. filter_size : scalar Length of the filter. %(axis)s %(output)s %(mode)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int function(double *input_line, npy_intp input_length, double *output_line, npy_intp output_length, void *user_data) int function(double *input_line, intptr_t input_length, double *output_line, intptr_t output_length, void *user_data) The calling function iterates over the lines of the input and output arrays, calling the callback function at each line. The current line is extended according to the border conditions set by the calling function, and the result is copied into the array that is passed through ``input_line``. The length of the input line (after extension) is passed through ``input_length``. The callback function should apply the filter and store the result in the array passed through ``output_line``. The length of the output line is passed through ``output_length``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) if filter_size < 1: raise RuntimeError('invalid filter size') axis = _ni_support._check_axis(axis, input.ndim) if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= filter_size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter1d(input, function, filter_size, axis, output, mode, cval, origin, extra_arguments, extra_keywords) return output @_ni_docstrings.docfiller def generic_filter(input, function, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a multidimensional filter using the given function. At each element the provided function is called. The input values within the filter footprint at that element are passed to the function as a 1-D array of double values. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply at each element. %(size_foot)s %(output)s %(mode)s %(cval)s %(origin_multiple)s %(extra_arguments)s %(extra_keywords)s Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int callback(double *buffer, npy_intp filter_size, double *return_value, void *user_data) int callback(double *buffer, intptr_t filter_size, double *return_value, void *user_data) The calling function iterates over the elements of the input and output arrays, calling the callback function at each element. 
The elements within the footprint of the filter at the current element are passed through the ``buffer`` parameter, and the number of elements within the footprint through ``filter_size``. The calculated value is returned in ``return_value``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() output = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter(input, function, footprint, output, mode, cval, origins, extra_arguments, extra_keywords) return output
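A quick self-consistency sketch for the filters defined above, assuming scipy.ndimage is importable: generic_filter with np.median over the same footprint should reproduce median_filter, and, for an odd-length kernel, convolve1d should match correlate1d with the weights reversed, which is exactly how convolve1d is implemented above.

import numpy as np
from scipy import ndimage

a = np.arange(25, dtype=float).reshape(5, 5)
# generic_filter calls np.median on each 3x3 neighborhood (mode='reflect' by default).
assert np.allclose(ndimage.median_filter(a, size=3),
                   ndimage.generic_filter(a, np.median, size=3))

x = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
w = np.array([1., 2., 3.])
# Convolution flips the kernel relative to correlation.
assert np.allclose(ndimage.convolve1d(x, w),
                   ndimage.correlate1d(x, w[::-1]))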
import numpy.testing as npt import numpy as np import pytest from scipy import stats from .common_tests import (check_normalization, check_moment, check_mean_expect, check_var_expect, check_skew_expect, check_kurt_expect, check_entropy, check_private_entropy, check_edge_support, check_named_args, check_random_state_property, check_pickling, check_rvs_broadcast, check_freezing) from scipy.stats._distr_params import distdiscrete vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) distdiscrete += [[stats.rv_discrete(values=vals), ()]] def cases_test_discrete_basic(): seen = set() for distname, arg in distdiscrete: yield distname, arg, distname not in seen seen.add(distname) @pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic()) def test_discrete_basic(distname, arg, first_case): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' np.random.seed(9765456) rvs = distfn.rvs(size=2000, *arg) supp = np.unique(rvs) m, v = distfn.stats(*arg) check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf') check_pmf_cdf(distfn, arg, distname) check_oth(distfn, arg, supp, distname + ' oth') check_edge_support(distfn, arg) alpha = 0.01 check_discrete_chisquare(distfn, arg, rvs, alpha, distname + ' chisquare') if first_case: locscale_defaults = (0,) meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf, distfn.logsf] # make sure arguments are within support spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, } k = spec_k.get(distname, 1) check_named_args(distfn, k, arg, locscale_defaults, meths) if distname != 'sample distribution': check_scale_docstring(distfn) check_random_state_property(distfn, arg) check_pickling(distfn, arg) check_freezing(distfn, arg) # Entropy check_entropy(distfn, arg, distname) if distfn.__class__._entropy != stats.rv_discrete._entropy: check_private_entropy(distfn, arg, stats.rv_discrete) @pytest.mark.parametrize('distname,arg', distdiscrete) def test_moments(distname, arg): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' m, v, s, k = distfn.stats(*arg, moments='mvsk') check_normalization(distfn, arg, distname) # compare `stats` and `moment` methods check_moment(distfn, arg, m, v, distname) check_mean_expect(distfn, arg, m, distname) check_var_expect(distfn, arg, m, v, distname) check_skew_expect(distfn, arg, m, v, s, distname) if distname not in ['zipf', 'yulesimon']: check_kurt_expect(distfn, arg, m, v, k, distname) # frozen distr moments check_moment_frozen(distfn, arg, m, 1) check_moment_frozen(distfn, arg, v+m*m, 2) @pytest.mark.parametrize('dist,shape_args', distdiscrete) def test_rvs_broadcast(dist, shape_args): # If shape_only is True, it means the _rvs method of the # distribution uses more than one random number to generate a random # variate. That means the result of using rvs with broadcasting or # with a nontrivial size will not necessarily be the same as using the # numpy.vectorize'd version of rvs(), so we can only compare the shapes # of the results, not the values. # Whether or not a distribution is in the following list is an # implementation detail of the distribution, not a requirement. If # the implementation the rvs() method of a distribution changes, this # test might also have to be changed. 
shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace'] try: distfunc = getattr(stats, dist) except TypeError: distfunc = dist dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk) loc = np.zeros(2) nargs = distfunc.numargs allargs = [] bshape = [] # Generate shape parameter arguments... for k in range(nargs): shp = (k + 3,) + (1,)*(k + 1) param_val = shape_args[k] allargs.append(np.full(shp, param_val)) bshape.insert(0, shp[0]) allargs.append(loc) bshape.append(loc.size) # bshape holds the expected shape when loc, scale, and the shape # parameters are all broadcast together. check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_]) @pytest.mark.parametrize('dist,args', distdiscrete) def test_ppf_with_loc(dist, args): try: distfn = getattr(stats, dist) except TypeError: distfn = dist #check with a negative, no and positive relocation. np.random.seed(1942349) re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)] _a, _b = distfn.support(*args) for loc in re_locs: npt.assert_array_equal( [_a-1+loc, _b+loc], [distfn.ppf(0.0, *args, loc=loc), distfn.ppf(1.0, *args, loc=loc)] ) def check_cdf_ppf(distfn, arg, supp, msg): # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer} npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg), supp, msg + '-roundtrip') npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg), supp, msg + '-roundtrip') if not hasattr(distfn, 'xk'): _a, _b = distfn.support(*arg) supp1 = supp[supp < _b] npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg), supp1 + distfn.inc, msg + ' ppf-cdf-next') # -1e-8 could cause an error if pmf < 1e-8 def check_pmf_cdf(distfn, arg, distname): if hasattr(distfn, 'xk'): index = distfn.xk else: startind = int(distfn.ppf(0.01, *arg) - 1) index = list(range(startind, startind + 10)) cdfs = distfn.cdf(index, *arg) pmfs_cum = distfn.pmf(index, *arg).cumsum() atol, rtol = 1e-10, 1e-10 if distname == 'skellam': # ncx2 accuracy atol, rtol = 1e-5, 1e-5 npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0], atol=atol, rtol=rtol) def check_moment_frozen(distfn, arg, m, k): npt.assert_allclose(distfn(*arg).moment(k), m, atol=1e-10, rtol=1e-10) def check_oth(distfn, arg, supp, msg): # checking other methods of distfn npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg), atol=1e-10, rtol=1e-10) q = np.linspace(0.01, 0.99, 20) npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg), atol=1e-10, rtol=1e-10) median_sf = distfn.isf(0.5, *arg) npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5) npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5) def check_discrete_chisquare(distfn, arg, rvs, alpha, msg): """Perform chisquare test for random sample of a discrete distribution Parameters ---------- distname : string name of distribution function arg : sequence parameters of distribution alpha : float significance level, threshold for p-value Returns ------- result : bool 0 if test passes, 1 if test fails """ wsupp = 0.05 # construct intervals with minimum mass `wsupp`. 
# intervals are left-half-open as in a cdf difference _a, _b = distfn.support(*arg) lo = int(max(_a, -1000)) high = int(min(_b, 1000)) + 1 distsupport = range(lo, high) last = 0 distsupp = [lo] distmass = [] for ii in distsupport: current = distfn.cdf(ii, *arg) if current - last >= wsupp - 1e-14: distsupp.append(ii) distmass.append(current - last) last = current if current > (1 - wsupp): break if distsupp[-1] < _b: distsupp.append(_b) distmass.append(1 - last) distsupp = np.array(distsupp) distmass = np.array(distmass) # convert intervals to right-half-open as required by histogram histsupp = distsupp + 1e-8 histsupp[0] = _a # find sample frequencies and perform chisquare test freq, hsupp = np.histogram(rvs, histsupp) chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass) npt.assert_(pval > alpha, 'chisquare - test for %s at arg = %s with pval = %s' % (msg, str(arg), str(pval))) def check_scale_docstring(distfn): if distfn.__doc__ is not None: # Docstrings can be stripped if interpreter is run with -OO npt.assert_('scale' not in distfn.__doc__)
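check_pmf_cdf above compares increments rather than absolute values, since the cumulative-pmf window can start mid-support. A minimal illustration of the identity it asserts, assuming scipy is available; geom(0.3) and the window 1..10 are arbitrary choices.

import numpy as np
from scipy import stats

dist = stats.geom(0.3)
k = np.arange(1, 11)
cdfs = dist.cdf(k)
pmfs_cum = dist.pmf(k).cumsum()
# Both sides accumulate the same pmf values over the window, so their
# differences from the first entry agree to high precision.
assert np.allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0], atol=1e-10)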
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/ndimage/filters.py
"""Precompute series coefficients for log-Gamma.""" try: import mpmath except ImportError: pass def stirling_series(N): with mpmath.workdps(100): coeffs = [mpmath.bernoulli(2*n)/(2*n*(2*n - 1)) for n in range(1, N + 1)] return coeffs def taylor_series_at_1(N): coeffs = [] with mpmath.workdps(100): coeffs.append(-mpmath.euler) for n in range(2, N + 1): coeffs.append((-1)**n*mpmath.zeta(n)/n) return coeffs def main(): print(__doc__) print() stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0) for x in stirling_series(8)[::-1]] taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0) for x in taylor_series_at_1(23)[::-1]] print("Stirling series coefficients") print("----------------------------") print("\n".join(stirling_coeffs)) print() print("Taylor series coefficients") print("--------------------------") print("\n".join(taylor_coeffs)) print() if __name__ == '__main__': main()
import numpy.testing as npt import numpy as np import pytest from scipy import stats from .common_tests import (check_normalization, check_moment, check_mean_expect, check_var_expect, check_skew_expect, check_kurt_expect, check_entropy, check_private_entropy, check_edge_support, check_named_args, check_random_state_property, check_pickling, check_rvs_broadcast, check_freezing) from scipy.stats._distr_params import distdiscrete vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) distdiscrete += [[stats.rv_discrete(values=vals), ()]] def cases_test_discrete_basic(): seen = set() for distname, arg in distdiscrete: yield distname, arg, distname not in seen seen.add(distname) @pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic()) def test_discrete_basic(distname, arg, first_case): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' np.random.seed(9765456) rvs = distfn.rvs(size=2000, *arg) supp = np.unique(rvs) m, v = distfn.stats(*arg) check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf') check_pmf_cdf(distfn, arg, distname) check_oth(distfn, arg, supp, distname + ' oth') check_edge_support(distfn, arg) alpha = 0.01 check_discrete_chisquare(distfn, arg, rvs, alpha, distname + ' chisquare') if first_case: locscale_defaults = (0,) meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf, distfn.logsf] # make sure arguments are within support spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, } k = spec_k.get(distname, 1) check_named_args(distfn, k, arg, locscale_defaults, meths) if distname != 'sample distribution': check_scale_docstring(distfn) check_random_state_property(distfn, arg) check_pickling(distfn, arg) check_freezing(distfn, arg) # Entropy check_entropy(distfn, arg, distname) if distfn.__class__._entropy != stats.rv_discrete._entropy: check_private_entropy(distfn, arg, stats.rv_discrete) @pytest.mark.parametrize('distname,arg', distdiscrete) def test_moments(distname, arg): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' m, v, s, k = distfn.stats(*arg, moments='mvsk') check_normalization(distfn, arg, distname) # compare `stats` and `moment` methods check_moment(distfn, arg, m, v, distname) check_mean_expect(distfn, arg, m, distname) check_var_expect(distfn, arg, m, v, distname) check_skew_expect(distfn, arg, m, v, s, distname) if distname not in ['zipf', 'yulesimon']: check_kurt_expect(distfn, arg, m, v, k, distname) # frozen distr moments check_moment_frozen(distfn, arg, m, 1) check_moment_frozen(distfn, arg, v+m*m, 2) @pytest.mark.parametrize('dist,shape_args', distdiscrete) def test_rvs_broadcast(dist, shape_args): # If shape_only is True, it means the _rvs method of the # distribution uses more than one random number to generate a random # variate. That means the result of using rvs with broadcasting or # with a nontrivial size will not necessarily be the same as using the # numpy.vectorize'd version of rvs(), so we can only compare the shapes # of the results, not the values. # Whether or not a distribution is in the following list is an # implementation detail of the distribution, not a requirement. If # the implementation the rvs() method of a distribution changes, this # test might also have to be changed. 
aeklant/scipy
scipy/stats/tests/test_discrete_basic.py
scipy/special/_precompute/loggamma.py
# -*- coding: utf-8 -*-

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#         Vincent Dubourg <vincent.dubourg@gmail.com>
#         (mostly translation, see implementation details)
# License: BSD 3 clause

"""
The :mod:`sklearn.gaussian_process` module implements Gaussian Process
based regression and classification.
"""

from ._gpr import GaussianProcessRegressor
from ._gpc import GaussianProcessClassifier
from . import kernels

__all__ = ['GaussianProcessRegressor', 'GaussianProcessClassifier',
           'kernels']
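A minimal usage sketch for the main estimator this package exposes (the toy data is my own choosing, not from the module):

    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF

    # Fit a GP regressor to a noise-free 1-D function and query the
    # posterior mean and standard deviation at a new point.
    X = np.linspace(0, 5, 20).reshape(-1, 1)
    y = np.sin(X).ravel()
    gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0)).fit(X, y)
    mean, std = gpr.predict(np.array([[2.5]]), return_std=True)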
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import itertools import numpy as np import pytest from sklearn.utils._testing import assert_array_almost_equal from sklearn import datasets from sklearn.covariance import empirical_covariance, MinCovDet from sklearn.covariance import fast_mcd X = datasets.load_iris().data X_1d = X[:, 0] n_samples, n_features = X.shape def test_mcd(): # Tests the FastMCD algorithm implementation # Small data set # test without outliers (random independent normal data) launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80) # test with a contaminated data set (medium contamination) launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70) # test with a contaminated data set (strong contamination) launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50) # Medium data set launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540) # Large data set launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870) # 1D data set launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350) def test_fast_mcd_on_invalid_input(): X = np.arange(100) msg = 'Expected 2D array, got 1D array instead' with pytest.raises(ValueError, match=msg): fast_mcd(X) def test_mcd_class_on_invalid_input(): X = np.arange(100) mcd = MinCovDet() msg = 'Expected 2D array, got 1D array instead' with pytest.raises(ValueError, match=msg): mcd.fit(X) def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support): rand_gen = np.random.RandomState(0) data = rand_gen.randn(n_samples, n_features) # add some outliers outliers_index = rand_gen.permutation(n_samples)[:n_outliers] outliers_offset = 10. * \ (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5) data[outliers_index] += outliers_offset inliers_mask = np.ones(n_samples).astype(bool) inliers_mask[outliers_index] = False pure_data = data[inliers_mask] # compute MCD by fitting an object mcd_fit = MinCovDet(random_state=rand_gen).fit(data) T = mcd_fit.location_ S = mcd_fit.covariance_ H = mcd_fit.support_ # compare with the estimates learnt from the inliers error_location = np.mean((pure_data.mean(0) - T) ** 2) assert(error_location < tol_loc) error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2) assert(error_cov < tol_cov) assert(np.sum(H) >= tol_support) assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_) def test_mcd_issue1127(): # Check that the code does not break with X.shape = (3, 1) # (i.e. n_support = n_samples) rnd = np.random.RandomState(0) X = rnd.normal(size=(3, 1)) mcd = MinCovDet() mcd.fit(X) def test_mcd_issue3367(): # Check that MCD completes when the covariance matrix is singular # i.e. one of the rows and columns are all zeros rand_gen = np.random.RandomState(0) # Think of these as the values for X and Y -> 10 values between -5 and 5 data_values = np.linspace(-5, 5, 10).tolist() # Get the cartesian product of all possible coordinate pairs from above set data = np.array(list(itertools.product(data_values, data_values))) # Add a third column that's all zeros to make our data a set of point # within a plane, which means that the covariance matrix will be singular data = np.hstack((data, np.zeros((data.shape[0], 1)))) # The below line of code should raise an exception if the covariance matrix # is singular. As a further test, since we have points in XYZ, the # principle components (Eigenvectors) of these directly relate to the # geometry of the points. 
Since it's a plane, we should be able to test # that the Eigenvector that corresponds to the smallest Eigenvalue is the # plane normal, specifically [0, 0, 1], since everything is in the XY plane # (as I've set it up above). To do this one would start by: # # evals, evecs = np.linalg.eigh(mcd_fit.covariance_) # normal = evecs[:, np.argmin(evals)] # # After which we need to assert that our `normal` is equal to [0, 0, 1]. # Do note that there is floating point error associated with this, so it's # best to subtract the two and then compare some small tolerance (e.g. # 1e-12). MinCovDet(random_state=rand_gen).fit(data) def test_mcd_support_covariance_is_zero(): # Check that MCD returns a ValueError with informative message when the # covariance of the support data is equal to 0. X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1]) X_1 = X_1.reshape(-1, 1) X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3]) X_2 = X_2.reshape(-1, 1) msg = ('The covariance matrix of the support data is equal to 0, try to ' 'increase support_fraction') for X in [X_1, X_2]: with pytest.raises(ValueError, match=msg): MinCovDet().fit(X) def test_mcd_increasing_det_warning(): # Check that a warning is raised if we observe increasing determinants # during the c_step. In theory the sequence of determinants should be # decreasing. Increasing determinants are likely due to ill-conditioned # covariance matrices that result in poor precision matrices. X = [[5.1, 3.5, 1.4, 0.2], [4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2], [4.6, 3.1, 1.5, 0.2], [5.0, 3.6, 1.4, 0.2], [4.6, 3.4, 1.4, 0.3], [5.0, 3.4, 1.5, 0.2], [4.4, 2.9, 1.4, 0.2], [4.9, 3.1, 1.5, 0.1], [5.4, 3.7, 1.5, 0.2], [4.8, 3.4, 1.6, 0.2], [4.8, 3.0, 1.4, 0.1], [4.3, 3.0, 1.1, 0.1], [5.1, 3.5, 1.4, 0.3], [5.7, 3.8, 1.7, 0.3], [5.4, 3.4, 1.7, 0.2], [4.6, 3.6, 1.0, 0.2], [5.0, 3.0, 1.6, 0.2], [5.2, 3.5, 1.5, 0.2]] mcd = MinCovDet(random_state=1) warn_msg = "Determinant has increased" with pytest.warns(RuntimeWarning, match=warn_msg): mcd.fit(X)
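The comment in test_mcd_issue3367 sketches, but does not run, the eigenvector check. A hypothetical completion under the same setup (planar data whose third coordinate is identically zero):

    import numpy as np
    from sklearn.covariance import MinCovDet

    # With all points in the z = 0 plane, the covariance eigenvector with
    # the smallest eigenvalue should be the plane normal, up to sign.
    rng = np.random.RandomState(0)
    data = np.column_stack([rng.randn(200), rng.randn(200), np.zeros(200)])
    mcd_fit = MinCovDet(random_state=rng).fit(data)
    evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    normal = evecs[:, np.argmin(evals)]
    # loose tolerance, since eigh carries floating point error
    assert np.allclose(np.abs(normal), [0.0, 0.0, 1.0], atol=1e-8)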
kevin-intel/scikit-learn
sklearn/covariance/tests/test_robust_covariance.py
sklearn/gaussian_process/__init__.py
"""Isomap for manifold learning""" # Author: Jake Vanderplas -- <vanderplas@astro.washington.edu> # License: BSD 3 clause (C) 2011 import numpy as np from ..base import BaseEstimator, TransformerMixin from ..neighbors import NearestNeighbors, kneighbors_graph from ..utils.validation import check_is_fitted from ..utils.graph import graph_shortest_path from ..decomposition import KernelPCA from ..preprocessing import KernelCenterer class Isomap(TransformerMixin, BaseEstimator): """Isomap Embedding Non-linear dimensionality reduction through Isometric Mapping Read more in the :ref:`User Guide <isomap>`. Parameters ---------- n_neighbors : int, default=5 number of neighbors to consider for each point. n_components : int, default=2 number of coordinates for the manifold eigen_solver : {'auto', 'arpack', 'dense'}, default='auto' 'auto' : Attempt to choose the most efficient solver for the given problem. 'arpack' : Use Arnoldi decomposition to find the eigenvalues and eigenvectors. 'dense' : Use a direct solver (i.e. LAPACK) for the eigenvalue decomposition. tol : float, default=0 Convergence tolerance passed to arpack or lobpcg. not used if eigen_solver == 'dense'. max_iter : int, default=None Maximum number of iterations for the arpack solver. not used if eigen_solver == 'dense'. path_method : {'auto', 'FW', 'D'}, default='auto' Method to use in finding shortest path. 'auto' : attempt to choose the best algorithm automatically. 'FW' : Floyd-Warshall algorithm. 'D' : Dijkstra's algorithm. neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \ default='auto' Algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance. n_jobs : int or None, default=None The number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. metric : string, or callable, default="minkowski" The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a :term:`Glossary <sparse graph>`. .. versionadded:: 0.22 p : int, default=2 Parameter for the Minkowski metric from sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. .. versionadded:: 0.22 metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.22 Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. kernel_pca_ : object :class:`~sklearn.decomposition.KernelPCA` object used to implement the embedding. nbrs_ : sklearn.neighbors.NearestNeighbors instance Stores nearest neighbors instance, including BallTree or KDtree if applicable. dist_matrix_ : array-like, shape (n_samples, n_samples) Stores the geodesic distance matrix of training data. n_features_in_ : int Number of features seen during :term:`fit`. .. 
versionadded:: 0.24 Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.manifold import Isomap >>> X, _ = load_digits(return_X_y=True) >>> X.shape (1797, 64) >>> embedding = Isomap(n_components=2) >>> X_transformed = embedding.fit_transform(X[:100]) >>> X_transformed.shape (100, 2) References ---------- .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric framework for nonlinear dimensionality reduction. Science 290 (5500) """ def __init__(self, *, n_neighbors=5, n_components=2, eigen_solver='auto', tol=0, max_iter=None, path_method='auto', neighbors_algorithm='auto', n_jobs=None, metric='minkowski', p=2, metric_params=None): self.n_neighbors = n_neighbors self.n_components = n_components self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.path_method = path_method self.neighbors_algorithm = neighbors_algorithm self.n_jobs = n_jobs self.metric = metric self.p = p self.metric_params = metric_params def _fit_transform(self, X): self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors, algorithm=self.neighbors_algorithm, metric=self.metric, p=self.p, metric_params=self.metric_params, n_jobs=self.n_jobs) self.nbrs_.fit(X) self.n_features_in_ = self.nbrs_.n_features_in_ self.kernel_pca_ = KernelPCA(n_components=self.n_components, kernel="precomputed", eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter, n_jobs=self.n_jobs) kng = kneighbors_graph(self.nbrs_, self.n_neighbors, metric=self.metric, p=self.p, metric_params=self.metric_params, mode='distance', n_jobs=self.n_jobs) self.dist_matrix_ = graph_shortest_path(kng, method=self.path_method, directed=False) G = self.dist_matrix_ ** 2 G *= -0.5 self.embedding_ = self.kernel_pca_.fit_transform(G) def reconstruction_error(self): """Compute the reconstruction error for the embedding. Returns ------- reconstruction_error : float Notes ----- The cost function of an isomap embedding is ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` Where D is the matrix of distances for the input data X, D_fit is the matrix of distances for the output embedding X_fit, and K is the isomap kernel: ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` """ G = -0.5 * self.dist_matrix_ ** 2 G_center = KernelCenterer().fit_transform(G) evals = self.kernel_pca_.lambdas_ return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0] def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : {array-like, sparse graph, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse graph, precomputed tree, or NearestNeighbors object. y : Ignored Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse graph, BallTree, KDTree} Training vector, where n_samples in the number of samples and n_features is the number of features. y : Ignored Returns ------- X_new : array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. 
First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : array-like, shape (n_queries, n_features) If neighbors_algorithm='precomputed', X is assumed to be a distance matrix or a sparse graph of shape (n_queries, n_samples_fit). Returns ------- X_new : array-like, shape (n_queries, n_components) """ check_is_fitted(self) distances, indices = self.nbrs_.kneighbors(X, return_distance=True) # Create the graph of shortest distances from X to # training data via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: n_samples_fit = self.nbrs_.n_samples_fit_ n_queries = distances.shape[0] G_X = np.zeros((n_queries, n_samples_fit)) for i in range(n_queries): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
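A short out-of-sample sketch of the transform path described above, on a dataset where the neighborhood graph stays connected (make_s_curve is my choice here, not mandated by the class):

    from sklearn.datasets import make_s_curve
    from sklearn.manifold import Isomap

    # Fit on most of the S-curve, then embed held-out points by linking
    # them into the training graph, as `transform` documents above.
    X, _ = make_s_curve(n_samples=300, random_state=0)
    iso = Isomap(n_neighbors=10, n_components=2).fit(X[:250])
    X_new_2d = iso.transform(X[250:])
    assert X_new_2d.shape == (50, 2)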
kevin-intel/scikit-learn
sklearn/covariance/tests/test_robust_covariance.py
sklearn/manifold/_isomap.py
from typing import List, cast import numpy as np from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency import pandas as pd from pandas.io.excel._base import BaseExcelReader class ODFReader(BaseExcelReader): """ Read tables out of OpenDocument formatted files. Parameters ---------- filepath_or_buffer : string, path to be parsed or an open readable stream. storage_options : dict, optional passed to fsspec for appropriate URLs (see ``_get_filepath_or_buffer``) """ def __init__( self, filepath_or_buffer: FilePathOrBuffer, storage_options: StorageOptions = None, ): import_optional_dependency("odf") super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): from odf.opendocument import OpenDocument return OpenDocument def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): from odf.opendocument import load return load(filepath_or_buffer) @property def empty_value(self) -> str: """Property for compat with other readers.""" return "" @property def sheet_names(self) -> List[str]: """Return a list of sheet names present in the document""" from odf.table import Table tables = self.book.getElementsByType(Table) return [t.getAttribute("name") for t in tables] def get_sheet_by_index(self, index: int): from odf.table import Table tables = self.book.getElementsByType(Table) return tables[index] def get_sheet_by_name(self, name: str): from odf.table import Table tables = self.book.getElementsByType(Table) for table in tables: if table.getAttribute("name") == name: return table self.close() raise ValueError(f"sheet {name} not found") def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: """ Parse an ODF Table into a list of lists """ from odf.table import CoveredTableCell, TableCell, TableRow covered_cell_name = CoveredTableCell().qname table_cell_name = TableCell().qname cell_names = {covered_cell_name, table_cell_name} sheet_rows = sheet.getElementsByType(TableRow) empty_rows = 0 max_row_len = 0 table: List[List[Scalar]] = [] for i, sheet_row in enumerate(sheet_rows): sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names] empty_cells = 0 table_row: List[Scalar] = [] for j, sheet_cell in enumerate(sheet_cells): if sheet_cell.qname == table_cell_name: value = self._get_cell_value(sheet_cell, convert_float) else: value = self.empty_value column_repeat = self._get_column_repeat(sheet_cell) # Queue up empty values, writing only if content succeeds them if value == self.empty_value: empty_cells += column_repeat else: table_row.extend([self.empty_value] * empty_cells) empty_cells = 0 table_row.extend([value] * column_repeat) if max_row_len < len(table_row): max_row_len = len(table_row) row_repeat = self._get_row_repeat(sheet_row) if self._is_empty_row(sheet_row): empty_rows += row_repeat else: # add blank rows to our table table.extend([[self.empty_value]] * empty_rows) empty_rows = 0 for _ in range(row_repeat): table.append(table_row) # Make our table square for row in table: if len(row) < max_row_len: row.extend([self.empty_value] * (max_row_len - len(row))) return table def _get_row_repeat(self, row) -> int: """ Return number of times this row was repeated Repeating an empty row appeared to be a common way of representing sparse rows in the table. 
""" from odf.namespaces import TABLENS return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1)) def _get_column_repeat(self, cell) -> int: from odf.namespaces import TABLENS return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1)) def _is_empty_row(self, row) -> bool: """ Helper function to find empty rows """ for column in row.childNodes: if len(column.childNodes) > 0: return False return True def _get_cell_value(self, cell, convert_float: bool) -> Scalar: from odf.namespaces import OFFICENS if str(cell) == "#N/A": return np.nan cell_type = cell.attributes.get((OFFICENS, "value-type")) if cell_type == "boolean": if str(cell) == "TRUE": return True return False if cell_type is None: return self.empty_value elif cell_type == "float": # GH5394 cell_value = float(cell.attributes.get((OFFICENS, "value"))) if convert_float: val = int(cell_value) if val == cell_value: return val return cell_value elif cell_type == "percentage": cell_value = cell.attributes.get((OFFICENS, "value")) return float(cell_value) elif cell_type == "string": return self._get_cell_string_value(cell) elif cell_type == "currency": cell_value = cell.attributes.get((OFFICENS, "value")) return float(cell_value) elif cell_type == "date": cell_value = cell.attributes.get((OFFICENS, "date-value")) return pd.to_datetime(cell_value) elif cell_type == "time": result = pd.to_datetime(str(cell)) result = cast(pd.Timestamp, result) return result.time() else: self.close() raise ValueError(f"Unrecognized type {cell_type}") def _get_cell_string_value(self, cell) -> str: """ Find and decode OpenDocument text:s tags that represent a run length encoded sequence of space characters. """ from odf.element import Element from odf.namespaces import TEXTNS from odf.text import S text_s = S().qname value = [] for fragment in cell.childNodes: if isinstance(fragment, Element): if fragment.qname == text_s: spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) value.append(" " * spaces) else: # recursive impl needed in case of nested fragments # with multiple spaces # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704 value.append(self._get_cell_string_value(fragment)) else: value.append(str(fragment)) return "".join(value)
from datetime import time, timedelta import numpy as np import pytest import pandas as pd from pandas import Series, TimedeltaIndex, isna, to_timedelta import pandas._testing as tm class TestTimedeltas: @pytest.mark.parametrize("readonly", [True, False]) def test_to_timedelta_readonly(self, readonly): # GH#34857 arr = np.array([], dtype=object) if readonly: arr.setflags(write=False) result = to_timedelta(arr) expected = to_timedelta([]) tm.assert_index_equal(result, expected) def test_to_timedelta(self): result = to_timedelta(["", ""]) assert isna(result).all() # pass thru result = to_timedelta(np.array([np.timedelta64(1, "s")])) expected = pd.Index(np.array([np.timedelta64(1, "s")])) tm.assert_index_equal(result, expected) # Series expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)]) result = to_timedelta(Series(["1d", "1days 00:00:01"])) tm.assert_series_equal(result, expected) # with units result = TimedeltaIndex( [np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")] ) expected = to_timedelta([0, 10], unit="s") tm.assert_index_equal(result, expected) # arrays of various dtypes arr = np.array([1] * 5, dtype="int64") result = to_timedelta(arr, unit="s") expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="int64") result = to_timedelta(arr, unit="m") expected = TimedeltaIndex([np.timedelta64(1, "m")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="int64") result = to_timedelta(arr, unit="h") expected = TimedeltaIndex([np.timedelta64(1, "h")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="timedelta64[s]") result = to_timedelta(arr) expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="timedelta64[D]") result = to_timedelta(arr) expected = TimedeltaIndex([np.timedelta64(1, "D")] * 5) tm.assert_index_equal(result, expected) def test_to_timedelta_dataframe(self): # GH 11776 arr = np.arange(10).reshape(2, 5) df = pd.DataFrame(np.arange(10).reshape(2, 5)) for arg in (arr, df): with pytest.raises(TypeError, match="1-d array"): to_timedelta(arg) for errors in ["ignore", "raise", "coerce"]: with pytest.raises(TypeError, match="1-d array"): to_timedelta(arg, errors=errors) def test_to_timedelta_invalid(self): # bad value for errors parameter msg = "errors must be one of" with pytest.raises(ValueError, match=msg): to_timedelta(["foo"], errors="never") # these will error msg = "invalid unit abbreviation: foo" with pytest.raises(ValueError, match=msg): to_timedelta([1, 2], unit="foo") with pytest.raises(ValueError, match=msg): to_timedelta(1, unit="foo") # time not supported ATM msg = ( "Value must be Timedelta, string, integer, float, timedelta or convertible" ) with pytest.raises(ValueError, match=msg): to_timedelta(time(second=1)) assert to_timedelta(time(second=1), errors="coerce") is pd.NaT msg = "unit abbreviation w/o a number" with pytest.raises(ValueError, match=msg): to_timedelta(["foo", "bar"]) tm.assert_index_equal( TimedeltaIndex([pd.NaT, pd.NaT]), to_timedelta(["foo", "bar"], errors="coerce"), ) tm.assert_index_equal( TimedeltaIndex(["1 day", pd.NaT, "1 min"]), to_timedelta(["1 day", "bar", "1 min"], errors="coerce"), ) # gh-13613: these should not error because errors='ignore' invalid_data = "apple" assert invalid_data == to_timedelta(invalid_data, errors="ignore") invalid_data = ["apple", "1 days"] tm.assert_numpy_array_equal( np.array(invalid_data, 
dtype=object), to_timedelta(invalid_data, errors="ignore"), ) invalid_data = pd.Index(["apple", "1 days"]) tm.assert_index_equal(invalid_data, to_timedelta(invalid_data, errors="ignore")) invalid_data = Series(["apple", "1 days"]) tm.assert_series_equal( invalid_data, to_timedelta(invalid_data, errors="ignore") ) @pytest.mark.parametrize( "val, warning", [ ("1M", FutureWarning), ("1 M", FutureWarning), ("1Y", FutureWarning), ("1 Y", FutureWarning), ("1y", FutureWarning), ("1 y", FutureWarning), ("1m", None), ("1 m", None), ("1 day", None), ("2day", None), ], ) def test_unambiguous_timedelta_values(self, val, warning): # GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y' # in pd.to_timedelta with tm.assert_produces_warning(warning, check_stacklevel=False): to_timedelta(val) def test_to_timedelta_via_apply(self): # GH 5458 expected = Series([np.timedelta64(1, "s")]) result = Series(["00:00:01"]).apply(to_timedelta) tm.assert_series_equal(result, expected) result = Series([to_timedelta("00:00:01")]) tm.assert_series_equal(result, expected) def test_to_timedelta_on_missing_values(self): # GH5438 timedelta_NaT = np.timedelta64("NaT") actual = pd.to_timedelta(Series(["00:00:01", np.nan])) expected = Series( [np.timedelta64(1000000000, "ns"), timedelta_NaT], dtype="<m8[ns]" ) tm.assert_series_equal(actual, expected) actual = pd.to_timedelta(Series(["00:00:01", pd.NaT])) tm.assert_series_equal(actual, expected) actual = pd.to_timedelta(np.nan) assert actual.value == timedelta_NaT.astype("int64") actual = pd.to_timedelta(pd.NaT) assert actual.value == timedelta_NaT.astype("int64") def test_to_timedelta_float(self): # https://github.com/pandas-dev/pandas/issues/25077 arr = np.arange(0, 1, 1e-6)[-10:] result = pd.to_timedelta(arr, unit="s") expected_asi8 = np.arange(999990000, 10 ** 9, 1000, dtype="int64") tm.assert_numpy_array_equal(result.asi8, expected_asi8) def test_to_timedelta_coerce_strings_unit(self): arr = np.array([1, 2, "error"], dtype=object) result = pd.to_timedelta(arr, unit="ns", errors="coerce") expected = pd.to_timedelta([1, 2, pd.NaT], unit="ns") tm.assert_index_equal(result, expected) def test_to_timedelta_ignore_strings_unit(self): arr = np.array([1, 2, "error"], dtype=object) result = pd.to_timedelta(arr, unit="ns", errors="ignore") tm.assert_numpy_array_equal(result, arr) def test_to_timedelta_nullable_int64_dtype(self): # GH 35574 expected = Series([timedelta(days=1), timedelta(days=2)]) result = to_timedelta(Series([1, 2], dtype="Int64"), unit="days") tm.assert_series_equal(result, expected) # IntegerArray Series with nulls expected = Series([timedelta(days=1), None]) result = to_timedelta(Series([1, None], dtype="Int64"), unit="days") tm.assert_series_equal(result, expected) @pytest.mark.parametrize( ("input", "expected"), [ ("8:53:08.71800000001", "8:53:08.718"), ("8:53:08.718001", "8:53:08.718001"), ("8:53:08.7180000001", "8:53:08.7180000001"), ("-8:53:08.71800000001", "-8:53:08.718"), ("8:53:08.7180000089", "8:53:08.718000008"), ], ) @pytest.mark.parametrize("func", [pd.Timedelta, pd.to_timedelta]) def test_to_timedelta_precision_over_nanos(self, input, expected, func): # GH: 36738 expected = pd.Timedelta(expected) result = func(input) assert result == expected def test_to_timedelta_zerodim(self): # ndarray.item() incorrectly returns int for dt64[ns] and td64[ns] dt64 = pd.Timestamp.now().to_datetime64() arg = np.array(dt64) msg = ( "Value must be Timedelta, string, integer, float, timedelta " "or convertible, not datetime64" ) with 
pytest.raises(ValueError, match=msg): to_timedelta(arg) arg2 = arg.view("m8[ns]") result = to_timedelta(arg2) assert isinstance(result, pd.Timedelta) assert result.value == dt64.view("i8")
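The behaviors these tests pin down, condensed into a few calls (a usage sketch, not part of the suite):

    import pandas as pd

    pd.to_timedelta([1, 2], unit="s")         # numeric input with a unit
    pd.to_timedelta("1days 00:00:01")         # string parsing
    pd.to_timedelta(["1 day", "bad"], errors="coerce")   # invalid -> NaT
    pd.to_timedelta("apple", errors="ignore")            # passed through as-is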
jreback/pandas
pandas/tests/tools/test_to_timedelta.py
pandas/io/excel/_odfreader.py
""" Helpers for configuring locale settings. Name `localization` is chosen to avoid overlap with builtin `locale` module. """ from contextlib import contextmanager import locale import re import subprocess from pandas._config.config import options @contextmanager def set_locale(new_locale, lc_var: int = locale.LC_ALL): """ Context manager for temporarily setting a locale. Parameters ---------- new_locale : str or tuple A string of the form <language_country>.<encoding>. For example to set the current locale to US English with a UTF8 encoding, you would pass "en_US.UTF-8". lc_var : int, default `locale.LC_ALL` The category of the locale being set. Notes ----- This is useful when you want to run a particular block of code under a particular locale, without globally setting the locale. This probably isn't thread-safe. """ current_locale = locale.getlocale() try: locale.setlocale(lc_var, new_locale) normalized_locale = locale.getlocale() if all(x is not None for x in normalized_locale): yield ".".join(normalized_locale) else: yield new_locale finally: locale.setlocale(lc_var, current_locale) def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool: """ Check to see if we can set a locale, and subsequently get the locale, without raising an Exception. Parameters ---------- lc : str The locale to attempt to set. lc_var : int, default `locale.LC_ALL` The category of the locale being set. Returns ------- bool Whether the passed locale can be set """ try: with set_locale(lc, lc_var=lc_var): pass except (ValueError, locale.Error): # horrible name for a Exception subclass return False else: return True def _valid_locales(locales, normalize): """ Return a list of normalized locales that do not throw an ``Exception`` when set. Parameters ---------- locales : str A string where each locale is separated by a newline. normalize : bool Whether to call ``locale.normalize`` on each locale. Returns ------- valid_locales : list A list of valid locales. """ return [ loc for loc in ( locale.normalize(loc.strip()) if normalize else loc.strip() for loc in locales ) if can_set_locale(loc) ] def _default_locale_getter(): return subprocess.check_output(["locale -a"], shell=True) def get_locales(prefix=None, normalize=True, locale_getter=_default_locale_getter): """ Get all the locales that are available on the system. Parameters ---------- prefix : str If not ``None`` then return only those locales with the prefix provided. For example to get all English language locales (those that start with ``"en"``), pass ``prefix="en"``. normalize : bool Call ``locale.normalize`` on the resulting list of available locales. If ``True``, only locales that can be set without throwing an ``Exception`` are returned. locale_getter : callable The function to use to retrieve the current locales. This should return a string with each locale separated by a newline character. Returns ------- locales : list of strings A list of locale strings that can be set with ``locale.setlocale()``. For example:: locale.setlocale(locale.LC_ALL, locale_string) On error will return None (no locale available, e.g. Windows) """ try: raw_locales = locale_getter() except subprocess.CalledProcessError: # Raised on (some? all?) Windows platforms because Note: "locale -a" # is not defined return None try: # raw_locales is "\n" separated list of locales # it may contain non-decodable parts, so split # extract what we can and then rejoin. 
raw_locales = raw_locales.split(b"\n") out_locales = [] for x in raw_locales: try: out_locales.append(str(x, encoding=options.display.encoding)) except UnicodeError: # 'locale -a' is used to populated 'raw_locales' and on # Redhat 7 Linux (and maybe others) prints locale names # using windows-1252 encoding. Bug only triggered by # a few special characters and when there is an # extensive list of installed locales. out_locales.append(str(x, encoding="windows-1252")) except TypeError: pass if prefix is None: return _valid_locales(out_locales, normalize) pattern = re.compile(f"{prefix}.*") found = pattern.findall("\n".join(out_locales)) return _valid_locales(found, normalize)
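Typical use of these helpers, guarded because locale availability is system-dependent (the locale name is just an example):

    import locale
    from pandas._config.localization import can_set_locale, set_locale

    # Temporarily run a block under en_US.UTF-8 if this system provides it;
    # the previous locale is restored on exit of the context manager.
    if can_set_locale("en_US.UTF-8"):
        with set_locale("en_US.UTF-8", lc_var=locale.LC_ALL) as loc:
            print(loc, locale.getlocale())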
jreback/pandas
pandas/tests/tools/test_to_timedelta.py
pandas/_config/localization.py
from typing import Optional, Type import pytest import pandas as pd import pandas._testing as tm from pandas.core import ops from .base import BaseExtensionTests class BaseOpsUtil(BaseExtensionTests): def get_op_from_name(self, op_name): return tm.get_op_from_name(op_name) def check_opname(self, s, op_name, other, exc=Exception): op = self.get_op_from_name(op_name) self._check_op(s, op, other, op_name, exc) def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: result = op(s, other) if isinstance(s, pd.DataFrame): if len(s.columns) != 1: raise NotImplementedError expected = s.iloc[:, 0].combine(other, op).to_frame() self.assert_frame_equal(result, expected) else: expected = s.combine(other, op) self.assert_series_equal(result, expected) else: with pytest.raises(exc): op(s, other) def _check_divmod_op(self, s, op, other, exc=Exception): # divmod has multiple return values, so check separately if exc is None: result_div, result_mod = op(s, other) if op is divmod: expected_div, expected_mod = s // other, s % other else: expected_div, expected_mod = other // s, other % s self.assert_series_equal(result_div, expected_div) self.assert_series_equal(result_mod, expected_mod) else: with pytest.raises(exc): divmod(s, other) class BaseArithmeticOpsTests(BaseOpsUtil): """ Various Series and DataFrame arithmetic ops methods. Subclasses supporting various ops should set the class variables to indicate that they support ops of that kind * series_scalar_exc = TypeError * frame_scalar_exc = TypeError * series_array_exc = TypeError * divmod_exc = TypeError """ series_scalar_exc: Optional[Type[TypeError]] = TypeError frame_scalar_exc: Optional[Type[TypeError]] = TypeError series_array_exc: Optional[Type[TypeError]] = TypeError divmod_exc: Optional[Type[TypeError]] = TypeError def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # series & scalar op_name = all_arithmetic_operators s = pd.Series(data) self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc) @pytest.mark.xfail(run=False, reason="_reduce needs implementation") def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): # frame & scalar op_name = all_arithmetic_operators df = pd.DataFrame({"A": data}) self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc) def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators s = pd.Series(data) self.check_opname( s, op_name, pd.Series([s.iloc[0]] * len(s)), exc=self.series_array_exc ) def test_divmod(self, data): s = pd.Series(data) self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc) self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc) def test_divmod_series_array(self, data, data_for_twos): s = pd.Series(data) self._check_divmod_op(s, divmod, data) other = data_for_twos self._check_divmod_op(other, ops.rdivmod, s) other = pd.Series(other) self._check_divmod_op(other, ops.rdivmod, s) def test_add_series_with_extension_array(self, data): s = pd.Series(data) result = s + data expected = pd.Series(data + data) self.assert_series_equal(result, expected) def test_error(self, data, all_arithmetic_operators): # invalid ops op_name = all_arithmetic_operators with pytest.raises(AttributeError): getattr(data, op_name) @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing 
the series and calling the EA's op. other = pd.Series(data) if box is pd.DataFrame: other = other.to_frame() if hasattr(data, "__add__"): result = data.__add__(other) assert result is NotImplemented else: raise pytest.skip(f"{type(data).__name__} does not implement add") class BaseComparisonOpsTests(BaseOpsUtil): """Various Series and DataFrame comparison ops methods.""" def _compare_other(self, s, data, op_name, other): op = self.get_op_from_name(op_name) if op_name == "__eq__": assert not op(s, other).all() elif op_name == "__ne__": assert op(s, other).all() else: # array assert getattr(data, op_name)(other) is NotImplemented # series s = pd.Series(data) with pytest.raises(TypeError): op(s, other) def test_compare_scalar(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) self._compare_other(s, data, op_name, 0) def test_compare_array(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) other = pd.Series([data[0]] * len(data)) self._compare_other(s, data, op_name, other) @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing the series and calling the EA's op. other = pd.Series(data) if box is pd.DataFrame: other = other.to_frame() if hasattr(data, "__eq__"): result = data.__eq__(other) assert result is NotImplemented else: raise pytest.skip(f"{type(data).__name__} does not implement __eq__") if hasattr(data, "__ne__"): result = data.__ne__(other) assert result is NotImplemented else: raise pytest.skip(f"{type(data).__name__} does not implement __ne__") class BaseUnaryOpsTests(BaseOpsUtil): def test_invert(self, data): s = pd.Series(data, name="name") result = ~s expected = pd.Series(~data, name="name") self.assert_series_equal(result, expected)
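Concrete extension-array suites opt in by inheriting these base classes and setting the exc class variables; the `data` argument comes from fixtures defined in the suite's conftest. A hypothetical skeleton (class and array names are made up):

    from pandas.tests.extension import base

    # Hypothetical suite for some ExtensionArray: `None` means the op is
    # expected to succeed; keeping the default TypeError means it should
    # raise.  Fixtures such as `data` must be supplied by conftest.py.
    class TestMyArrayOps(base.BaseArithmeticOpsTests):
        series_scalar_exc = None
        series_array_exc = None
        divmod_exc = None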
from datetime import time, timedelta import numpy as np import pytest import pandas as pd from pandas import Series, TimedeltaIndex, isna, to_timedelta import pandas._testing as tm class TestTimedeltas: @pytest.mark.parametrize("readonly", [True, False]) def test_to_timedelta_readonly(self, readonly): # GH#34857 arr = np.array([], dtype=object) if readonly: arr.setflags(write=False) result = to_timedelta(arr) expected = to_timedelta([]) tm.assert_index_equal(result, expected) def test_to_timedelta(self): result = to_timedelta(["", ""]) assert isna(result).all() # pass thru result = to_timedelta(np.array([np.timedelta64(1, "s")])) expected = pd.Index(np.array([np.timedelta64(1, "s")])) tm.assert_index_equal(result, expected) # Series expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)]) result = to_timedelta(Series(["1d", "1days 00:00:01"])) tm.assert_series_equal(result, expected) # with units result = TimedeltaIndex( [np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")] ) expected = to_timedelta([0, 10], unit="s") tm.assert_index_equal(result, expected) # arrays of various dtypes arr = np.array([1] * 5, dtype="int64") result = to_timedelta(arr, unit="s") expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="int64") result = to_timedelta(arr, unit="m") expected = TimedeltaIndex([np.timedelta64(1, "m")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="int64") result = to_timedelta(arr, unit="h") expected = TimedeltaIndex([np.timedelta64(1, "h")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="timedelta64[s]") result = to_timedelta(arr) expected = TimedeltaIndex([np.timedelta64(1, "s")] * 5) tm.assert_index_equal(result, expected) arr = np.array([1] * 5, dtype="timedelta64[D]") result = to_timedelta(arr) expected = TimedeltaIndex([np.timedelta64(1, "D")] * 5) tm.assert_index_equal(result, expected) def test_to_timedelta_dataframe(self): # GH 11776 arr = np.arange(10).reshape(2, 5) df = pd.DataFrame(np.arange(10).reshape(2, 5)) for arg in (arr, df): with pytest.raises(TypeError, match="1-d array"): to_timedelta(arg) for errors in ["ignore", "raise", "coerce"]: with pytest.raises(TypeError, match="1-d array"): to_timedelta(arg, errors=errors) def test_to_timedelta_invalid(self): # bad value for errors parameter msg = "errors must be one of" with pytest.raises(ValueError, match=msg): to_timedelta(["foo"], errors="never") # these will error msg = "invalid unit abbreviation: foo" with pytest.raises(ValueError, match=msg): to_timedelta([1, 2], unit="foo") with pytest.raises(ValueError, match=msg): to_timedelta(1, unit="foo") # time not supported ATM msg = ( "Value must be Timedelta, string, integer, float, timedelta or convertible" ) with pytest.raises(ValueError, match=msg): to_timedelta(time(second=1)) assert to_timedelta(time(second=1), errors="coerce") is pd.NaT msg = "unit abbreviation w/o a number" with pytest.raises(ValueError, match=msg): to_timedelta(["foo", "bar"]) tm.assert_index_equal( TimedeltaIndex([pd.NaT, pd.NaT]), to_timedelta(["foo", "bar"], errors="coerce"), ) tm.assert_index_equal( TimedeltaIndex(["1 day", pd.NaT, "1 min"]), to_timedelta(["1 day", "bar", "1 min"], errors="coerce"), ) # gh-13613: these should not error because errors='ignore' invalid_data = "apple" assert invalid_data == to_timedelta(invalid_data, errors="ignore") invalid_data = ["apple", "1 days"] tm.assert_numpy_array_equal( np.array(invalid_data, 
dtype=object), to_timedelta(invalid_data, errors="ignore"), ) invalid_data = pd.Index(["apple", "1 days"]) tm.assert_index_equal(invalid_data, to_timedelta(invalid_data, errors="ignore")) invalid_data = Series(["apple", "1 days"]) tm.assert_series_equal( invalid_data, to_timedelta(invalid_data, errors="ignore") ) @pytest.mark.parametrize( "val, warning", [ ("1M", FutureWarning), ("1 M", FutureWarning), ("1Y", FutureWarning), ("1 Y", FutureWarning), ("1y", FutureWarning), ("1 y", FutureWarning), ("1m", None), ("1 m", None), ("1 day", None), ("2day", None), ], ) def test_unambiguous_timedelta_values(self, val, warning): # GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y' # in pd.to_timedelta with tm.assert_produces_warning(warning, check_stacklevel=False): to_timedelta(val) def test_to_timedelta_via_apply(self): # GH 5458 expected = Series([np.timedelta64(1, "s")]) result = Series(["00:00:01"]).apply(to_timedelta) tm.assert_series_equal(result, expected) result = Series([to_timedelta("00:00:01")]) tm.assert_series_equal(result, expected) def test_to_timedelta_on_missing_values(self): # GH5438 timedelta_NaT = np.timedelta64("NaT") actual = pd.to_timedelta(Series(["00:00:01", np.nan])) expected = Series( [np.timedelta64(1000000000, "ns"), timedelta_NaT], dtype="<m8[ns]" ) tm.assert_series_equal(actual, expected) actual = pd.to_timedelta(Series(["00:00:01", pd.NaT])) tm.assert_series_equal(actual, expected) actual = pd.to_timedelta(np.nan) assert actual.value == timedelta_NaT.astype("int64") actual = pd.to_timedelta(pd.NaT) assert actual.value == timedelta_NaT.astype("int64") def test_to_timedelta_float(self): # https://github.com/pandas-dev/pandas/issues/25077 arr = np.arange(0, 1, 1e-6)[-10:] result = pd.to_timedelta(arr, unit="s") expected_asi8 = np.arange(999990000, 10 ** 9, 1000, dtype="int64") tm.assert_numpy_array_equal(result.asi8, expected_asi8) def test_to_timedelta_coerce_strings_unit(self): arr = np.array([1, 2, "error"], dtype=object) result = pd.to_timedelta(arr, unit="ns", errors="coerce") expected = pd.to_timedelta([1, 2, pd.NaT], unit="ns") tm.assert_index_equal(result, expected) def test_to_timedelta_ignore_strings_unit(self): arr = np.array([1, 2, "error"], dtype=object) result = pd.to_timedelta(arr, unit="ns", errors="ignore") tm.assert_numpy_array_equal(result, arr) def test_to_timedelta_nullable_int64_dtype(self): # GH 35574 expected = Series([timedelta(days=1), timedelta(days=2)]) result = to_timedelta(Series([1, 2], dtype="Int64"), unit="days") tm.assert_series_equal(result, expected) # IntegerArray Series with nulls expected = Series([timedelta(days=1), None]) result = to_timedelta(Series([1, None], dtype="Int64"), unit="days") tm.assert_series_equal(result, expected) @pytest.mark.parametrize( ("input", "expected"), [ ("8:53:08.71800000001", "8:53:08.718"), ("8:53:08.718001", "8:53:08.718001"), ("8:53:08.7180000001", "8:53:08.7180000001"), ("-8:53:08.71800000001", "-8:53:08.718"), ("8:53:08.7180000089", "8:53:08.718000008"), ], ) @pytest.mark.parametrize("func", [pd.Timedelta, pd.to_timedelta]) def test_to_timedelta_precision_over_nanos(self, input, expected, func): # GH: 36738 expected = pd.Timedelta(expected) result = func(input) assert result == expected def test_to_timedelta_zerodim(self): # ndarray.item() incorrectly returns int for dt64[ns] and td64[ns] dt64 = pd.Timestamp.now().to_datetime64() arg = np.array(dt64) msg = ( "Value must be Timedelta, string, integer, float, timedelta " "or convertible, not datetime64" ) with 
pytest.raises(ValueError, match=msg): to_timedelta(arg) arg2 = arg.view("m8[ns]") result = to_timedelta(arg2) assert isinstance(result, pd.Timedelta) assert result.value == dt64.view("i8")
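For quick reference, the three errors= modes exercised throughout these tests behave as follows (a small illustrative snippet, not part of the test module):

import pandas as pd

# errors="raise" (the default): invalid input raises ValueError.
# errors="coerce": invalid entries become NaT.
# errors="ignore": the input is returned unchanged.
print(pd.to_timedelta(["1 days", "bar"], errors="coerce"))
# TimedeltaIndex(['1 days', NaT], dtype='timedelta64[ns]', freq=None)
print(pd.to_timedelta("apple", errors="ignore"))
# apple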
jreback/pandas
pandas/tests/tools/test_to_timedelta.py
pandas/tests/extension/base/ops.py
from __future__ import division, print_function, absolute_import def configuration(parent_name='special', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('_precompute', parent_name, top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration().todict())
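This is the usual numpy.distutils subpackage hook: a parent setup.py discovers it through add_subpackage. A minimal sketch of that parent side, with illustrative names and paths:

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('special', parent_package, top_path)
    # Recurses into _precompute/setup.py, i.e. the configuration above.
    config.add_subpackage('_precompute')
    return config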
# # Tests of spherical Bessel functions. # from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_almost_equal, assert_allclose, assert_array_almost_equal) import pytest from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn from scipy.integrate import quad from scipy._lib._numpy_compat import suppress_warnings class TestSphericalJn: def test_spherical_jn_exact(self): # https://dlmf.nist.gov/10.49.E3 # Note: exact expression is numerically stable only for small # n or z >> n. x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) assert_allclose(spherical_jn(2, x), (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x)) def test_spherical_jn_recurrence_complex(self): # https://dlmf.nist.gov/10.51.E1 n = np.array([1, 2, 3, 7, 12]) x = 1.1 + 1.5j assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x), (2*n + 1)/x*spherical_jn(n, x)) def test_spherical_jn_recurrence_real(self): # https://dlmf.nist.gov/10.51.E1 n = np.array([1, 2, 3, 7, 12]) x = 0.12 assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x), (2*n + 1)/x*spherical_jn(n, x)) def test_spherical_jn_inf_real(self): # https://dlmf.nist.gov/10.52.E3 n = 6 x = np.array([-inf, inf]) assert_allclose(spherical_jn(n, x), np.array([0, 0])) def test_spherical_jn_inf_complex(self): # https://dlmf.nist.gov/10.52.E3 n = 7 x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in multiply") assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)])) def test_spherical_jn_large_arg_1(self): # https://github.com/scipy/scipy/issues/2165 # Reference value computed using mpmath, via # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747) def test_spherical_jn_large_arg_2(self): # https://github.com/scipy/scipy/issues/1641 # Reference value computed using mpmath, via # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z)) assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05) def test_spherical_jn_at_zero(self): # https://dlmf.nist.gov/10.52.E1 # But note that n = 0 is a special case: j0 = sin(x)/x -> 1 n = np.array([0, 1, 2, 5, 10, 100]) x = 0 assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0])) class TestSphericalYn: def test_spherical_yn_exact(self): # https://dlmf.nist.gov/10.49.E5 # Note: exact expression is numerically stable only for small # n or z >> n. 
x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5]) assert_allclose(spherical_yn(2, x), (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x)) def test_spherical_yn_recurrence_real(self): # https://dlmf.nist.gov/10.51.E1 n = np.array([1, 2, 3, 7, 12]) x = 0.12 assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x), (2*n + 1)/x*spherical_yn(n, x)) def test_spherical_yn_recurrence_complex(self): # https://dlmf.nist.gov/10.51.E1 n = np.array([1, 2, 3, 7, 12]) x = 1.1 + 1.5j assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x), (2*n + 1)/x*spherical_yn(n, x)) def test_spherical_yn_inf_real(self): # https://dlmf.nist.gov/10.52.E3 n = 6 x = np.array([-inf, inf]) assert_allclose(spherical_yn(n, x), np.array([0, 0])) def test_spherical_yn_inf_complex(self): # https://dlmf.nist.gov/10.52.E3 n = 7 x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in multiply") assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)])) def test_spherical_yn_at_zero(self): # https://dlmf.nist.gov/10.52.E2 n = np.array([0, 1, 2, 5, 10, 100]) x = 0 assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf)) def test_spherical_yn_at_zero_complex(self): # Consistently with numpy: # >>> -np.cos(0)/0 # -inf # >>> -np.cos(0+0j)/(0+0j) # (-inf + nan*j) n = np.array([0, 1, 2, 5, 10, 100]) x = 0 + 0j assert_allclose(spherical_yn(n, x), np.full(n.shape, nan)) class TestSphericalJnYnCrossProduct: def test_spherical_jn_yn_cross_product_1(self): # https://dlmf.nist.gov/10.50.E3 n = np.array([1, 5, 8]) x = np.array([0.1, 1, 10]) left = (spherical_jn(n + 1, x) * spherical_yn(n, x) - spherical_jn(n, x) * spherical_yn(n + 1, x)) right = 1/x**2 assert_allclose(left, right) def test_spherical_jn_yn_cross_product_2(self): # https://dlmf.nist.gov/10.50.E3 n = np.array([1, 5, 8]) x = np.array([0.1, 1, 10]) left = (spherical_jn(n + 2, x) * spherical_yn(n, x) - spherical_jn(n, x) * spherical_yn(n + 2, x)) right = (2*n + 3)/x**3 assert_allclose(left, right) class TestSphericalIn: def test_spherical_in_exact(self): # https://dlmf.nist.gov/10.49.E9 x = np.array([0.12, 1.23, 12.34, 123.45]) assert_allclose(spherical_in(2, x), (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x)) def test_spherical_in_recurrence_real(self): # https://dlmf.nist.gov/10.51.E4 n = np.array([1, 2, 3, 7, 12]) x = 0.12 assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), (2*n + 1)/x*spherical_in(n, x)) def test_spherical_in_recurrence_complex(self): # https://dlmf.nist.gov/10.51.E1 n = np.array([1, 2, 3, 7, 12]) x = 1.1 + 1.5j assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x), (2*n + 1)/x*spherical_in(n, x)) def test_spherical_in_inf_real(self): # https://dlmf.nist.gov/10.52.E3 n = 5 x = np.array([-inf, inf]) assert_allclose(spherical_in(n, x), np.array([-inf, inf])) def test_spherical_in_inf_complex(self): # https://dlmf.nist.gov/10.52.E5 # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but # this appears impossible to achieve because C99 regards any complex # value with at least one infinite part as a complex infinity, so # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is # the correct return value. 
n = 7 x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan])) def test_spherical_in_at_zero(self): # https://dlmf.nist.gov/10.52.E1 # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1 n = np.array([0, 1, 2, 5, 10, 100]) x = 0 assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0])) class TestSphericalKn: def test_spherical_kn_exact(self): # https://dlmf.nist.gov/10.49.E13 x = np.array([0.12, 1.23, 12.34, 123.45]) assert_allclose(spherical_kn(2, x), pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3)) def test_spherical_kn_recurrence_real(self): # https://dlmf.nist.gov/10.51.E4 n = np.array([1, 2, 3, 7, 12]) x = 0.12 assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), (-1)**n*(2*n + 1)/x*spherical_kn(n, x)) def test_spherical_kn_recurrence_complex(self): # https://dlmf.nist.gov/10.51.E4 n = np.array([1, 2, 3, 7, 12]) x = 1.1 + 1.5j assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x), (-1)**n*(2*n + 1)/x*spherical_kn(n, x)) def test_spherical_kn_inf_real(self): # https://dlmf.nist.gov/10.52.E6 n = 5 x = np.array([-inf, inf]) assert_allclose(spherical_kn(n, x), np.array([-inf, 0])) def test_spherical_kn_inf_complex(self): # https://dlmf.nist.gov/10.52.E6 # The behavior at complex infinity depends on the sign of the real # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's # z*inf. This distinction cannot be captured, so we return nan. n = 7 x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)]) assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan])) def test_spherical_kn_at_zero(self): # https://dlmf.nist.gov/10.52.E2 n = np.array([0, 1, 2, 5, 10, 100]) x = 0 assert_allclose(spherical_kn(n, x), np.full(n.shape, inf)) def test_spherical_kn_at_zero_complex(self): # https://dlmf.nist.gov/10.52.E2 n = np.array([0, 1, 2, 5, 10, 100]) x = 0 + 0j assert_allclose(spherical_kn(n, x), np.full(n.shape, nan)) class SphericalDerivativesTestCase: def fundamental_theorem(self, n, a, b): integral, tolerance = quad(lambda z: self.df(n, z), a, b) assert_allclose(integral, self.f(n, b) - self.f(n, a), atol=tolerance) @pytest.mark.slow def test_fundamental_theorem_0(self): self.fundamental_theorem(0, 3.0, 15.0) @pytest.mark.slow def test_fundamental_theorem_7(self): self.fundamental_theorem(7, 0.5, 1.2) class TestSphericalJnDerivatives(SphericalDerivativesTestCase): def f(self, n, z): return spherical_jn(n, z) def df(self, n, z): return spherical_jn(n, z, derivative=True) def test_spherical_jn_d_zero(self): n = np.array([0, 1, 2, 3, 7, 15]) assert_allclose(spherical_jn(n, 0, derivative=True), np.array([0, 1/3, 0, 0, 0, 0])) class TestSphericalYnDerivatives(SphericalDerivativesTestCase): def f(self, n, z): return spherical_yn(n, z) def df(self, n, z): return spherical_yn(n, z, derivative=True) class TestSphericalInDerivatives(SphericalDerivativesTestCase): def f(self, n, z): return spherical_in(n, z) def df(self, n, z): return spherical_in(n, z, derivative=True) def test_spherical_in_d_zero(self): n = np.array([1, 2, 3, 7, 15]) assert_allclose(spherical_in(n, 0, derivative=True), np.zeros(5)) class TestSphericalKnDerivatives(SphericalDerivativesTestCase): def f(self, n, z): return spherical_kn(n, z) def df(self, n, z): return spherical_kn(n, z, derivative=True) class TestSphericalOld: # These are tests from the TestSpherical class of test_basic.py, # rewritten to use spherical_* instead of sph_* but otherwise unchanged. 
def test_sph_in(self): # This test reproduces test_basic.TestSpherical.test_sph_in. i1n = np.empty((2,2)) x = 0.2 i1n[0][0] = spherical_in(0, x) i1n[0][1] = spherical_in(1, x) i1n[1][0] = spherical_in(0, x, derivative=True) i1n[1][1] = spherical_in(1, x, derivative=True) inp0 = (i1n[0][1]) inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1]) assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381, 0.066933714568029540839]),12) assert_array_almost_equal(i1n[1],[inp0,inp1],12) def test_sph_in_kn_order0(self): x = 1. sph_i0 = np.empty((2,)) sph_i0[0] = spherical_in(0, x) sph_i0[1] = spherical_in(0, x, derivative=True) sph_i0_expected = np.array([np.sinh(x)/x, np.cosh(x)/x-np.sinh(x)/x**2]) assert_array_almost_equal(r_[sph_i0], sph_i0_expected) sph_k0 = np.empty((2,)) sph_k0[0] = spherical_kn(0, x) sph_k0[1] = spherical_kn(0, x, derivative=True) sph_k0_expected = np.array([0.5*pi*exp(-x)/x, -0.5*pi*exp(-x)*(1/x+1/x**2)]) assert_array_almost_equal(r_[sph_k0], sph_k0_expected) def test_sph_jn(self): s1 = np.empty((2,3)) x = 0.2 s1[0][0] = spherical_jn(0, x) s1[0][1] = spherical_jn(1, x) s1[0][2] = spherical_jn(2, x) s1[1][0] = spherical_jn(0, x, derivative=True) s1[1][1] = spherical_jn(1, x, derivative=True) s1[1][2] = spherical_jn(2, x, derivative=True) s10 = -s1[0][1] s11 = s1[0][0]-2.0/0.2*s1[0][1] s12 = s1[0][1]-3.0/0.2*s1[0][2] assert_array_almost_equal(s1[0],[0.99334665397530607731, 0.066400380670322230863, 0.0026590560795273856680],12) assert_array_almost_equal(s1[1],[s10,s11,s12],12) def test_sph_kn(self): kn = np.empty((2,3)) x = 0.2 kn[0][0] = spherical_kn(0, x) kn[0][1] = spherical_kn(1, x) kn[0][2] = spherical_kn(2, x) kn[1][0] = spherical_kn(0, x, derivative=True) kn[1][1] = spherical_kn(1, x, derivative=True) kn[1][2] = spherical_kn(2, x, derivative=True) kn0 = -kn[0][1] kn1 = -kn[0][0]-2.0/0.2*kn[0][1] kn2 = -kn[0][1]-3.0/0.2*kn[0][2] assert_array_almost_equal(kn[0],[6.4302962978445670140, 38.581777787067402086, 585.15696310385559829],12) assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9) def test_sph_yn(self): sy1 = spherical_yn(2, 0.2) sy2 = spherical_yn(0, 0.2) assert_almost_equal(sy1,-377.52483,5) # previous values in the system assert_almost_equal(sy2,-4.9003329,5) sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3 sy3 = spherical_yn(1, 0.2, derivative=True) assert_almost_equal(sy3,sphpy,4) # compare correct derivative val. (correct = -system val).
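All of the recurrence tests above instantiate DLMF 10.51.1; as a standalone sanity check, the identity can be verified directly (illustrative values):

import numpy as np
from scipy.special import spherical_jn

# DLMF 10.51.1: j_{n-1}(x) + j_{n+1}(x) = ((2*n + 1)/x) * j_n(x)
n = np.arange(1, 6)
x = 2.5
lhs = spherical_jn(n - 1, x) + spherical_jn(n + 1, x)
rhs = (2 * n + 1) / x * spherical_jn(n, x)
assert np.allclose(lhs, rhs)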
jamestwebber/scipy
scipy/special/tests/test_spherical_bessel.py
scipy/special/_precompute/setup.py
"""Support for Volvo On Call.""" from datetime import timedelta import logging import voluptuous as vol from volvooncall import Connection from homeassistant.const import ( CONF_NAME, CONF_PASSWORD, CONF_REGION, CONF_RESOURCES, CONF_SCAN_INTERVAL, CONF_USERNAME, ) from homeassistant.helpers import discovery from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_point_in_utc_time from homeassistant.util.dt import utcnow DOMAIN = "volvooncall" DATA_KEY = DOMAIN _LOGGER = logging.getLogger(__name__) MIN_UPDATE_INTERVAL = timedelta(minutes=1) DEFAULT_UPDATE_INTERVAL = timedelta(minutes=1) CONF_SERVICE_URL = "service_url" CONF_SCANDINAVIAN_MILES = "scandinavian_miles" CONF_MUTABLE = "mutable" SIGNAL_STATE_UPDATED = f"{DOMAIN}.updated" PLATFORMS = { "sensor": "sensor", "binary_sensor": "binary_sensor", "lock": "lock", "device_tracker": "device_tracker", "switch": "switch", } RESOURCES = [ "position", "lock", "heater", "odometer", "trip_meter1", "trip_meter2", "average_speed", "fuel_amount", "fuel_amount_level", "average_fuel_consumption", "distance_to_empty", "washer_fluid_level", "brake_fluid", "service_warning_status", "bulb_failures", "battery_range", "battery_level", "time_to_fully_charged", "battery_charge_status", "engine_start", "last_trip", "is_engine_running", "doors_hood_open", "doors_tailgate_open", "doors_front_left_door_open", "doors_front_right_door_open", "doors_rear_left_door_open", "doors_rear_right_door_open", "windows_front_left_window_open", "windows_front_right_window_open", "windows_rear_left_window_open", "windows_rear_right_window_open", "tyre_pressure_front_left_tyre_pressure", "tyre_pressure_front_right_tyre_pressure", "tyre_pressure_rear_left_tyre_pressure", "tyre_pressure_rear_right_tyre_pressure", "any_door_open", "any_window_open", ] CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional( CONF_SCAN_INTERVAL, default=DEFAULT_UPDATE_INTERVAL ): vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)), vol.Optional(CONF_NAME, default={}): cv.schema_with_slug_keys( cv.string ), vol.Optional(CONF_RESOURCES): vol.All( cv.ensure_list, [vol.In(RESOURCES)] ), vol.Optional(CONF_REGION): cv.string, vol.Optional(CONF_SERVICE_URL): cv.string, vol.Optional(CONF_MUTABLE, default=True): cv.boolean, vol.Optional(CONF_SCANDINAVIAN_MILES, default=False): cv.boolean, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Volvo On Call component.""" session = async_get_clientsession(hass) connection = Connection( session=session, username=config[DOMAIN].get(CONF_USERNAME), password=config[DOMAIN].get(CONF_PASSWORD), service_url=config[DOMAIN].get(CONF_SERVICE_URL), region=config[DOMAIN].get(CONF_REGION), ) interval = config[DOMAIN][CONF_SCAN_INTERVAL] data = hass.data[DATA_KEY] = VolvoData(config) def is_enabled(attr): """Return true if the user has enabled the resource.""" return attr in config[DOMAIN].get(CONF_RESOURCES, [attr]) def discover_vehicle(vehicle): """Load relevant platforms.""" data.vehicles.add(vehicle.vin) dashboard = vehicle.dashboard( mutable=config[DOMAIN][CONF_MUTABLE], scandinavian_miles=config[DOMAIN][CONF_SCANDINAVIAN_MILES], ) for instrument in ( instrument for instrument in 
dashboard.instruments if instrument.component in PLATFORMS and is_enabled(instrument.slug_attr) ): data.instruments.add(instrument) hass.async_create_task( discovery.async_load_platform( hass, PLATFORMS[instrument.component], DOMAIN, (vehicle.vin, instrument.component, instrument.attr), config, ) ) async def update(now): """Update status from the online service.""" try: if not await connection.update(journal=True): _LOGGER.warning("Could not query server") return False for vehicle in connection.vehicles: if vehicle.vin not in data.vehicles: discover_vehicle(vehicle) async_dispatcher_send(hass, SIGNAL_STATE_UPDATED) return True finally: async_track_point_in_utc_time(hass, update, utcnow() + interval) _LOGGER.info("Logging in to service") return await update(utcnow()) class VolvoData: """Hold component state.""" def __init__(self, config): """Initialize the component state.""" self.vehicles = set() self.instruments = set() self.config = config[DOMAIN] self.names = self.config.get(CONF_NAME) def instrument(self, vin, component, attr): """Return corresponding instrument.""" return next( ( instrument for instrument in self.instruments if instrument.vehicle.vin == vin and instrument.component == component and instrument.attr == attr ), None, ) def vehicle_name(self, vehicle): """Provide a friendly name for a vehicle.""" if ( vehicle.registration_number and vehicle.registration_number.lower() ) in self.names: return self.names[vehicle.registration_number.lower()] if vehicle.vin and vehicle.vin.lower() in self.names: return self.names[vehicle.vin.lower()] if vehicle.registration_number: return vehicle.registration_number if vehicle.vin: return vehicle.vin return "" class VolvoEntity(Entity): """Base class for all VOC entities.""" def __init__(self, data, vin, component, attribute): """Initialize the entity.""" self.data = data self.vin = vin self.component = component self.attribute = attribute async def async_added_to_hass(self): """Register update dispatcher.""" self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_STATE_UPDATED, self.async_write_ha_state ) ) @property def instrument(self): """Return corresponding instrument.""" return self.data.instrument(self.vin, self.component, self.attribute) @property def icon(self): """Return the icon.""" return self.instrument.icon @property def vehicle(self): """Return vehicle.""" return self.instrument.vehicle @property def _entity_name(self): return self.instrument.name @property def _vehicle_name(self): return self.data.vehicle_name(self.vehicle) @property def name(self): """Return full name of the entity.""" return f"{self._vehicle_name} {self._entity_name}" @property def should_poll(self): """Return the polling state.""" return False @property def assumed_state(self): """Return true if unable to access real state of entity.""" return True @property def extra_state_attributes(self): """Return device specific state attributes.""" return dict( self.instrument.attributes, model=f"{self.vehicle.vehicle_type}/{self.vehicle.model_year}", ) @property def unique_id(self) -> str: """Return a unique ID.""" return f"{self.vin}-{self.component}-{self.attribute}"
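Note how update schedules its own next run from a finally block, so a failed poll still books the following one. Stripped of the Home Assistant specifics, the pattern looks roughly like this (names illustrative):

import asyncio
import logging

_LOGGER = logging.getLogger(__name__)

async def poll_forever(update_once, interval_s: float) -> None:
    # Same shape as update() above: scheduling the next cycle happens
    # unconditionally, so a single failed poll never stops polling.
    while True:
        try:
            await update_once()
        except Exception:
            _LOGGER.exception("Update failed; retrying next cycle")
        finally:
            await asyncio.sleep(interval_s)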
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
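test_discover_config_flow relies on patch.dict to install a temporary config-entry handler; the same technique applies to any module-level registry, and the original mapping is restored on exit:

from unittest.mock import patch

HANDLERS = {"real-service": "real_component"}

with patch.dict(HANDLERS, {"mock-service": "mock_component"}):
    # The extra mapping is visible only inside the block...
    assert HANDLERS["mock-service"] == "mock_component"

# ...and is removed again once the block exits.
assert "mock-service" not in HANDLERS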
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/volvooncall/__init__.py
"""Support for Verisure Smartplugs.""" from __future__ import annotations from time import monotonic from homeassistant.components.switch import SwitchEntity from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import CONF_GIID, DOMAIN from .coordinator import VerisureDataUpdateCoordinator async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up Verisure alarm control panel from a config entry.""" coordinator: VerisureDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] async_add_entities( VerisureSmartplug(coordinator, serial_number) for serial_number in coordinator.data["smart_plugs"] ) class VerisureSmartplug(CoordinatorEntity, SwitchEntity): """Representation of a Verisure smartplug.""" coordinator: VerisureDataUpdateCoordinator def __init__( self, coordinator: VerisureDataUpdateCoordinator, serial_number: str ) -> None: """Initialize the Verisure device.""" super().__init__(coordinator) self._attr_name = coordinator.data["smart_plugs"][serial_number]["area"] self._attr_unique_id = serial_number self.serial_number = serial_number self._change_timestamp = 0 self._state = False @property def device_info(self) -> DeviceInfo: """Return device information about this entity.""" area = self.coordinator.data["smart_plugs"][self.serial_number]["area"] return { "name": area, "suggested_area": area, "manufacturer": "Verisure", "model": "SmartPlug", "identifiers": {(DOMAIN, self.serial_number)}, "via_device": (DOMAIN, self.coordinator.entry.data[CONF_GIID]), } @property def is_on(self) -> bool: """Return true if on.""" if monotonic() - self._change_timestamp < 10: return self._state self._state = ( self.coordinator.data["smart_plugs"][self.serial_number]["currentState"] == "ON" ) return self._state @property def available(self) -> bool: """Return True if entity is available.""" return ( super().available and self.serial_number in self.coordinator.data["smart_plugs"] ) def turn_on(self, **kwargs) -> None: """Set smartplug status on.""" self.coordinator.verisure.set_smartplug_state(self.serial_number, True) self._state = True self._change_timestamp = monotonic() self.schedule_update_ha_state() def turn_off(self, **kwargs) -> None: """Set smartplug status off.""" self.coordinator.verisure.set_smartplug_state(self.serial_number, False) self._state = False self._change_timestamp = monotonic() self.schedule_update_ha_state()
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/verisure/switch.py
"""Support for WaterHeater devices of (EMEA/EU) Honeywell TCC systems.""" from __future__ import annotations import logging from homeassistant.components.water_heater import ( SUPPORT_AWAY_MODE, SUPPORT_OPERATION_MODE, WaterHeaterEntity, ) from homeassistant.const import PRECISION_TENTHS, PRECISION_WHOLE, STATE_OFF, STATE_ON from homeassistant.core import HomeAssistant from homeassistant.helpers.typing import ConfigType import homeassistant.util.dt as dt_util from . import EvoChild from .const import DOMAIN, EVO_FOLLOW, EVO_PERMOVER _LOGGER = logging.getLogger(__name__) STATE_AUTO = "auto" HA_STATE_TO_EVO = {STATE_AUTO: "", STATE_ON: "On", STATE_OFF: "Off"} EVO_STATE_TO_HA = {v: k for k, v in HA_STATE_TO_EVO.items() if k != ""} STATE_ATTRS_DHW = ["dhwId", "activeFaults", "stateStatus", "temperatureStatus"] async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None ) -> None: """Create a DHW controller.""" if discovery_info is None: return broker = hass.data[DOMAIN]["broker"] _LOGGER.debug( "Adding: DhwController (%s), id=%s", broker.tcs.hotwater.zone_type, broker.tcs.hotwater.zoneId, ) new_entity = EvoDHW(broker, broker.tcs.hotwater) async_add_entities([new_entity], update_before_add=True) class EvoDHW(EvoChild, WaterHeaterEntity): """Base for a Honeywell TCC DHW controller (aka boiler).""" def __init__(self, evo_broker, evo_device) -> None: """Initialize an evohome DHW controller.""" super().__init__(evo_broker, evo_device) self._unique_id = evo_device.dhwId self._name = "DHW controller" self._icon = "mdi:thermometer-lines" self._precision = PRECISION_TENTHS if evo_broker.client_v1 else PRECISION_WHOLE self._supported_features = SUPPORT_AWAY_MODE | SUPPORT_OPERATION_MODE @property def state(self): """Return the current state.""" return EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]] @property def current_operation(self) -> str: """Return the current operating mode (Auto, On, or Off).""" if self._evo_device.stateStatus["mode"] == EVO_FOLLOW: return STATE_AUTO return EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]] @property def operation_list(self) -> list[str]: """Return the list of available operations.""" return list(HA_STATE_TO_EVO) @property def is_away_mode_on(self): """Return True if away mode is on.""" is_off = EVO_STATE_TO_HA[self._evo_device.stateStatus["state"]] == STATE_OFF is_permanent = self._evo_device.stateStatus["mode"] == EVO_PERMOVER return is_off and is_permanent async def async_set_operation_mode(self, operation_mode: str) -> None: """Set new operation mode for a DHW controller. Except for Auto, the mode is only until the next SetPoint. 
""" if operation_mode == STATE_AUTO: await self._evo_broker.call_client_api(self._evo_device.set_dhw_auto()) else: await self._update_schedule() until = dt_util.parse_datetime(self.setpoints.get("next_sp_from", "")) until = dt_util.as_utc(until) if until else None if operation_mode == STATE_ON: await self._evo_broker.call_client_api( self._evo_device.set_dhw_on(until=until) ) else: # STATE_OFF await self._evo_broker.call_client_api( self._evo_device.set_dhw_off(until=until) ) async def async_turn_away_mode_on(self): """Turn away mode on.""" await self._evo_broker.call_client_api(self._evo_device.set_dhw_off()) async def async_turn_away_mode_off(self): """Turn away mode off.""" await self._evo_broker.call_client_api(self._evo_device.set_dhw_auto()) async def async_turn_on(self): """Turn on.""" await self._evo_broker.call_client_api(self._evo_device.set_dhw_on()) async def async_turn_off(self): """Turn off.""" await self._evo_broker.call_client_api(self._evo_device.set_dhw_off()) async def async_update(self) -> None: """Get the latest state data for a DHW controller.""" await super().async_update() for attr in STATE_ATTRS_DHW: self._device_state_attrs[attr] = getattr(self._evo_device, attr)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/evohome/water_heater.py
"""Provides a binary sensor which gets its values from a TCP socket.""" from __future__ import annotations from typing import Any, Final from homeassistant.components.binary_sensor import ( PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA, BinarySensorEntity, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType from .common import TCP_PLATFORM_SCHEMA, TcpEntity from .const import CONF_VALUE_ON PLATFORM_SCHEMA: Final = PARENT_PLATFORM_SCHEMA.extend(TCP_PLATFORM_SCHEMA) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: dict[str, Any] | None = None, ) -> None: """Set up the TCP binary sensor.""" add_entities([TcpBinarySensor(hass, config)]) class TcpBinarySensor(TcpEntity, BinarySensorEntity): """A binary sensor which is on when its state == CONF_VALUE_ON.""" @property def is_on(self) -> bool: """Return true if the binary sensor is on.""" return self._state == self._config[CONF_VALUE_ON]
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/tcp/binary_sensor.py
"""Support for Z-Wave switches.""" import time from homeassistant.components.switch import DOMAIN, SwitchEntity from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from . import ZWaveDeviceEntity, workaround async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Z-Wave Switch from Config Entry.""" @callback def async_add_switch(switch): """Add Z-Wave Switch.""" async_add_entities([switch]) async_dispatcher_connect(hass, "zwave_new_switch", async_add_switch) def get_device(values, **kwargs): """Create zwave entity device.""" return ZwaveSwitch(values) class ZwaveSwitch(ZWaveDeviceEntity, SwitchEntity): """Representation of a Z-Wave switch.""" def __init__(self, values): """Initialize the Z-Wave switch device.""" ZWaveDeviceEntity.__init__(self, values, DOMAIN) self.refresh_on_update = ( workaround.get_device_mapping(values.primary) == workaround.WORKAROUND_REFRESH_NODE_ON_UPDATE ) self.last_update = time.perf_counter() self._state = self.values.primary.data def update_properties(self): """Handle data changes for node values.""" self._state = self.values.primary.data if self.refresh_on_update and time.perf_counter() - self.last_update > 30: self.last_update = time.perf_counter() self.node.request_state() @property def is_on(self): """Return true if device is on.""" return self._state def turn_on(self, **kwargs): """Turn the device on.""" self.node.set_switch(self.values.primary.value_id, True) def turn_off(self, **kwargs): """Turn the device off.""" self.node.set_switch(self.values.primary.value_id, False)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/zwave/switch.py
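The discovery tests repeated throughout this dump follow one pattern: patch the collaborators, drive the code under test, then assert on the recorded mock calls (for example, that a duplicate discovery is collapsed into a single call). Below is a minimal self-contained sketch of that pattern; the run_discovery helper and its names are hypothetical and not part of Home Assistant.

from unittest.mock import MagicMock

def run_discovery(scan, handle):
    """Invoke handle once per discovered (service, info) pair, skipping duplicates."""
    seen = set()
    for service, info in scan():
        key = (service, tuple(sorted(info.items())))
        if key not in seen:
            seen.add(key)
            handle(service, info)

def test_duplicates_reported_once():
    scan = MagicMock(return_value=[
        ("netgear_router", {"key": "value"}),
        ("netgear_router", {"key": "value"}),
    ])
    handle = MagicMock()
    run_discovery(scan, handle)
    assert handle.call_count == 1  # the duplicate discovery was collapsed
    handle.assert_called_with("netgear_router", {"key": "value"})

test_duplicates_reported_once()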
"""Provides functionality to interact with fans.""" from __future__ import annotations from datetime import timedelta import functools as ft import logging import math from typing import final import voluptuous as vol from homeassistant.const import ( SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import ( # noqa: F401 PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE, ) from homeassistant.helpers.entity import ToggleEntity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.loader import bind_hass from homeassistant.util.percentage import ( ordered_list_item_to_percentage, percentage_to_ordered_list_item, percentage_to_ranged_value, ranged_value_to_percentage, ) _LOGGER = logging.getLogger(__name__) DOMAIN = "fan" SCAN_INTERVAL = timedelta(seconds=30) ENTITY_ID_FORMAT = DOMAIN + ".{}" # Bitfield of features supported by the fan entity SUPPORT_SET_SPEED = 1 SUPPORT_OSCILLATE = 2 SUPPORT_DIRECTION = 4 SUPPORT_PRESET_MODE = 8 SERVICE_SET_SPEED = "set_speed" SERVICE_INCREASE_SPEED = "increase_speed" SERVICE_DECREASE_SPEED = "decrease_speed" SERVICE_OSCILLATE = "oscillate" SERVICE_SET_DIRECTION = "set_direction" SERVICE_SET_PERCENTAGE = "set_percentage" SERVICE_SET_PRESET_MODE = "set_preset_mode" SPEED_OFF = "off" SPEED_LOW = "low" SPEED_MEDIUM = "medium" SPEED_HIGH = "high" DIRECTION_FORWARD = "forward" DIRECTION_REVERSE = "reverse" ATTR_SPEED = "speed" ATTR_PERCENTAGE = "percentage" ATTR_PERCENTAGE_STEP = "percentage_step" ATTR_SPEED_LIST = "speed_list" ATTR_OSCILLATING = "oscillating" ATTR_DIRECTION = "direction" ATTR_PRESET_MODE = "preset_mode" ATTR_PRESET_MODES = "preset_modes" # Invalid speeds do not conform to the entity model, but have crept # into core integrations at some point so we are temporarily # accommodating them in the transition to percentages. 
_NOT_SPEED_OFF = "off" _NOT_SPEED_ON = "on" _NOT_SPEED_AUTO = "auto" _NOT_SPEED_SMART = "smart" _NOT_SPEED_INTERVAL = "interval" _NOT_SPEED_IDLE = "idle" _NOT_SPEED_FAVORITE = "favorite" _NOT_SPEED_SLEEP = "sleep" _NOT_SPEED_SILENT = "silent" _NOT_SPEEDS_FILTER = { _NOT_SPEED_OFF, _NOT_SPEED_ON, _NOT_SPEED_AUTO, _NOT_SPEED_SMART, _NOT_SPEED_INTERVAL, _NOT_SPEED_IDLE, _NOT_SPEED_SILENT, _NOT_SPEED_SLEEP, _NOT_SPEED_FAVORITE, } _FAN_NATIVE = "_fan_native" OFF_SPEED_VALUES = [SPEED_OFF, None] LEGACY_SPEED_LIST = [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH] class NoValidSpeedsError(ValueError): """Exception class when there are no valid speeds.""" class NotValidSpeedError(ValueError): """Exception class when the speed in not in the speed list.""" class NotValidPresetModeError(ValueError): """Exception class when the preset_mode in not in the preset_modes list.""" @bind_hass def is_on(hass, entity_id: str) -> bool: """Return if the fans are on based on the statemachine.""" state = hass.states.get(entity_id) if ATTR_SPEED in state.attributes: return state.attributes[ATTR_SPEED] not in OFF_SPEED_VALUES return state.state == STATE_ON async def async_setup(hass, config: dict): """Expose fan control via statemachine and services.""" component = hass.data[DOMAIN] = EntityComponent( _LOGGER, DOMAIN, hass, SCAN_INTERVAL ) await component.async_setup(config) # After the transition to percentage and preset_modes concludes, # switch this back to async_turn_on and remove async_turn_on_compat component.async_register_entity_service( SERVICE_TURN_ON, { vol.Optional(ATTR_SPEED): cv.string, vol.Optional(ATTR_PERCENTAGE): vol.All( vol.Coerce(int), vol.Range(min=0, max=100) ), vol.Optional(ATTR_PRESET_MODE): cv.string, }, "async_turn_on_compat", ) component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off") component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle") # After the transition to percentage and preset_modes concludes, # remove this service component.async_register_entity_service( SERVICE_SET_SPEED, {vol.Required(ATTR_SPEED): cv.string}, "async_set_speed_deprecated", [SUPPORT_SET_SPEED], ) component.async_register_entity_service( SERVICE_INCREASE_SPEED, { vol.Optional(ATTR_PERCENTAGE_STEP): vol.All( vol.Coerce(int), vol.Range(min=0, max=100) ) }, "async_increase_speed", [SUPPORT_SET_SPEED], ) component.async_register_entity_service( SERVICE_DECREASE_SPEED, { vol.Optional(ATTR_PERCENTAGE_STEP): vol.All( vol.Coerce(int), vol.Range(min=0, max=100) ) }, "async_decrease_speed", [SUPPORT_SET_SPEED], ) component.async_register_entity_service( SERVICE_OSCILLATE, {vol.Required(ATTR_OSCILLATING): cv.boolean}, "async_oscillate", [SUPPORT_OSCILLATE], ) component.async_register_entity_service( SERVICE_SET_DIRECTION, {vol.Optional(ATTR_DIRECTION): cv.string}, "async_set_direction", [SUPPORT_DIRECTION], ) component.async_register_entity_service( SERVICE_SET_PERCENTAGE, { vol.Required(ATTR_PERCENTAGE): vol.All( vol.Coerce(int), vol.Range(min=0, max=100) ) }, "async_set_percentage", [SUPPORT_SET_SPEED], ) component.async_register_entity_service( SERVICE_SET_PRESET_MODE, {vol.Required(ATTR_PRESET_MODE): cv.string}, "async_set_preset_mode", [SUPPORT_SET_SPEED, SUPPORT_PRESET_MODE], ) return True async def async_setup_entry(hass, entry): """Set up a config entry.""" return await hass.data[DOMAIN].async_setup_entry(entry) async def async_unload_entry(hass, entry): """Unload a config entry.""" return await hass.data[DOMAIN].async_unload_entry(entry) def _fan_native(method): """Native fan method 
not overridden.""" setattr(method, _FAN_NATIVE, True) return method class FanEntity(ToggleEntity): """Base class for fan entities.""" @_fan_native def set_speed(self, speed: str) -> None: """Set the speed of the fan.""" raise NotImplementedError() async def async_set_speed_deprecated(self, speed: str): """Set the speed of the fan.""" _LOGGER.warning( "The fan.set_speed service is deprecated, use fan.set_percentage or fan.set_preset_mode instead" ) await self.async_set_speed(speed) @_fan_native async def async_set_speed(self, speed: str): """Set the speed of the fan.""" if speed == SPEED_OFF: await self.async_turn_off() return if speed in self.preset_modes: if not hasattr(self.async_set_preset_mode, _FAN_NATIVE): await self.async_set_preset_mode(speed) return if not hasattr(self.set_preset_mode, _FAN_NATIVE): await self.hass.async_add_executor_job(self.set_preset_mode, speed) return else: if not hasattr(self.async_set_percentage, _FAN_NATIVE): await self.async_set_percentage(self.speed_to_percentage(speed)) return if not hasattr(self.set_percentage, _FAN_NATIVE): await self.hass.async_add_executor_job( self.set_percentage, self.speed_to_percentage(speed) ) return await self.hass.async_add_executor_job(self.set_speed, speed) @_fan_native def set_percentage(self, percentage: int) -> None: """Set the speed of the fan, as a percentage.""" raise NotImplementedError() @_fan_native async def async_set_percentage(self, percentage: int) -> None: """Set the speed of the fan, as a percentage.""" if percentage == 0: await self.async_turn_off() elif not hasattr(self.set_percentage, _FAN_NATIVE): await self.hass.async_add_executor_job(self.set_percentage, percentage) else: await self.async_set_speed(self.percentage_to_speed(percentage)) async def async_increase_speed(self, percentage_step: int | None = None) -> None: """Increase the speed of the fan.""" await self._async_adjust_speed(1, percentage_step) async def async_decrease_speed(self, percentage_step: int | None = None) -> None: """Decrease the speed of the fan.""" await self._async_adjust_speed(-1, percentage_step) async def _async_adjust_speed( self, modifier: int, percentage_step: int | None ) -> None: """Increase or decrease the speed of the fan.""" current_percentage = self.percentage or 0 if percentage_step is not None: new_percentage = current_percentage + (percentage_step * modifier) else: speed_range = (1, self.speed_count) speed_index = math.ceil( percentage_to_ranged_value(speed_range, current_percentage) ) new_percentage = ranged_value_to_percentage( speed_range, speed_index + modifier ) new_percentage = max(0, min(100, new_percentage)) await self.async_set_percentage(new_percentage) @_fan_native def set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" self._valid_preset_mode_or_raise(preset_mode) self.set_speed(preset_mode) @_fan_native async def async_set_preset_mode(self, preset_mode: str) -> None: """Set new preset mode.""" if not hasattr(self.set_preset_mode, _FAN_NATIVE): await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode) return self._valid_preset_mode_or_raise(preset_mode) await self.async_set_speed(preset_mode) def _valid_preset_mode_or_raise(self, preset_mode): """Raise NotValidPresetModeError on invalid preset_mode.""" preset_modes = self.preset_modes if preset_mode not in preset_modes: raise NotValidPresetModeError( f"The preset_mode {preset_mode} is not a valid preset_mode: {preset_modes}" ) def set_direction(self, direction: str) -> None: """Set the direction of the fan.""" raise 
NotImplementedError() async def async_set_direction(self, direction: str): """Set the direction of the fan.""" await self.hass.async_add_executor_job(self.set_direction, direction) # pylint: disable=arguments-differ def turn_on( self, speed: str | None = None, percentage: int | None = None, preset_mode: str | None = None, **kwargs, ) -> None: """Turn on the fan.""" raise NotImplementedError() async def async_turn_on_compat( self, speed: str | None = None, percentage: int | None = None, preset_mode: str | None = None, **kwargs, ) -> None: """Turn on the fan. This _compat version wraps async_turn_on with backwards and forward compatibility. After the transition to percentage and preset_modes concludes, it should be removed. """ if preset_mode is not None: self._valid_preset_mode_or_raise(preset_mode) speed = preset_mode percentage = None elif speed is not None: _LOGGER.warning( "Calling fan.turn_on with the speed argument is deprecated, use percentage or preset_mode instead" ) if speed in self.preset_modes: preset_mode = speed percentage = None else: percentage = self.speed_to_percentage(speed) elif percentage is not None: speed = self.percentage_to_speed(percentage) await self.async_turn_on( speed=speed, percentage=percentage, preset_mode=preset_mode, **kwargs, ) # pylint: disable=arguments-differ async def async_turn_on( self, speed: str | None = None, percentage: int | None = None, preset_mode: str | None = None, **kwargs, ) -> None: """Turn on the fan.""" if speed == SPEED_OFF: await self.async_turn_off() else: await self.hass.async_add_executor_job( ft.partial( self.turn_on, speed=speed, percentage=percentage, preset_mode=preset_mode, **kwargs, ) ) def oscillate(self, oscillating: bool) -> None: """Oscillate the fan.""" raise NotImplementedError() async def async_oscillate(self, oscillating: bool): """Oscillate the fan.""" await self.hass.async_add_executor_job(self.oscillate, oscillating) @property def is_on(self): """Return true if the entity is on.""" return self.speed not in [SPEED_OFF, None] @property def _implemented_percentage(self) -> bool: """Return true if percentage has been implemented.""" return not hasattr(self.set_percentage, _FAN_NATIVE) or not hasattr( self.async_set_percentage, _FAN_NATIVE ) @property def _implemented_preset_mode(self) -> bool: """Return true if preset_mode has been implemented.""" return not hasattr(self.set_preset_mode, _FAN_NATIVE) or not hasattr( self.async_set_preset_mode, _FAN_NATIVE ) @property def _implemented_speed(self) -> bool: """Return true if speed has been implemented.""" return not hasattr(self.set_speed, _FAN_NATIVE) or not hasattr( self.async_set_speed, _FAN_NATIVE ) @property def speed(self) -> str | None: """Return the current speed.""" if self._implemented_preset_mode: preset_mode = self.preset_mode if preset_mode: return preset_mode if self._implemented_percentage: percentage = self.percentage if percentage is None: return None return self.percentage_to_speed(percentage) return None @property def percentage(self) -> int | None: """Return the current speed as a percentage.""" if not self._implemented_preset_mode and self.speed in self.preset_modes: return None if not self._implemented_percentage: return self.speed_to_percentage(self.speed) return 0 @property def speed_count(self) -> int: """Return the number of speeds the fan supports.""" speed_list = speed_list_without_preset_modes(self.speed_list) if speed_list: return len(speed_list) return 100 @property def percentage_step(self) -> float: """Return the step size for 
percentage.""" return 100 / self.speed_count @property def speed_list(self) -> list: """Get the list of available speeds.""" speeds = [] if self._implemented_percentage: speeds += [SPEED_OFF, *LEGACY_SPEED_LIST] if self._implemented_preset_mode: speeds += self.preset_modes return speeds @property def current_direction(self) -> str | None: """Return the current direction of the fan.""" return None @property def oscillating(self): """Return whether or not the fan is currently oscillating.""" return None @property def capability_attributes(self): """Return capability attributes.""" attrs = {} if self.supported_features & SUPPORT_SET_SPEED: attrs[ATTR_SPEED_LIST] = self.speed_list if ( self.supported_features & SUPPORT_SET_SPEED or self.supported_features & SUPPORT_PRESET_MODE ): attrs[ATTR_PRESET_MODES] = self.preset_modes return attrs @property def _speed_list_without_preset_modes(self) -> list: """Return the speed list without preset modes. This property provides forward and backwards compatibility for conversion to percentage speeds. """ if not self._implemented_speed: return LEGACY_SPEED_LIST return speed_list_without_preset_modes(self.speed_list) def speed_to_percentage(self, speed: str) -> int: """ Map a speed to a percentage. Officially this should only have to deal with the 4 pre-defined speeds: return { SPEED_OFF: 0, SPEED_LOW: 33, SPEED_MEDIUM: 66, SPEED_HIGH: 100, }[speed] Unfortunately lots of fans make up their own speeds. So the default mapping is more dynamic. """ if speed in OFF_SPEED_VALUES: return 0 speed_list = self._speed_list_without_preset_modes if speed_list and speed not in speed_list: raise NotValidSpeedError(f"The speed {speed} is not a valid speed.") try: return ordered_list_item_to_percentage(speed_list, speed) except ValueError as ex: raise NoValidSpeedsError( f"The speed_list {speed_list} does not contain any valid speeds." ) from ex def percentage_to_speed(self, percentage: int) -> str: """ Map a percentage onto self.speed_list. Officially, this should only have to deal with 4 pre-defined speeds. if value == 0: return SPEED_OFF elif value <= 33: return SPEED_LOW elif value <= 66: return SPEED_MEDIUM else: return SPEED_HIGH Unfortunately there is currently a high degree of non-conformancy. Until fans have been corrected a more complicated and dynamic mapping is used. """ if percentage == 0: return SPEED_OFF speed_list = self._speed_list_without_preset_modes try: return percentage_to_ordered_list_item(speed_list, percentage) except ValueError as ex: raise NoValidSpeedsError( f"The speed_list {speed_list} does not contain any valid speeds." ) from ex @final @property def state_attributes(self) -> dict: """Return optional state attributes.""" data = {} supported_features = self.supported_features if supported_features & SUPPORT_DIRECTION: data[ATTR_DIRECTION] = self.current_direction if supported_features & SUPPORT_OSCILLATE: data[ATTR_OSCILLATING] = self.oscillating if supported_features & SUPPORT_SET_SPEED: data[ATTR_SPEED] = self.speed data[ATTR_PERCENTAGE] = self.percentage data[ATTR_PERCENTAGE_STEP] = self.percentage_step if ( supported_features & SUPPORT_PRESET_MODE or supported_features & SUPPORT_SET_SPEED ): data[ATTR_PRESET_MODE] = self.preset_mode return data @property def supported_features(self) -> int: """Flag supported features.""" return 0 @property def preset_mode(self) -> str | None: """Return the current preset mode, e.g., auto, smart, interval, favorite. Requires SUPPORT_SET_SPEED. 
""" speed = self.speed if speed in self.preset_modes: return speed return None @property def preset_modes(self) -> list[str] | None: """Return a list of available preset modes. Requires SUPPORT_SET_SPEED. """ return preset_modes_from_speed_list(self.speed_list) def speed_list_without_preset_modes(speed_list: list): """Filter out non-speeds from the speed list. The goal is to get the speeds in a list from lowest to highest by removing speeds that are not valid or out of order so we can map them to percentages. Examples: input: ["off", "low", "low-medium", "medium", "medium-high", "high", "auto"] output: ["low", "low-medium", "medium", "medium-high", "high"] input: ["off", "auto", "low", "medium", "high"] output: ["low", "medium", "high"] input: ["off", "1", "2", "3", "4", "5", "6", "7", "smart"] output: ["1", "2", "3", "4", "5", "6", "7"] input: ["Auto", "Silent", "Favorite", "Idle", "Medium", "High", "Strong"] output: ["Medium", "High", "Strong"] """ return [speed for speed in speed_list if speed.lower() not in _NOT_SPEEDS_FILTER] def preset_modes_from_speed_list(speed_list: list): """Filter out non-preset modes from the speed list. The goal is to return only preset modes. Examples: input: ["off", "low", "low-medium", "medium", "medium-high", "high", "auto"] output: ["auto"] input: ["off", "auto", "low", "medium", "high"] output: ["auto"] input: ["off", "1", "2", "3", "4", "5", "6", "7", "smart"] output: ["smart"] input: ["Auto", "Silent", "Favorite", "Idle", "Medium", "High", "Strong"] output: ["Auto", "Silent", "Favorite", "Idle"] """ return [ speed for speed in speed_list if speed.lower() in _NOT_SPEEDS_FILTER and speed.lower() != SPEED_OFF ]
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/fan/__init__.py
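The fan entity above leans on homeassistant.util.percentage to translate legacy named speeds into equal-width percentage buckets. A rough standalone sketch of that bucket mapping follows; it is re-implemented here from the behaviour the fan code relies on, so details may differ from the real helpers.

def ordered_list_item_to_percentage(ordered_list, item):
    """Map an item to the upper bound of its equal-width percentage bucket."""
    if item not in ordered_list:
        raise ValueError(f"The item {item} is not in {ordered_list}")
    position = ordered_list.index(item) + 1
    return (position * 100) // len(ordered_list)

def percentage_to_ordered_list_item(ordered_list, percentage):
    """Return the first item whose bucket upper bound covers the percentage."""
    for offset, item in enumerate(ordered_list):
        if percentage <= ((offset + 1) * 100) // len(ordered_list):
            return item
    return ordered_list[-1]

LEGACY = ["low", "medium", "high"]
assert ordered_list_item_to_percentage(LEGACY, "medium") == 66
assert percentage_to_ordered_list_item(LEGACY, 50) == "medium"  # 34-66 bucket
assert percentage_to_ordered_list_item(LEGACY, 100) == "high"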
"""Support for tracking which astronomical or meteorological season it is.""" from datetime import datetime import logging import ephem import voluptuous as vol from homeassistant import util from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import CONF_NAME, CONF_TYPE import homeassistant.helpers.config_validation as cv from homeassistant.util.dt import utcnow _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "Season" EQUATOR = "equator" NORTHERN = "northern" SOUTHERN = "southern" STATE_AUTUMN = "autumn" STATE_SPRING = "spring" STATE_SUMMER = "summer" STATE_WINTER = "winter" TYPE_ASTRONOMICAL = "astronomical" TYPE_METEOROLOGICAL = "meteorological" VALID_TYPES = [TYPE_ASTRONOMICAL, TYPE_METEOROLOGICAL] HEMISPHERE_SEASON_SWAP = { STATE_WINTER: STATE_SUMMER, STATE_SPRING: STATE_AUTUMN, STATE_AUTUMN: STATE_SPRING, STATE_SUMMER: STATE_WINTER, } SEASON_ICONS = { STATE_SPRING: "mdi:flower", STATE_SUMMER: "mdi:sunglasses", STATE_AUTUMN: "mdi:leaf", STATE_WINTER: "mdi:snowflake", } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_TYPE, default=TYPE_ASTRONOMICAL): vol.In(VALID_TYPES), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Display the current season.""" if None in (hass.config.latitude, hass.config.longitude): _LOGGER.error("Latitude or longitude not set in Home Assistant config") return False latitude = util.convert(hass.config.latitude, float) _type = config.get(CONF_TYPE) name = config.get(CONF_NAME) if latitude < 0: hemisphere = SOUTHERN elif latitude > 0: hemisphere = NORTHERN else: hemisphere = EQUATOR _LOGGER.debug(_type) add_entities([Season(hass, hemisphere, _type, name)], True) return True def get_season(date, hemisphere, season_tracking_type): """Calculate the current season.""" if hemisphere == "equator": return None if season_tracking_type == TYPE_ASTRONOMICAL: spring_start = ephem.next_equinox(str(date.year)).datetime() summer_start = ephem.next_solstice(str(date.year)).datetime() autumn_start = ephem.next_equinox(spring_start).datetime() winter_start = ephem.next_solstice(summer_start).datetime() else: spring_start = datetime(2017, 3, 1).replace(year=date.year) summer_start = spring_start.replace(month=6) autumn_start = spring_start.replace(month=9) winter_start = spring_start.replace(month=12) if spring_start <= date < summer_start: season = STATE_SPRING elif summer_start <= date < autumn_start: season = STATE_SUMMER elif autumn_start <= date < winter_start: season = STATE_AUTUMN elif winter_start <= date or spring_start > date: season = STATE_WINTER # If user is located in the southern hemisphere swap the season if hemisphere == NORTHERN: return season return HEMISPHERE_SEASON_SWAP.get(season) class Season(SensorEntity): """Representation of the current season.""" def __init__(self, hass, hemisphere, season_tracking_type, name): """Initialize the season.""" self.hass = hass self._name = name self.hemisphere = hemisphere self.datetime = None self.type = season_tracking_type self.season = None @property def name(self): """Return the name.""" return self._name @property def state(self): """Return the current season.""" return self.season @property def device_class(self): """Return the device class.""" return "season__season" @property def icon(self): """Icon to use in the frontend, if any.""" return SEASON_ICONS.get(self.season, "mdi:cloud") def update(self): """Update season.""" self.datetime = utcnow().replace(tzinfo=None) 
self.season = get_season(self.datetime, self.hemisphere, self.type)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/season/sensor.py
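The meteorological branch of get_season() above reduces to fixed month boundaries (seasons start on the 1st of March, June, September, and December) plus a hemisphere swap. A self-contained sketch of just that branch; the astronomical branch needs ephem and is omitted here.

from datetime import date

MET_SEASONS = {3: "spring", 6: "summer", 9: "autumn", 12: "winter"}
SWAP = {"spring": "autumn", "summer": "winter", "autumn": "spring", "winter": "summer"}

def meteorological_season(day, hemisphere="northern"):
    """Return the meteorological season for a date, or None on the equator."""
    if hemisphere == "equator":
        return None
    start_month = 3 * (day.month // 3) or 12  # months 12, 1, 2 all map to winter
    season = MET_SEASONS[start_month]
    return season if hemisphere == "northern" else SWAP[season]

assert meteorological_season(date(2021, 7, 15)) == "summer"
assert meteorological_season(date(2021, 1, 5), "southern") == "summer"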
"""Methods and classes related to executing Z-Wave commands and publishing these to hass.""" import logging from openzwavemqtt.const import ATTR_LABEL, ATTR_POSITION, ATTR_VALUE from openzwavemqtt.util.node import get_node_from_manager, set_config_parameter import voluptuous as vol from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from . import const _LOGGER = logging.getLogger(__name__) class ZWaveServices: """Class that holds our services ( Zwave Commands) that should be published to hass.""" def __init__(self, hass, manager): """Initialize with both hass and ozwmanager objects.""" self._hass = hass self._manager = manager @callback def async_register(self): """Register all our services.""" self._hass.services.async_register( const.DOMAIN, const.SERVICE_ADD_NODE, self.async_add_node, schema=vol.Schema( { vol.Optional(const.ATTR_INSTANCE_ID, default=1): vol.Coerce(int), vol.Optional(const.ATTR_SECURE, default=False): vol.Coerce(bool), } ), ) self._hass.services.async_register( const.DOMAIN, const.SERVICE_REMOVE_NODE, self.async_remove_node, schema=vol.Schema( {vol.Optional(const.ATTR_INSTANCE_ID, default=1): vol.Coerce(int)} ), ) self._hass.services.async_register( const.DOMAIN, const.SERVICE_CANCEL_COMMAND, self.async_cancel_command, schema=vol.Schema( {vol.Optional(const.ATTR_INSTANCE_ID, default=1): vol.Coerce(int)} ), ) self._hass.services.async_register( const.DOMAIN, const.SERVICE_SET_CONFIG_PARAMETER, self.async_set_config_parameter, schema=vol.Schema( { vol.Optional(const.ATTR_INSTANCE_ID, default=1): vol.Coerce(int), vol.Required(const.ATTR_NODE_ID): vol.Coerce(int), vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int), vol.Required(const.ATTR_CONFIG_VALUE): vol.Any( vol.All( cv.ensure_list, [ vol.All( { vol.Exclusive(ATTR_LABEL, "bit"): cv.string, vol.Exclusive(ATTR_POSITION, "bit"): vol.Coerce( int ), vol.Required(ATTR_VALUE): bool, }, cv.has_at_least_one_key(ATTR_LABEL, ATTR_POSITION), ) ], ), vol.Coerce(int), bool, cv.string, ), } ), ) @callback def async_set_config_parameter(self, service): """Set a config parameter to a node.""" instance_id = service.data[const.ATTR_INSTANCE_ID] node_id = service.data[const.ATTR_NODE_ID] param = service.data[const.ATTR_CONFIG_PARAMETER] selection = service.data[const.ATTR_CONFIG_VALUE] # These function calls may raise an exception but that's ok because # the exception will show in the UI to the user node = get_node_from_manager(self._manager, instance_id, node_id) payload = set_config_parameter(node, param, selection) _LOGGER.info( "Setting configuration parameter %s on Node %s with value %s", param, node_id, payload, ) @callback def async_add_node(self, service): """Enter inclusion mode on the controller.""" instance_id = service.data[const.ATTR_INSTANCE_ID] secure = service.data[const.ATTR_SECURE] instance = self._manager.get_instance(instance_id) if instance is None: raise ValueError(f"No OpenZWave Instance with ID {instance_id}") instance.add_node(secure) @callback def async_remove_node(self, service): """Enter exclusion mode on the controller.""" instance_id = service.data[const.ATTR_INSTANCE_ID] instance = self._manager.get_instance(instance_id) if instance is None: raise ValueError(f"No OpenZWave Instance with ID {instance_id}") instance.remove_node() @callback def async_cancel_command(self, service): """Tell the controller to cancel an add or remove command.""" instance_id = service.data[const.ATTR_INSTANCE_ID] instance = self._manager.get_instance(instance_id) if instance is None: 
raise ValueError(f"No OpenZWave Instance with ID {instance_id}") instance.cancel_controller_command()
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/ozw/services.py
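The SERVICE_SET_CONFIG_PARAMETER schema above accepts an int, a bool, a string, or a list of bit assignments addressed by label or position, but never both at once. A trimmed sketch of that voluptuous idea, dropping the hass-specific validators such as cv.has_at_least_one_key; the key names here stand in for the const.ATTR_* constants.

import voluptuous as vol

BIT = {
    vol.Exclusive("label", "bit"): str,
    vol.Exclusive("position", "bit"): vol.Coerce(int),
    vol.Required("value"): bool,
}
CONFIG_VALUE = vol.Any([BIT], vol.Coerce(int), bool, str)
SCHEMA = vol.Schema(
    {vol.Required("parameter"): vol.Coerce(int), vol.Required("value"): CONFIG_VALUE}
)

# Coerces "3" to 3 and validates the bit-assignment list.
print(SCHEMA({"parameter": "3", "value": [{"position": 1, "value": True}]}))
# vol.Exclusive rejects a payload that names both "label" and "position".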
"""Config flow utilities.""" from collections import OrderedDict from pyvesync import VeSync import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from homeassistant.core import callback from .const import DOMAIN class VeSyncFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow.""" VERSION = 1 def __init__(self): """Instantiate config flow.""" self._username = None self._password = None self.data_schema = OrderedDict() self.data_schema[vol.Required(CONF_USERNAME)] = str self.data_schema[vol.Required(CONF_PASSWORD)] = str @callback def _show_form(self, errors=None): """Show form to the user.""" return self.async_show_form( step_id="user", data_schema=vol.Schema(self.data_schema), errors=errors if errors else {}, ) async def async_step_import(self, import_config): """Handle external yaml configuration.""" return await self.async_step_user(import_config) async def async_step_user(self, user_input=None): """Handle a flow start.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if not user_input: return self._show_form() self._username = user_input[CONF_USERNAME] self._password = user_input[CONF_PASSWORD] manager = VeSync(self._username, self._password) login = await self.hass.async_add_executor_job(manager.login) if not login: return self._show_form(errors={"base": "invalid_auth"}) return self.async_create_entry( title=self._username, data={CONF_USERNAME: self._username, CONF_PASSWORD: self._password}, )
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/vesync/config_flow.py
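VeSync.login is a blocking network call, so the flow above pushes it onto a worker thread with hass.async_add_executor_job to keep the event loop responsive. The same pattern with plain asyncio is sketched below; blocking_login is a stand-in for the pyvesync call, not its real API.

import asyncio
import time

def blocking_login(username, password):
    """Stand-in for a blocking network round-trip such as VeSync.login."""
    time.sleep(0.1)
    return bool(username and password)

async def async_login(username, password):
    loop = asyncio.get_running_loop()
    # Run the blocking call in the default thread pool and await its result.
    return await loop.run_in_executor(None, blocking_login, username, password)

print(asyncio.run(async_login("user@example.com", "hunter2")))  # True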
"""Component to interface with cameras.""" from __future__ import annotations import asyncio import base64 import collections from collections.abc import Awaitable, Mapping from contextlib import suppress from datetime import datetime, timedelta import hashlib import logging import os from random import SystemRandom from typing import Callable, Final, cast, final from aiohttp import web import async_timeout import attr import voluptuous as vol from homeassistant.components import websocket_api from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView from homeassistant.components.media_player.const import ( ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_EXTRA, DOMAIN as DOMAIN_MP, SERVICE_PLAY_MEDIA, ) from homeassistant.components.stream import Stream, create_stream from homeassistant.components.stream.const import FORMAT_CONTENT_TYPE, OUTPUT_FORMATS from homeassistant.components.websocket_api import ActiveConnection from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_ENTITY_ID, CONF_FILENAME, CONTENT_TYPE_MULTIPART, EVENT_HOMEASSISTANT_START, SERVICE_TURN_OFF, SERVICE_TURN_ON, ) from homeassistant.core import Event, HomeAssistant, ServiceCall, callback from homeassistant.exceptions import HomeAssistantError import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import ( # noqa: F401 PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE, ) from homeassistant.helpers.entity import Entity, entity_sources from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.network import get_url from homeassistant.helpers.typing import ConfigType from homeassistant.loader import bind_hass from .const import ( CAMERA_IMAGE_TIMEOUT, CAMERA_STREAM_SOURCE_TIMEOUT, CONF_DURATION, CONF_LOOKBACK, DATA_CAMERA_PREFS, DOMAIN, SERVICE_RECORD, ) from .prefs import CameraPreferences # mypy: allow-untyped-calls _LOGGER = logging.getLogger(__name__) SERVICE_ENABLE_MOTION: Final = "enable_motion_detection" SERVICE_DISABLE_MOTION: Final = "disable_motion_detection" SERVICE_SNAPSHOT: Final = "snapshot" SERVICE_PLAY_STREAM: Final = "play_stream" SCAN_INTERVAL: Final = timedelta(seconds=30) ENTITY_ID_FORMAT: Final = DOMAIN + ".{}" ATTR_FILENAME: Final = "filename" ATTR_MEDIA_PLAYER: Final = "media_player" ATTR_FORMAT: Final = "format" STATE_RECORDING: Final = "recording" STATE_STREAMING: Final = "streaming" STATE_IDLE: Final = "idle" # Bitfield of features supported by the camera entity SUPPORT_ON_OFF: Final = 1 SUPPORT_STREAM: Final = 2 DEFAULT_CONTENT_TYPE: Final = "image/jpeg" ENTITY_IMAGE_URL: Final = "/api/camera_proxy/{0}?token={1}" TOKEN_CHANGE_INTERVAL: Final = timedelta(minutes=5) _RND: Final = SystemRandom() MIN_STREAM_INTERVAL: Final = 0.5 # seconds CAMERA_SERVICE_SNAPSHOT: Final = {vol.Required(ATTR_FILENAME): cv.template} CAMERA_SERVICE_PLAY_STREAM: Final = { vol.Required(ATTR_MEDIA_PLAYER): cv.entities_domain(DOMAIN_MP), vol.Optional(ATTR_FORMAT, default="hls"): vol.In(OUTPUT_FORMATS), } CAMERA_SERVICE_RECORD: Final = { vol.Required(CONF_FILENAME): cv.template, vol.Optional(CONF_DURATION, default=30): vol.Coerce(int), vol.Optional(CONF_LOOKBACK, default=0): vol.Coerce(int), } WS_TYPE_CAMERA_THUMBNAIL: Final = "camera_thumbnail" SCHEMA_WS_CAMERA_THUMBNAIL: Final = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend( { vol.Required("type"): WS_TYPE_CAMERA_THUMBNAIL, vol.Required("entity_id"): cv.entity_id, } ) @attr.s class Image: """Represent an image.""" content_type: str = 
attr.ib() content: bytes = attr.ib() @bind_hass async def async_request_stream(hass: HomeAssistant, entity_id: str, fmt: str) -> str: """Request a stream for a camera entity.""" camera = _get_camera_from_entity_id(hass, entity_id) return await _async_stream_endpoint_url(hass, camera, fmt) @bind_hass async def async_get_image( hass: HomeAssistant, entity_id: str, timeout: int = 10 ) -> Image: """Fetch an image from a camera entity.""" camera = _get_camera_from_entity_id(hass, entity_id) with suppress(asyncio.CancelledError, asyncio.TimeoutError): async with async_timeout.timeout(timeout): image = await camera.async_camera_image() if image: return Image(camera.content_type, image) raise HomeAssistantError("Unable to get image") @bind_hass async def async_get_stream_source(hass: HomeAssistant, entity_id: str) -> str | None: """Fetch the stream source for a camera entity.""" camera = _get_camera_from_entity_id(hass, entity_id) return await camera.stream_source() @bind_hass async def async_get_mjpeg_stream( hass: HomeAssistant, request: web.Request, entity_id: str ) -> web.StreamResponse | None: """Fetch an mjpeg stream from a camera entity.""" camera = _get_camera_from_entity_id(hass, entity_id) return await camera.handle_async_mjpeg_stream(request) async def async_get_still_stream( request: web.Request, image_cb: Callable[[], Awaitable[bytes | None]], content_type: str, interval: float, ) -> web.StreamResponse: """Generate an HTTP MJPEG stream from camera images. This method must be run in the event loop. """ response = web.StreamResponse() response.content_type = CONTENT_TYPE_MULTIPART.format("--frameboundary") await response.prepare(request) async def write_to_mjpeg_stream(img_bytes: bytes) -> None: """Write image to stream.""" await response.write( bytes( "--frameboundary\r\n" "Content-Type: {}\r\n" "Content-Length: {}\r\n\r\n".format(content_type, len(img_bytes)), "utf-8", ) + img_bytes + b"\r\n" ) last_image = None while True: img_bytes = await image_cb() if not img_bytes: break if img_bytes != last_image: await write_to_mjpeg_stream(img_bytes) # Chrome seems to always ignore first picture, # print it twice. 
if last_image is None: await write_to_mjpeg_stream(img_bytes) last_image = img_bytes await asyncio.sleep(interval) return response def _get_camera_from_entity_id(hass: HomeAssistant, entity_id: str) -> Camera: """Get camera component from entity_id.""" component = hass.data.get(DOMAIN) if component is None: raise HomeAssistantError("Camera integration not set up") camera = component.get_entity(entity_id) if camera is None: raise HomeAssistantError("Camera not found") if not camera.is_on: raise HomeAssistantError("Camera is off") return cast(Camera, camera) async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up the camera component.""" component = hass.data[DOMAIN] = EntityComponent( _LOGGER, DOMAIN, hass, SCAN_INTERVAL ) prefs = CameraPreferences(hass) await prefs.async_initialize() hass.data[DATA_CAMERA_PREFS] = prefs hass.http.register_view(CameraImageView(component)) hass.http.register_view(CameraMjpegStream(component)) hass.components.websocket_api.async_register_command( WS_TYPE_CAMERA_THUMBNAIL, websocket_camera_thumbnail, SCHEMA_WS_CAMERA_THUMBNAIL ) hass.components.websocket_api.async_register_command(ws_camera_stream) hass.components.websocket_api.async_register_command(websocket_get_prefs) hass.components.websocket_api.async_register_command(websocket_update_prefs) await component.async_setup(config) async def preload_stream(_event: Event) -> None: for camera in component.entities: camera = cast(Camera, camera) camera_prefs = prefs.get(camera.entity_id) if not camera_prefs.preload_stream: continue stream = await camera.create_stream() if not stream: continue stream.keepalive = True stream.add_provider("hls") stream.start() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, preload_stream) @callback def update_tokens(time: datetime) -> None: """Update tokens of the entities.""" for entity in component.entities: entity = cast(Camera, entity) entity.async_update_token() entity.async_write_ha_state() hass.helpers.event.async_track_time_interval(update_tokens, TOKEN_CHANGE_INTERVAL) component.async_register_entity_service( SERVICE_ENABLE_MOTION, {}, "async_enable_motion_detection" ) component.async_register_entity_service( SERVICE_DISABLE_MOTION, {}, "async_disable_motion_detection" ) component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off") component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on") component.async_register_entity_service( SERVICE_SNAPSHOT, CAMERA_SERVICE_SNAPSHOT, async_handle_snapshot_service ) component.async_register_entity_service( SERVICE_PLAY_STREAM, CAMERA_SERVICE_PLAY_STREAM, async_handle_play_stream_service, ) component.async_register_entity_service( SERVICE_RECORD, CAMERA_SERVICE_RECORD, async_handle_record_service ) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up a config entry.""" component: EntityComponent = hass.data[DOMAIN] return await component.async_setup_entry(entry) async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" component: EntityComponent = hass.data[DOMAIN] return await component.async_unload_entry(entry) class Camera(Entity): """The base class for camera entities.""" def __init__(self) -> None: """Initialize a camera.""" self.is_streaming: bool = False self.stream: Stream | None = None self.stream_options: dict[str, str] = {} self.content_type: str = DEFAULT_CONTENT_TYPE self.access_tokens: collections.deque = collections.deque([], 2) self.async_update_token() 
@property def should_poll(self) -> bool: """No need to poll cameras.""" return False @property def entity_picture(self) -> str: """Return a link to the camera feed as entity picture.""" return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1]) @property def supported_features(self) -> int: """Flag supported features.""" return 0 @property def is_recording(self) -> bool: """Return true if the device is recording.""" return False @property def brand(self) -> str | None: """Return the camera brand.""" return None @property def motion_detection_enabled(self) -> bool: """Return the camera motion detection status.""" return False @property def model(self) -> str | None: """Return the camera model.""" return None @property def frame_interval(self) -> float: """Return the interval between frames of the mjpeg stream.""" return MIN_STREAM_INTERVAL async def create_stream(self) -> Stream | None: """Create a Stream for stream_source.""" # There is at most one stream (a decode worker) per camera if not self.stream: async with async_timeout.timeout(CAMERA_STREAM_SOURCE_TIMEOUT): source = await self.stream_source() if not source: return None self.stream = create_stream(self.hass, source, options=self.stream_options) return self.stream async def stream_source(self) -> str | None: """Return the source of the stream.""" return None def camera_image(self) -> bytes | None: """Return bytes of camera image.""" raise NotImplementedError() async def async_camera_image(self) -> bytes | None: """Return bytes of camera image.""" return await self.hass.async_add_executor_job(self.camera_image) async def handle_async_still_stream( self, request: web.Request, interval: float ) -> web.StreamResponse: """Generate an HTTP MJPEG stream from camera images.""" return await async_get_still_stream( request, self.async_camera_image, self.content_type, interval ) async def handle_async_mjpeg_stream( self, request: web.Request ) -> web.StreamResponse | None: """Serve an HTTP MJPEG stream from the camera. This method can be overridden by camera platforms to proxy a direct stream from the camera. 
""" return await self.handle_async_still_stream(request, self.frame_interval) @property def state(self) -> str: """Return the camera state.""" if self.is_recording: return STATE_RECORDING if self.is_streaming: return STATE_STREAMING return STATE_IDLE @property def is_on(self) -> bool: """Return true if on.""" return True def turn_off(self) -> None: """Turn off camera.""" raise NotImplementedError() async def async_turn_off(self) -> None: """Turn off camera.""" await self.hass.async_add_executor_job(self.turn_off) def turn_on(self) -> None: """Turn off camera.""" raise NotImplementedError() async def async_turn_on(self) -> None: """Turn off camera.""" await self.hass.async_add_executor_job(self.turn_on) def enable_motion_detection(self) -> None: """Enable motion detection in the camera.""" raise NotImplementedError() async def async_enable_motion_detection(self) -> None: """Call the job and enable motion detection.""" await self.hass.async_add_executor_job(self.enable_motion_detection) def disable_motion_detection(self) -> None: """Disable motion detection in camera.""" raise NotImplementedError() async def async_disable_motion_detection(self) -> None: """Call the job and disable motion detection.""" await self.hass.async_add_executor_job(self.disable_motion_detection) @final @property def state_attributes(self) -> dict[str, str | None]: """Return the camera state attributes.""" attrs = {"access_token": self.access_tokens[-1]} if self.model: attrs["model_name"] = self.model if self.brand: attrs["brand"] = self.brand if self.motion_detection_enabled: attrs["motion_detection"] = self.motion_detection_enabled return attrs @callback def async_update_token(self) -> None: """Update the used token.""" self.access_tokens.append( hashlib.sha256(_RND.getrandbits(256).to_bytes(32, "little")).hexdigest() ) class CameraView(HomeAssistantView): """Base CameraView.""" requires_auth = False def __init__(self, component: EntityComponent) -> None: """Initialize a basic camera view.""" self.component = component async def get(self, request: web.Request, entity_id: str) -> web.StreamResponse: """Start a GET request.""" camera = self.component.get_entity(entity_id) if camera is None: raise web.HTTPNotFound() camera = cast(Camera, camera) authenticated = ( request[KEY_AUTHENTICATED] or request.query.get("token") in camera.access_tokens ) if not authenticated: raise web.HTTPUnauthorized() if not camera.is_on: _LOGGER.debug("Camera is off") raise web.HTTPServiceUnavailable() return await self.handle(request, camera) async def handle(self, request: web.Request, camera: Camera) -> web.StreamResponse: """Handle the camera request.""" raise NotImplementedError() class CameraImageView(CameraView): """Camera view to serve an image.""" url = "/api/camera_proxy/{entity_id}" name = "api:camera:image" async def handle(self, request: web.Request, camera: Camera) -> web.Response: """Serve camera image.""" with suppress(asyncio.CancelledError, asyncio.TimeoutError): async with async_timeout.timeout(CAMERA_IMAGE_TIMEOUT): image = await camera.async_camera_image() if image: return web.Response(body=image, content_type=camera.content_type) raise web.HTTPInternalServerError() class CameraMjpegStream(CameraView): """Camera View to serve an MJPEG stream.""" url = "/api/camera_proxy_stream/{entity_id}" name = "api:camera:stream" async def handle(self, request: web.Request, camera: Camera) -> web.StreamResponse: """Serve camera stream, possibly with interval.""" interval_str = request.query.get("interval") if interval_str is None: 
            stream = await camera.handle_async_mjpeg_stream(request)
            if stream is None:
                raise web.HTTPBadGateway()
            return stream

        try:
            # Compose camera stream from stills
            interval = float(interval_str)
            if interval < MIN_STREAM_INTERVAL:
                raise ValueError(f"Stream interval must be > {MIN_STREAM_INTERVAL}")
            return await camera.handle_async_still_stream(request, interval)
        except ValueError as err:
            raise web.HTTPBadRequest() from err


@websocket_api.async_response
async def websocket_camera_thumbnail(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Handle get camera thumbnail websocket command.

    Async friendly.
    """
    _LOGGER.warning("The websocket command 'camera_thumbnail' has been deprecated")
    try:
        image = await async_get_image(hass, msg["entity_id"])
        await connection.send_big_result(
            msg["id"],
            {
                "content_type": image.content_type,
                "content": base64.b64encode(image.content).decode("utf-8"),
            },
        )
    except HomeAssistantError:
        connection.send_message(
            websocket_api.error_message(
                msg["id"], "image_fetch_failed", "Unable to fetch image"
            )
        )


@websocket_api.websocket_command(
    {
        vol.Required("type"): "camera/stream",
        vol.Required("entity_id"): cv.entity_id,
        vol.Optional("format", default="hls"): vol.In(OUTPUT_FORMATS),
    }
)
@websocket_api.async_response
async def ws_camera_stream(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Handle get camera stream websocket command.

    Async friendly.
    """
    try:
        entity_id = msg["entity_id"]
        camera = _get_camera_from_entity_id(hass, entity_id)
        url = await _async_stream_endpoint_url(hass, camera, fmt=msg["format"])
        connection.send_result(msg["id"], {"url": url})
    except HomeAssistantError as ex:
        _LOGGER.error("Error requesting stream: %s", ex)
        connection.send_error(msg["id"], "start_stream_failed", str(ex))
    except asyncio.TimeoutError:
        _LOGGER.error("Timeout getting stream source")
        connection.send_error(
            msg["id"], "start_stream_failed", "Timeout getting stream source"
        )


@websocket_api.websocket_command(
    {vol.Required("type"): "camera/get_prefs", vol.Required("entity_id"): cv.entity_id}
)
@websocket_api.async_response
async def websocket_get_prefs(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Handle request for camera preferences."""
    prefs = hass.data[DATA_CAMERA_PREFS].get(msg["entity_id"])
    connection.send_result(msg["id"], prefs.as_dict())


@websocket_api.websocket_command(
    {
        vol.Required("type"): "camera/update_prefs",
        vol.Required("entity_id"): cv.entity_id,
        vol.Optional("preload_stream"): bool,
    }
)
@websocket_api.async_response
async def websocket_update_prefs(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
    """Handle request to update camera preferences."""
    prefs = hass.data[DATA_CAMERA_PREFS]

    changes = dict(msg)
    changes.pop("id")
    changes.pop("type")
    entity_id = changes.pop("entity_id")
    await prefs.async_update(entity_id, **changes)

    connection.send_result(msg["id"], prefs.get(entity_id).as_dict())


async def async_handle_snapshot_service(
    camera: Camera, service_call: ServiceCall
) -> None:
    """Handle snapshot service calls."""
    hass = camera.hass
    filename = service_call.data[ATTR_FILENAME]
    filename.hass = hass

    snapshot_file = filename.async_render(variables={ATTR_ENTITY_ID: camera})

    # check if we are allowed to access that file
    if not hass.config.is_allowed_path(snapshot_file):
        _LOGGER.error("Can't write %s, no access to path!", snapshot_file)
        return

    image = await camera.async_camera_image()

    def _write_image(to_file: str, image_data: bytes | None) -> None:
        """Executor helper to write image."""
        if image_data is None:
            return
        if not os.path.exists(os.path.dirname(to_file)):
            os.makedirs(os.path.dirname(to_file), exist_ok=True)
        with open(to_file, "wb") as img_file:
            img_file.write(image_data)

    try:
        await hass.async_add_executor_job(_write_image, snapshot_file, image)
    except OSError as err:
        _LOGGER.error("Can't write image to file: %s", err)


async def async_handle_play_stream_service(
    camera: Camera, service_call: ServiceCall
) -> None:
    """Handle play stream service calls."""
    fmt = service_call.data[ATTR_FORMAT]
    url = await _async_stream_endpoint_url(camera.hass, camera, fmt)

    hass = camera.hass
    data: Mapping[str, str] = {
        ATTR_MEDIA_CONTENT_ID: f"{get_url(hass)}{url}",
        ATTR_MEDIA_CONTENT_TYPE: FORMAT_CONTENT_TYPE[fmt],
    }

    # It is required to send a different payload for cast media players
    entity_ids = service_call.data[ATTR_MEDIA_PLAYER]
    sources = entity_sources(hass)
    cast_entity_ids = [
        entity
        for entity in entity_ids
        # All entities should be in sources. This extra guard is to
        # avoid people writing to the state machine and breaking it.
        if entity in sources and sources[entity]["domain"] == "cast"
    ]
    other_entity_ids = list(set(entity_ids) - set(cast_entity_ids))

    if cast_entity_ids:
        await hass.services.async_call(
            DOMAIN_MP,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: cast_entity_ids,
                **data,
                ATTR_MEDIA_EXTRA: {
                    "stream_type": "LIVE",
                    "media_info": {
                        "hlsVideoSegmentFormat": "fmp4",
                    },
                },
            },
            blocking=True,
            context=service_call.context,
        )

    if other_entity_ids:
        await hass.services.async_call(
            DOMAIN_MP,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: other_entity_ids,
                **data,
            },
            blocking=True,
            context=service_call.context,
        )


async def _async_stream_endpoint_url(
    hass: HomeAssistant, camera: Camera, fmt: str
) -> str:
    stream = await camera.create_stream()
    if not stream:
        raise HomeAssistantError(
            f"{camera.entity_id} does not support play stream service"
        )

    # Update keepalive setting which manages idle shutdown
    camera_prefs = hass.data[DATA_CAMERA_PREFS].get(camera.entity_id)
    stream.keepalive = camera_prefs.preload_stream

    stream.add_provider(fmt)
    stream.start()
    return stream.endpoint_url(fmt)


async def async_handle_record_service(
    camera: Camera, service_call: ServiceCall
) -> None:
    """Handle stream recording service calls."""
    stream = await camera.create_stream()

    if not stream:
        raise HomeAssistantError(f"{camera.entity_id} does not support record service")

    hass = camera.hass
    filename = service_call.data[CONF_FILENAME]
    filename.hass = hass
    video_path = filename.async_render(variables={ATTR_ENTITY_ID: camera})

    await stream.async_record(
        video_path,
        duration=service_call.data[CONF_DURATION],
        lookback=service_call.data[CONF_LOOKBACK],
    )
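The two-slot token deque above is what lets entity-picture URLs keep working across a token rotation: CameraView accepts either of the last two tokens. A minimal, self-contained sketch of the rotation scheme (standalone names for illustration, not the component itself):

import collections
import hashlib
import random

_RND = random.SystemRandom()
access_tokens: collections.deque = collections.deque([], 2)  # maxlen=2: current token plus one grace token

def rotate_token() -> None:
    """Append a fresh token; the deque silently drops the oldest one."""
    access_tokens.append(
        hashlib.sha256(_RND.getrandbits(256).to_bytes(32, "little")).hexdigest()
    )

rotate_token()
rotate_token()
previous, current = access_tokens  # both would pass the CameraView token check
rotate_token()
assert previous not in access_tokens  # a token only survives one rotation
assert current in access_tokens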
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
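The netdisco_mock fixture above leans on patch.dict over sys.modules, so importing netdisco.discovery never touches the real library. A minimal sketch of the same trick in isolation (runnable without netdisco installed):

from unittest.mock import MagicMock, patch

with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}):
    # The from-import resolves against the mocked entry in sys.modules,
    # so the real netdisco package does not need to be installed.
    from netdisco.discovery import NetworkDiscovery

    scanner = NetworkDiscovery()  # just a MagicMock; any attribute access works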
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/camera/__init__.py
"""Constants for the devolo_home_control integration.""" import re DOMAIN = "devolo_home_control" DEFAULT_MYDEVOLO = "https://www.mydevolo.com" PLATFORMS = ["binary_sensor", "climate", "cover", "light", "sensor", "switch"] CONF_MYDEVOLO = "mydevolo_url" GATEWAY_SERIAL_PATTERN = re.compile(r"\d{16}") SUPPORTED_MODEL_TYPES = ["2600", "2601"]
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/devolo_home_control/const.py
"""Flock platform for notify component.""" import asyncio import logging import async_timeout import voluptuous as vol from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService from homeassistant.const import CONF_ACCESS_TOKEN, HTTP_OK from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) _RESOURCE = "https://api.flock.com/hooks/sendMessage/" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_ACCESS_TOKEN): cv.string}) async def async_get_service(hass, config, discovery_info=None): """Get the Flock notification service.""" access_token = config.get(CONF_ACCESS_TOKEN) url = f"{_RESOURCE}{access_token}" session = async_get_clientsession(hass) return FlockNotificationService(url, session) class FlockNotificationService(BaseNotificationService): """Implement the notification service for Flock.""" def __init__(self, url, session): """Initialize the Flock notification service.""" self._url = url self._session = session async def async_send_message(self, message, **kwargs): """Send the message to the user.""" payload = {"text": message} _LOGGER.debug("Attempting to call Flock at %s", self._url) try: with async_timeout.timeout(10): response = await self._session.post(self._url, json=payload) result = await response.json() if response.status != HTTP_OK or "error" in result: _LOGGER.error( "Flock service returned HTTP status %d, response %s", response.status, result, ) except asyncio.TimeoutError: _LOGGER.error("Timeout accessing Flock at %s", self._url)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/flock/notify.py
"""Support for the yandex speechkit tts service.""" import asyncio import logging import aiohttp import async_timeout import voluptuous as vol from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider from homeassistant.const import CONF_API_KEY, HTTP_OK from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) YANDEX_API_URL = "https://tts.voicetech.yandex.net/generate?" SUPPORT_LANGUAGES = ["ru-RU", "en-US", "tr-TR", "uk-UK"] SUPPORT_CODECS = ["mp3", "wav", "opus"] SUPPORT_VOICES = [ "jane", "oksana", "alyss", "omazh", "zahar", "ermil", "levitan", "ermilov", "silaerkan", "kolya", "kostya", "nastya", "sasha", "nick", "erkanyavas", "zhenya", "tanya", "anton_samokhvalov", "tatyana_abramova", "voicesearch", "ermil_with_tuning", "robot", "dude", "zombie", "smoky", ] SUPPORTED_EMOTION = ["good", "evil", "neutral"] MIN_SPEED = 0.1 MAX_SPEED = 3 CONF_CODEC = "codec" CONF_VOICE = "voice" CONF_EMOTION = "emotion" CONF_SPEED = "speed" DEFAULT_LANG = "en-US" DEFAULT_CODEC = "mp3" DEFAULT_VOICE = "zahar" DEFAULT_EMOTION = "neutral" DEFAULT_SPEED = 1 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES), vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS), vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES), vol.Optional(CONF_EMOTION, default=DEFAULT_EMOTION): vol.In(SUPPORTED_EMOTION), vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): vol.Range( min=MIN_SPEED, max=MAX_SPEED ), } ) SUPPORTED_OPTIONS = [CONF_CODEC, CONF_VOICE, CONF_EMOTION, CONF_SPEED] async def async_get_engine(hass, config, discovery_info=None): """Set up VoiceRSS speech component.""" return YandexSpeechKitProvider(hass, config) class YandexSpeechKitProvider(Provider): """VoiceRSS speech api provider.""" def __init__(self, hass, conf): """Init VoiceRSS TTS service.""" self.hass = hass self._codec = conf.get(CONF_CODEC) self._key = conf.get(CONF_API_KEY) self._speaker = conf.get(CONF_VOICE) self._language = conf.get(CONF_LANG) self._emotion = conf.get(CONF_EMOTION) self._speed = str(conf.get(CONF_SPEED)) self.name = "YandexTTS" @property def default_language(self): """Return the default language.""" return self._language @property def supported_languages(self): """Return list of supported languages.""" return SUPPORT_LANGUAGES @property def supported_options(self): """Return list of supported options.""" return SUPPORTED_OPTIONS async def async_get_tts_audio(self, message, language, options=None): """Load TTS from yandex.""" websession = async_get_clientsession(self.hass) actual_language = language options = options or {} try: with async_timeout.timeout(10): url_param = { "text": message, "lang": actual_language, "key": self._key, "speaker": options.get(CONF_VOICE, self._speaker), "format": options.get(CONF_CODEC, self._codec), "emotion": options.get(CONF_EMOTION, self._emotion), "speed": options.get(CONF_SPEED, self._speed), } request = await websession.get(YANDEX_API_URL, params=url_param) if request.status != HTTP_OK: _LOGGER.error( "Error %d on load URL %s", request.status, request.url ) return (None, None) data = await request.read() except (asyncio.TimeoutError, aiohttp.ClientError): _LOGGER.error("Timeout for yandex speech kit API") return (None, None) return (self._codec, data)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/yandextts/tts.py
"""Config flow for AlarmDecoder.""" import logging from adext import AdExt from alarmdecoder.devices import SerialDevice, SocketDevice from alarmdecoder.util import NoDeviceError import voluptuous as vol from homeassistant import config_entries from homeassistant.components.binary_sensor import DEVICE_CLASSES from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PROTOCOL from homeassistant.core import callback from .const import ( CONF_ALT_NIGHT_MODE, CONF_AUTO_BYPASS, CONF_CODE_ARM_REQUIRED, CONF_DEVICE_BAUD, CONF_DEVICE_PATH, CONF_RELAY_ADDR, CONF_RELAY_CHAN, CONF_ZONE_LOOP, CONF_ZONE_NAME, CONF_ZONE_NUMBER, CONF_ZONE_RFID, CONF_ZONE_TYPE, DEFAULT_ARM_OPTIONS, DEFAULT_DEVICE_BAUD, DEFAULT_DEVICE_HOST, DEFAULT_DEVICE_PATH, DEFAULT_DEVICE_PORT, DEFAULT_ZONE_OPTIONS, DEFAULT_ZONE_TYPE, DOMAIN, OPTIONS_ARM, OPTIONS_ZONES, PROTOCOL_SERIAL, PROTOCOL_SOCKET, ) EDIT_KEY = "edit_selection" EDIT_ZONES = "Zones" EDIT_SETTINGS = "Arming Settings" _LOGGER = logging.getLogger(__name__) class AlarmDecoderFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a AlarmDecoder config flow.""" VERSION = 1 def __init__(self): """Initialize AlarmDecoder ConfigFlow.""" self.protocol = None @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for AlarmDecoder.""" return AlarmDecoderOptionsFlowHandler(config_entry) async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" if user_input is not None: self.protocol = user_input[CONF_PROTOCOL] return await self.async_step_protocol() return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_PROTOCOL): vol.In( [PROTOCOL_SOCKET, PROTOCOL_SERIAL] ), } ), ) async def async_step_protocol(self, user_input=None): """Handle AlarmDecoder protocol setup.""" errors = {} if user_input is not None: if _device_already_added( self._async_current_entries(), user_input, self.protocol ): return self.async_abort(reason="already_configured") connection = {} baud = None if self.protocol == PROTOCOL_SOCKET: host = connection[CONF_HOST] = user_input[CONF_HOST] port = connection[CONF_PORT] = user_input[CONF_PORT] title = f"{host}:{port}" device = SocketDevice(interface=(host, port)) if self.protocol == PROTOCOL_SERIAL: path = connection[CONF_DEVICE_PATH] = user_input[CONF_DEVICE_PATH] baud = connection[CONF_DEVICE_BAUD] = user_input[CONF_DEVICE_BAUD] title = path device = SerialDevice(interface=path) controller = AdExt(device) def test_connection(): controller.open(baud) controller.close() try: await self.hass.async_add_executor_job(test_connection) return self.async_create_entry( title=title, data={CONF_PROTOCOL: self.protocol, **connection} ) except NoDeviceError: errors["base"] = "cannot_connect" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception during AlarmDecoder setup") errors["base"] = "unknown" if self.protocol == PROTOCOL_SOCKET: schema = vol.Schema( { vol.Required(CONF_HOST, default=DEFAULT_DEVICE_HOST): str, vol.Required(CONF_PORT, default=DEFAULT_DEVICE_PORT): int, } ) if self.protocol == PROTOCOL_SERIAL: schema = vol.Schema( { vol.Required(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): str, vol.Required(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): int, } ) return self.async_show_form( step_id="protocol", data_schema=schema, errors=errors, ) class AlarmDecoderOptionsFlowHandler(config_entries.OptionsFlow): """Handle AlarmDecoder options.""" def __init__(self, config_entry: config_entries.ConfigEntry) -> None: 
"""Initialize AlarmDecoder options flow.""" self.arm_options = config_entry.options.get(OPTIONS_ARM, DEFAULT_ARM_OPTIONS) self.zone_options = config_entry.options.get( OPTIONS_ZONES, DEFAULT_ZONE_OPTIONS ) self.selected_zone = None async def async_step_init(self, user_input=None): """Manage the options.""" if user_input is not None: if user_input[EDIT_KEY] == EDIT_SETTINGS: return await self.async_step_arm_settings() if user_input[EDIT_KEY] == EDIT_ZONES: return await self.async_step_zone_select() return self.async_show_form( step_id="init", data_schema=vol.Schema( { vol.Required(EDIT_KEY, default=EDIT_SETTINGS): vol.In( [EDIT_SETTINGS, EDIT_ZONES] ) }, ), ) async def async_step_arm_settings(self, user_input=None): """Arming options form.""" if user_input is not None: return self.async_create_entry( title="", data={OPTIONS_ARM: user_input, OPTIONS_ZONES: self.zone_options}, ) return self.async_show_form( step_id="arm_settings", data_schema=vol.Schema( { vol.Optional( CONF_ALT_NIGHT_MODE, default=self.arm_options[CONF_ALT_NIGHT_MODE], ): bool, vol.Optional( CONF_AUTO_BYPASS, default=self.arm_options[CONF_AUTO_BYPASS] ): bool, vol.Optional( CONF_CODE_ARM_REQUIRED, default=self.arm_options[CONF_CODE_ARM_REQUIRED], ): bool, }, ), ) async def async_step_zone_select(self, user_input=None): """Zone selection form.""" errors = _validate_zone_input(user_input) if user_input is not None and not errors: self.selected_zone = str( int(user_input[CONF_ZONE_NUMBER]) ) # remove leading zeros return await self.async_step_zone_details() return self.async_show_form( step_id="zone_select", data_schema=vol.Schema({vol.Required(CONF_ZONE_NUMBER): str}), errors=errors, ) async def async_step_zone_details(self, user_input=None): """Zone details form.""" errors = _validate_zone_input(user_input) if user_input is not None and not errors: zone_options = self.zone_options.copy() zone_id = self.selected_zone zone_options[zone_id] = _fix_input_types(user_input) # Delete zone entry if zone_name is omitted if CONF_ZONE_NAME not in zone_options[zone_id]: zone_options.pop(zone_id) return self.async_create_entry( title="", data={OPTIONS_ARM: self.arm_options, OPTIONS_ZONES: zone_options}, ) existing_zone_settings = self.zone_options.get(self.selected_zone, {}) return self.async_show_form( step_id="zone_details", description_placeholders={CONF_ZONE_NUMBER: self.selected_zone}, data_schema=vol.Schema( { vol.Optional( CONF_ZONE_NAME, description={ "suggested_value": existing_zone_settings.get( CONF_ZONE_NAME ) }, ): str, vol.Optional( CONF_ZONE_TYPE, default=existing_zone_settings.get( CONF_ZONE_TYPE, DEFAULT_ZONE_TYPE ), ): vol.In(DEVICE_CLASSES), vol.Optional( CONF_ZONE_RFID, description={ "suggested_value": existing_zone_settings.get( CONF_ZONE_RFID ) }, ): str, vol.Optional( CONF_ZONE_LOOP, description={ "suggested_value": existing_zone_settings.get( CONF_ZONE_LOOP ) }, ): str, vol.Optional( CONF_RELAY_ADDR, description={ "suggested_value": existing_zone_settings.get( CONF_RELAY_ADDR ) }, ): str, vol.Optional( CONF_RELAY_CHAN, description={ "suggested_value": existing_zone_settings.get( CONF_RELAY_CHAN ) }, ): str, } ), errors=errors, ) def _validate_zone_input(zone_input): if not zone_input: return {} errors = {} # CONF_RELAY_ADDR & CONF_RELAY_CHAN are inclusive if (CONF_RELAY_ADDR in zone_input and CONF_RELAY_CHAN not in zone_input) or ( CONF_RELAY_ADDR not in zone_input and CONF_RELAY_CHAN in zone_input ): errors["base"] = "relay_inclusive" # The following keys must be int for key in [CONF_ZONE_NUMBER, CONF_ZONE_LOOP, 
CONF_RELAY_ADDR, CONF_RELAY_CHAN]: if key in zone_input: try: int(zone_input[key]) except ValueError: errors[key] = "int" # CONF_ZONE_LOOP depends on CONF_ZONE_RFID if CONF_ZONE_LOOP in zone_input and CONF_ZONE_RFID not in zone_input: errors[CONF_ZONE_LOOP] = "loop_rfid" # CONF_ZONE_LOOP must be 1-4 if ( CONF_ZONE_LOOP in zone_input and zone_input[CONF_ZONE_LOOP].isdigit() and int(zone_input[CONF_ZONE_LOOP]) not in list(range(1, 5)) ): errors[CONF_ZONE_LOOP] = "loop_range" return errors def _fix_input_types(zone_input): """Convert necessary keys to int. Since ConfigFlow inputs of type int cannot default to an empty string, we collect the values below as strings and then convert them to ints. """ for key in [CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]: if key in zone_input: zone_input[key] = int(zone_input[key]) return zone_input def _device_already_added(current_entries, user_input, protocol): """Determine if entry has already been added to HA.""" user_host = user_input.get(CONF_HOST) user_port = user_input.get(CONF_PORT) user_path = user_input.get(CONF_DEVICE_PATH) user_baud = user_input.get(CONF_DEVICE_BAUD) for entry in current_entries: entry_host = entry.data.get(CONF_HOST) entry_port = entry.data.get(CONF_PORT) entry_path = entry.data.get(CONF_DEVICE_PATH) entry_baud = entry.data.get(CONF_DEVICE_BAUD) if ( protocol == PROTOCOL_SOCKET and user_host == entry_host and user_port == entry_port ): return True if ( protocol == PROTOCOL_SERIAL and user_baud == entry_baud and user_path == entry_path ): return True return False
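To make the validation rules above concrete, here is how _validate_zone_input responds to a few representative inputs, shown as comments because the CONF_* keys live in this integration's const module:

# _validate_zone_input(None)                         -> {}  (no input, nothing to flag)
# {CONF_RELAY_ADDR: "12"}                            -> {"base": "relay_inclusive"}  (address without channel)
# {CONF_ZONE_NUMBER: "abc"}                          -> {CONF_ZONE_NUMBER: "int"}  (not an integer)
# {CONF_ZONE_LOOP: "2"}                              -> {CONF_ZONE_LOOP: "loop_rfid"}  (loop requires an RF serial)
# {CONF_ZONE_RFID: "0123456", CONF_ZONE_LOOP: "9"}   -> {CONF_ZONE_LOOP: "loop_range"}  (loop must be 1-4)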
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/alarmdecoder/config_flow.py
"""Config flow for Litter-Robot integration.""" import logging from pylitterbot.exceptions import LitterRobotException, LitterRobotLoginException import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from .const import DOMAIN from .hub import LitterRobotHub _LOGGER = logging.getLogger(__name__) STEP_USER_DATA_SCHEMA = vol.Schema( {vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str} ) class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for Litter-Robot.""" VERSION = 1 async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: self._async_abort_entries_match({CONF_USERNAME: user_input[CONF_USERNAME]}) hub = LitterRobotHub(self.hass, user_input) try: await hub.login() except LitterRobotLoginException: errors["base"] = "invalid_auth" except LitterRobotException: errors["base"] = "cannot_connect" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" if not errors: return self.async_create_entry( title=user_input[CONF_USERNAME], data=user_input ) return self.async_show_form( step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors )
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/litterrobot/config_flow.py
"""API for Honeywell Lyric bound to Home Assistant OAuth.""" import logging from typing import cast from aiohttp import BasicAuth, ClientSession from aiolyric.client import LyricClient from homeassistant.helpers import config_entry_oauth2_flow from homeassistant.helpers.aiohttp_client import async_get_clientsession _LOGGER = logging.getLogger(__name__) class ConfigEntryLyricClient(LyricClient): """Provide Honeywell Lyric authentication tied to an OAuth2 based config entry.""" def __init__( self, websession: ClientSession, oauth_session: config_entry_oauth2_flow.OAuth2Session, ) -> None: """Initialize Honeywell Lyric auth.""" super().__init__(websession) self._oauth_session = oauth_session async def async_get_access_token(self): """Return a valid access token.""" if not self._oauth_session.valid_token: await self._oauth_session.async_ensure_token_valid() return self._oauth_session.token["access_token"] class LyricLocalOAuth2Implementation( config_entry_oauth2_flow.LocalOAuth2Implementation ): """Lyric Local OAuth2 implementation.""" async def _token_request(self, data: dict) -> dict: """Make a token request.""" session = async_get_clientsession(self.hass) data["client_id"] = self.client_id if self.client_secret is not None: data["client_secret"] = self.client_secret headers = { "Authorization": BasicAuth(self.client_id, self.client_secret).encode(), "Content-Type": "application/x-www-form-urlencoded", } resp = await session.post(self.token_url, headers=headers, data=data) resp.raise_for_status() return cast(dict, await resp.json())
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/lyric/api.py
"""Support for Vera thermostats.""" from __future__ import annotations from typing import Any import pyvera as veraApi from homeassistant.components.climate import ( DOMAIN as PLATFORM_DOMAIN, ENTITY_ID_FORMAT, ClimateEntity, ) from homeassistant.components.climate.const import ( FAN_AUTO, FAN_ON, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, SUPPORT_FAN_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.util import convert from . import VeraDevice from .common import ControllerData, get_controller_data FAN_OPERATION_LIST = [FAN_ON, FAN_AUTO] SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE SUPPORT_HVAC = [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF] async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the sensor config entry.""" controller_data = get_controller_data(hass, entry) async_add_entities( [ VeraThermostat(device, controller_data) for device in controller_data.devices.get(PLATFORM_DOMAIN) ], True, ) class VeraThermostat(VeraDevice[veraApi.VeraThermostat], ClimateEntity): """Representation of a Vera Thermostat.""" def __init__( self, vera_device: veraApi.VeraThermostat, controller_data: ControllerData ) -> None: """Initialize the Vera device.""" VeraDevice.__init__(self, vera_device, controller_data) self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id) @property def supported_features(self) -> int | None: """Return the list of supported features.""" return SUPPORT_FLAGS @property def hvac_mode(self) -> str: """Return hvac operation ie. heat, cool mode. Need to be one of HVAC_MODE_*. """ mode = self.vera_device.get_hvac_mode() if mode == "HeatOn": return HVAC_MODE_HEAT if mode == "CoolOn": return HVAC_MODE_COOL if mode == "AutoChangeOver": return HVAC_MODE_HEAT_COOL return HVAC_MODE_OFF @property def hvac_modes(self) -> list[str]: """Return the list of available hvac operation modes. Need to be a subset of HVAC_MODES. """ return SUPPORT_HVAC @property def fan_mode(self) -> str | None: """Return the fan setting.""" mode = self.vera_device.get_fan_mode() if mode == "ContinuousOn": return FAN_ON return FAN_AUTO @property def fan_modes(self) -> list[str] | None: """Return a list of available fan modes.""" return FAN_OPERATION_LIST def set_fan_mode(self, fan_mode) -> None: """Set new target temperature.""" if fan_mode == FAN_ON: self.vera_device.fan_on() else: self.vera_device.fan_auto() self.schedule_update_ha_state() @property def current_power_w(self) -> float | None: """Return the current power usage in W.""" power = self.vera_device.power if power: return convert(power, float, 0.0) @property def temperature_unit(self) -> str: """Return the unit of measurement.""" vera_temp_units = self.vera_device.vera_controller.temperature_units if vera_temp_units == "F": return TEMP_FAHRENHEIT return TEMP_CELSIUS @property def current_temperature(self) -> float | None: """Return the current temperature.""" return self.vera_device.get_current_temperature() @property def operation(self) -> str: """Return current operation ie. 
heat, cool, idle.""" return self.vera_device.get_hvac_mode() @property def target_temperature(self) -> float | None: """Return the temperature we try to reach.""" return self.vera_device.get_current_goal_temperature() def set_temperature(self, **kwargs: Any) -> None: """Set new target temperatures.""" if kwargs.get(ATTR_TEMPERATURE) is not None: self.vera_device.set_temperature(kwargs.get(ATTR_TEMPERATURE)) self.schedule_update_ha_state() def set_hvac_mode(self, hvac_mode) -> None: """Set new target hvac mode.""" if hvac_mode == HVAC_MODE_OFF: self.vera_device.turn_off() elif hvac_mode == HVAC_MODE_HEAT_COOL: self.vera_device.turn_auto_on() elif hvac_mode == HVAC_MODE_COOL: self.vera_device.turn_cool_on() elif hvac_mode == HVAC_MODE_HEAT: self.vera_device.turn_heat_on() self.schedule_update_ha_state()
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/vera/climate.py
"""This component provides HA sensor support for Ring Door Bell/Chimes.""" from homeassistant.components.sensor import SensorEntity from homeassistant.const import ( DEVICE_CLASS_TIMESTAMP, PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import callback from homeassistant.helpers.icon import icon_for_battery_level from . import DOMAIN from .entity import RingEntityMixin async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a sensor for a Ring device.""" devices = hass.data[DOMAIN][config_entry.entry_id]["devices"] sensors = [] for device_type in ("chimes", "doorbots", "authorized_doorbots", "stickup_cams"): for sensor_type in SENSOR_TYPES: if device_type not in SENSOR_TYPES[sensor_type][1]: continue for device in devices[device_type]: if device_type == "battery" and device.battery_life is None: continue sensors.append( SENSOR_TYPES[sensor_type][6]( config_entry.entry_id, device, sensor_type ) ) async_add_entities(sensors) class RingSensor(RingEntityMixin, SensorEntity): """A sensor implementation for Ring device.""" def __init__(self, config_entry_id, device, sensor_type): """Initialize a sensor for Ring device.""" super().__init__(config_entry_id, device) self._sensor_type = sensor_type self._extra = None self._icon = f"mdi:{SENSOR_TYPES.get(sensor_type)[3]}" self._kind = SENSOR_TYPES.get(sensor_type)[4] self._name = f"{self._device.name} {SENSOR_TYPES.get(sensor_type)[0]}" self._unique_id = f"{device.id}-{sensor_type}" @property def should_poll(self): """Return False, updates are controlled via the hub.""" return False @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" if self._sensor_type == "volume": return self._device.volume if self._sensor_type == "battery": return self._device.battery_life @property def unique_id(self): """Return a unique ID.""" return self._unique_id @property def device_class(self): """Return sensor device class.""" return SENSOR_TYPES[self._sensor_type][5] @property def icon(self): """Icon to use in the frontend, if any.""" if self._sensor_type == "battery" and self._device.battery_life is not None: return icon_for_battery_level( battery_level=self._device.battery_life, charging=False ) return self._icon @property def unit_of_measurement(self): """Return the units of measurement.""" return SENSOR_TYPES.get(self._sensor_type)[2] class HealthDataRingSensor(RingSensor): """Ring sensor that relies on health data.""" async def async_added_to_hass(self): """Register callbacks.""" await super().async_added_to_hass() await self.ring_objects["health_data"].async_track_device( self._device, self._health_update_callback ) async def async_will_remove_from_hass(self): """Disconnect callbacks.""" await super().async_will_remove_from_hass() self.ring_objects["health_data"].async_untrack_device( self._device, self._health_update_callback ) @callback def _health_update_callback(self, _health_data): """Call update method.""" self.async_write_ha_state() @property def entity_registry_enabled_default(self) -> bool: """Return if the entity should be enabled when first added to the entity registry.""" # These sensors are data hungry and not useful. Disable by default. 
return False @property def state(self): """Return the state of the sensor.""" if self._sensor_type == "wifi_signal_category": return self._device.wifi_signal_category if self._sensor_type == "wifi_signal_strength": return self._device.wifi_signal_strength class HistoryRingSensor(RingSensor): """Ring sensor that relies on history data.""" _latest_event = None async def async_added_to_hass(self): """Register callbacks.""" await super().async_added_to_hass() await self.ring_objects["history_data"].async_track_device( self._device, self._history_update_callback ) async def async_will_remove_from_hass(self): """Disconnect callbacks.""" await super().async_will_remove_from_hass() self.ring_objects["history_data"].async_untrack_device( self._device, self._history_update_callback ) @callback def _history_update_callback(self, history_data): """Call update method.""" if not history_data: return found = None if self._kind is None: found = history_data[0] else: for entry in history_data: if entry["kind"] == self._kind: found = entry break if not found: return self._latest_event = found self.async_write_ha_state() @property def state(self): """Return the state of the sensor.""" if self._latest_event is None: return None return self._latest_event["created_at"].isoformat() @property def extra_state_attributes(self): """Return the state attributes.""" attrs = super().extra_state_attributes if self._latest_event: attrs["created_at"] = self._latest_event["created_at"] attrs["answered"] = self._latest_event["answered"] attrs["recording_status"] = self._latest_event["recording"]["status"] attrs["category"] = self._latest_event["kind"] return attrs # Sensor types: Name, category, units, icon, kind, device_class, class SENSOR_TYPES = { "battery": [ "Battery", ["doorbots", "authorized_doorbots", "stickup_cams"], PERCENTAGE, None, None, "battery", RingSensor, ], "last_activity": [ "Last Activity", ["doorbots", "authorized_doorbots", "stickup_cams"], None, "history", None, DEVICE_CLASS_TIMESTAMP, HistoryRingSensor, ], "last_ding": [ "Last Ding", ["doorbots", "authorized_doorbots"], None, "history", "ding", DEVICE_CLASS_TIMESTAMP, HistoryRingSensor, ], "last_motion": [ "Last Motion", ["doorbots", "authorized_doorbots", "stickup_cams"], None, "history", "motion", DEVICE_CLASS_TIMESTAMP, HistoryRingSensor, ], "volume": [ "Volume", ["chimes", "doorbots", "authorized_doorbots", "stickup_cams"], None, "bell-ring", None, None, RingSensor, ], "wifi_signal_category": [ "WiFi Signal Category", ["chimes", "doorbots", "authorized_doorbots", "stickup_cams"], None, "wifi", None, None, HealthDataRingSensor, ], "wifi_signal_strength": [ "WiFi Signal Strength", ["chimes", "doorbots", "authorized_doorbots", "stickup_cams"], SIGNAL_STRENGTH_DECIBELS_MILLIWATT, "wifi", None, "signal_strength", HealthDataRingSensor, ], }
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/ring/sensor.py
"""Constants for the Huisbaasje integration.""" from huisbaasje.const import ( SOURCE_TYPE_ELECTRICITY, SOURCE_TYPE_ELECTRICITY_IN, SOURCE_TYPE_ELECTRICITY_IN_LOW, SOURCE_TYPE_ELECTRICITY_OUT, SOURCE_TYPE_ELECTRICITY_OUT_LOW, SOURCE_TYPE_GAS, ) from homeassistant.const import ( DEVICE_CLASS_ENERGY, DEVICE_CLASS_POWER, ENERGY_KILO_WATT_HOUR, TIME_HOURS, VOLUME_CUBIC_METERS, ) DATA_COORDINATOR = "coordinator" DOMAIN = "huisbaasje" FLOW_CUBIC_METERS_PER_HOUR = f"{VOLUME_CUBIC_METERS}/{TIME_HOURS}" """Interval in seconds between polls to huisbaasje.""" POLLING_INTERVAL = 20 """Timeout for fetching sensor data""" FETCH_TIMEOUT = 10 SENSOR_TYPE_RATE = "rate" SENSOR_TYPE_THIS_DAY = "thisDay" SENSOR_TYPE_THIS_WEEK = "thisWeek" SENSOR_TYPE_THIS_MONTH = "thisMonth" SENSOR_TYPE_THIS_YEAR = "thisYear" SOURCE_TYPES = [ SOURCE_TYPE_ELECTRICITY, SOURCE_TYPE_ELECTRICITY_IN, SOURCE_TYPE_ELECTRICITY_IN_LOW, SOURCE_TYPE_ELECTRICITY_OUT, SOURCE_TYPE_ELECTRICITY_OUT_LOW, SOURCE_TYPE_GAS, ] SENSORS_INFO = [ { "name": "Huisbaasje Current Power", "device_class": DEVICE_CLASS_POWER, "source_type": SOURCE_TYPE_ELECTRICITY, }, { "name": "Huisbaasje Current Power In", "device_class": DEVICE_CLASS_POWER, "source_type": SOURCE_TYPE_ELECTRICITY_IN, }, { "name": "Huisbaasje Current Power In Low", "device_class": DEVICE_CLASS_POWER, "source_type": SOURCE_TYPE_ELECTRICITY_IN_LOW, }, { "name": "Huisbaasje Current Power Out", "device_class": DEVICE_CLASS_POWER, "source_type": SOURCE_TYPE_ELECTRICITY_OUT, }, { "name": "Huisbaasje Current Power Out Low", "device_class": DEVICE_CLASS_POWER, "source_type": SOURCE_TYPE_ELECTRICITY_OUT_LOW, }, { "name": "Huisbaasje Energy Today", "device_class": DEVICE_CLASS_ENERGY, "unit_of_measurement": ENERGY_KILO_WATT_HOUR, "source_type": SOURCE_TYPE_ELECTRICITY, "sensor_type": SENSOR_TYPE_THIS_DAY, "precision": 1, }, { "name": "Huisbaasje Energy This Week", "device_class": DEVICE_CLASS_ENERGY, "unit_of_measurement": ENERGY_KILO_WATT_HOUR, "source_type": SOURCE_TYPE_ELECTRICITY, "sensor_type": SENSOR_TYPE_THIS_WEEK, "precision": 1, }, { "name": "Huisbaasje Energy This Month", "device_class": DEVICE_CLASS_ENERGY, "unit_of_measurement": ENERGY_KILO_WATT_HOUR, "source_type": SOURCE_TYPE_ELECTRICITY, "sensor_type": SENSOR_TYPE_THIS_MONTH, "precision": 1, }, { "name": "Huisbaasje Energy This Year", "device_class": DEVICE_CLASS_ENERGY, "unit_of_measurement": ENERGY_KILO_WATT_HOUR, "source_type": SOURCE_TYPE_ELECTRICITY, "sensor_type": SENSOR_TYPE_THIS_YEAR, "precision": 1, }, { "name": "Huisbaasje Current Gas", "unit_of_measurement": FLOW_CUBIC_METERS_PER_HOUR, "source_type": SOURCE_TYPE_GAS, "icon": "mdi:fire", "precision": 1, }, { "name": "Huisbaasje Gas Today", "unit_of_measurement": VOLUME_CUBIC_METERS, "source_type": SOURCE_TYPE_GAS, "sensor_type": SENSOR_TYPE_THIS_DAY, "icon": "mdi:counter", "precision": 1, }, { "name": "Huisbaasje Gas This Week", "unit_of_measurement": VOLUME_CUBIC_METERS, "source_type": SOURCE_TYPE_GAS, "sensor_type": SENSOR_TYPE_THIS_WEEK, "icon": "mdi:counter", "precision": 1, }, { "name": "Huisbaasje Gas This Month", "unit_of_measurement": VOLUME_CUBIC_METERS, "source_type": SOURCE_TYPE_GAS, "sensor_type": SENSOR_TYPE_THIS_MONTH, "icon": "mdi:counter", "precision": 1, }, { "name": "Huisbaasje Gas This Year", "unit_of_measurement": VOLUME_CUBIC_METERS, "source_type": SOURCE_TYPE_GAS, "sensor_type": SENSOR_TYPE_THIS_YEAR, "icon": "mdi:counter", "precision": 1, }, ]
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/huisbaasje/const.py
"""Classes to help gather user submissions.""" from __future__ import annotations import abc import asyncio from collections.abc import Mapping from types import MappingProxyType from typing import Any, TypedDict import uuid import voluptuous as vol from .core import HomeAssistant, callback from .exceptions import HomeAssistantError RESULT_TYPE_FORM = "form" RESULT_TYPE_CREATE_ENTRY = "create_entry" RESULT_TYPE_ABORT = "abort" RESULT_TYPE_EXTERNAL_STEP = "external" RESULT_TYPE_EXTERNAL_STEP_DONE = "external_done" RESULT_TYPE_SHOW_PROGRESS = "progress" RESULT_TYPE_SHOW_PROGRESS_DONE = "progress_done" # Event that is fired when a flow is progressed via external or progress source. EVENT_DATA_ENTRY_FLOW_PROGRESSED = "data_entry_flow_progressed" class FlowError(HomeAssistantError): """Error while configuring an account.""" class UnknownHandler(FlowError): """Unknown handler specified.""" class UnknownFlow(FlowError): """Unknown flow specified.""" class UnknownStep(FlowError): """Unknown step specified.""" class AbortFlow(FlowError): """Exception to indicate a flow needs to be aborted.""" def __init__( self, reason: str, description_placeholders: dict | None = None ) -> None: """Initialize an abort flow exception.""" super().__init__(f"Flow aborted: {reason}") self.reason = reason self.description_placeholders = description_placeholders class FlowResult(TypedDict, total=False): """Typed result dict.""" version: int type: str flow_id: str handler: str title: str data: Mapping[str, Any] step_id: str data_schema: vol.Schema extra: str required: bool errors: dict[str, str] | None description: str | None description_placeholders: dict[str, Any] | None progress_action: str url: str reason: str context: dict[str, Any] result: Any last_step: bool | None options: Mapping[str, Any] class FlowManager(abc.ABC): """Manage all the flows that are in progress.""" def __init__( self, hass: HomeAssistant, ) -> None: """Initialize the flow manager.""" self.hass = hass self._initializing: dict[str, list[asyncio.Future]] = {} self._initialize_tasks: dict[str, list[asyncio.Task]] = {} self._progress: dict[str, Any] = {} async def async_wait_init_flow_finish(self, handler: str) -> None: """Wait till all flows in progress are initialized.""" current = self._initializing.get(handler) if not current: return await asyncio.wait(current) @abc.abstractmethod async def async_create_flow( self, handler_key: Any, *, context: dict[str, Any] | None = None, data: dict[str, Any] | None = None, ) -> FlowHandler: """Create a flow for specified handler. Handler key is the domain of the component that we want to set up. 
""" @abc.abstractmethod async def async_finish_flow( self, flow: FlowHandler, result: FlowResult ) -> FlowResult: """Finish a config flow and add an entry.""" async def async_post_init(self, flow: FlowHandler, result: FlowResult) -> None: """Entry has finished executing its first step asynchronously.""" @callback def async_progress(self, include_uninitialized: bool = False) -> list[FlowResult]: """Return the flows in progress.""" return [ { "flow_id": flow.flow_id, "handler": flow.handler, "context": flow.context, "step_id": flow.cur_step["step_id"] if flow.cur_step else None, } for flow in self._progress.values() if include_uninitialized or flow.cur_step is not None ] async def async_init( self, handler: str, *, context: dict[str, Any] | None = None, data: Any = None ) -> FlowResult: """Start a configuration flow.""" if context is None: context = {} init_done: asyncio.Future = asyncio.Future() self._initializing.setdefault(handler, []).append(init_done) task = asyncio.create_task(self._async_init(init_done, handler, context, data)) self._initialize_tasks.setdefault(handler, []).append(task) try: flow, result = await task finally: self._initialize_tasks[handler].remove(task) self._initializing[handler].remove(init_done) if result["type"] != RESULT_TYPE_ABORT: await self.async_post_init(flow, result) return result async def _async_init( self, init_done: asyncio.Future, handler: str, context: dict, data: Any, ) -> tuple[FlowHandler, FlowResult]: """Run the init in a task to allow it to be canceled at shutdown.""" flow = await self.async_create_flow(handler, context=context, data=data) if not flow: raise UnknownFlow("Flow was not created") flow.hass = self.hass flow.handler = handler flow.flow_id = uuid.uuid4().hex flow.context = context self._progress[flow.flow_id] = flow result = await self._async_handle_step(flow, flow.init_step, data, init_done) return flow, result async def async_shutdown(self) -> None: """Cancel any initializing flows.""" for task_list in self._initialize_tasks.values(): for task in task_list: task.cancel() async def async_configure( self, flow_id: str, user_input: dict | None = None ) -> FlowResult: """Continue a configuration flow.""" flow = self._progress.get(flow_id) if flow is None: raise UnknownFlow cur_step = flow.cur_step if cur_step.get("data_schema") is not None and user_input is not None: user_input = cur_step["data_schema"](user_input) result = await self._async_handle_step(flow, cur_step["step_id"], user_input) if cur_step["type"] in (RESULT_TYPE_EXTERNAL_STEP, RESULT_TYPE_SHOW_PROGRESS): if cur_step["type"] == RESULT_TYPE_EXTERNAL_STEP and result["type"] not in ( RESULT_TYPE_EXTERNAL_STEP, RESULT_TYPE_EXTERNAL_STEP_DONE, ): raise ValueError( "External step can only transition to " "external step or external step done." ) if cur_step["type"] == RESULT_TYPE_SHOW_PROGRESS and result["type"] not in ( RESULT_TYPE_SHOW_PROGRESS, RESULT_TYPE_SHOW_PROGRESS_DONE, ): raise ValueError( "Show progress can only transition to show progress or show progress done." ) # If the result has changed from last result, fire event to update # the frontend. if ( cur_step["step_id"] != result.get("step_id") or result["type"] == RESULT_TYPE_SHOW_PROGRESS ): # Tell frontend to reload the flow state. 
self.hass.bus.async_fire( EVENT_DATA_ENTRY_FLOW_PROGRESSED, {"handler": flow.handler, "flow_id": flow_id, "refresh": True}, ) return result @callback def async_abort(self, flow_id: str) -> None: """Abort a flow.""" if self._progress.pop(flow_id, None) is None: raise UnknownFlow async def _async_handle_step( self, flow: Any, step_id: str, user_input: dict | None, step_done: asyncio.Future | None = None, ) -> FlowResult: """Handle a step of a flow.""" method = f"async_step_{step_id}" if not hasattr(flow, method): self._progress.pop(flow.flow_id) if step_done: step_done.set_result(None) raise UnknownStep( f"Handler {flow.__class__.__name__} doesn't support step {step_id}" ) try: result: FlowResult = await getattr(flow, method)(user_input) except AbortFlow as err: result = _create_abort_data( flow.flow_id, flow.handler, err.reason, err.description_placeholders ) # Mark the step as done. # We do this before calling async_finish_flow because config entries will hit a # circular dependency where async_finish_flow sets up new entry, which needs the # integration to be set up, which is waiting for init to be done. if step_done: step_done.set_result(None) if result["type"] not in ( RESULT_TYPE_FORM, RESULT_TYPE_EXTERNAL_STEP, RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_ABORT, RESULT_TYPE_EXTERNAL_STEP_DONE, RESULT_TYPE_SHOW_PROGRESS, RESULT_TYPE_SHOW_PROGRESS_DONE, ): raise ValueError(f"Handler returned incorrect type: {result['type']}") if result["type"] in ( RESULT_TYPE_FORM, RESULT_TYPE_EXTERNAL_STEP, RESULT_TYPE_EXTERNAL_STEP_DONE, RESULT_TYPE_SHOW_PROGRESS, RESULT_TYPE_SHOW_PROGRESS_DONE, ): flow.cur_step = result return result # We pass a copy of the result because we're mutating our version result = await self.async_finish_flow(flow, result.copy()) # _async_finish_flow may change result type, check it again if result["type"] == RESULT_TYPE_FORM: flow.cur_step = result return result # Abort and Success results both finish the flow self._progress.pop(flow.flow_id) return result class FlowHandler: """Handle the configuration flow of a component.""" # Set by flow manager cur_step: dict[str, str] | None = None # While not purely typed, it makes typehinting more useful for us # and removes the need for constant None checks or asserts. flow_id: str = None # type: ignore hass: HomeAssistant = None # type: ignore handler: str = None # type: ignore # Ensure the attribute has a subscriptable, but immutable, default value. 
context: dict[str, Any] = MappingProxyType({}) # type: ignore # Set by _async_create_flow callback init_step = "init" # Set by developer VERSION = 1 @property def source(self) -> str | None: """Source that initialized the flow.""" if not hasattr(self, "context"): return None return self.context.get("source", None) @property def show_advanced_options(self) -> bool: """If we should show advanced options.""" if not hasattr(self, "context"): return False return self.context.get("show_advanced_options", False) @callback def async_show_form( self, *, step_id: str, data_schema: vol.Schema = None, errors: dict[str, str] | None = None, description_placeholders: dict[str, Any] | None = None, last_step: bool | None = None, ) -> FlowResult: """Return the definition of a form to gather user input.""" return { "type": RESULT_TYPE_FORM, "flow_id": self.flow_id, "handler": self.handler, "step_id": step_id, "data_schema": data_schema, "errors": errors, "description_placeholders": description_placeholders, "last_step": last_step, # Display next or submit button in frontend } @callback def async_create_entry( self, *, title: str, data: Mapping[str, Any], description: str | None = None, description_placeholders: dict | None = None, ) -> FlowResult: """Finish config flow and create a config entry.""" return { "version": self.VERSION, "type": RESULT_TYPE_CREATE_ENTRY, "flow_id": self.flow_id, "handler": self.handler, "title": title, "data": data, "description": description, "description_placeholders": description_placeholders, } @callback def async_abort( self, *, reason: str, description_placeholders: dict | None = None ) -> FlowResult: """Abort the config flow.""" return _create_abort_data( self.flow_id, self.handler, reason, description_placeholders ) @callback def async_external_step( self, *, step_id: str, url: str, description_placeholders: dict | None = None ) -> FlowResult: """Return the definition of an external step for the user to take.""" return { "type": RESULT_TYPE_EXTERNAL_STEP, "flow_id": self.flow_id, "handler": self.handler, "step_id": step_id, "url": url, "description_placeholders": description_placeholders, } @callback def async_external_step_done(self, *, next_step_id: str) -> FlowResult: """Return the definition of an external step for the user to take.""" return { "type": RESULT_TYPE_EXTERNAL_STEP_DONE, "flow_id": self.flow_id, "handler": self.handler, "step_id": next_step_id, } @callback def async_show_progress( self, *, step_id: str, progress_action: str, description_placeholders: dict | None = None, ) -> FlowResult: """Show a progress message to the user, without user input allowed.""" return { "type": RESULT_TYPE_SHOW_PROGRESS, "flow_id": self.flow_id, "handler": self.handler, "step_id": step_id, "progress_action": progress_action, "description_placeholders": description_placeholders, } @callback def async_show_progress_done(self, *, next_step_id: str) -> FlowResult: """Mark the progress done.""" return { "type": RESULT_TYPE_SHOW_PROGRESS_DONE, "flow_id": self.flow_id, "handler": self.handler, "step_id": next_step_id, } @callback def _create_abort_data( flow_id: str, handler: str, reason: str, description_placeholders: dict | None = None, ) -> FlowResult: """Return the definition of an external step for the user to take.""" return { "type": RESULT_TYPE_ABORT, "flow_id": flow_id, "handler": handler, "reason": reason, "description_placeholders": description_placeholders, }
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/data_entry_flow.py
"""Support for GPSLogger.""" from aiohttp import web import voluptuous as vol from homeassistant.components.device_tracker import ( ATTR_BATTERY, DOMAIN as DEVICE_TRACKER, ) from homeassistant.const import ( ATTR_LATITUDE, ATTR_LONGITUDE, CONF_WEBHOOK_ID, HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, ) from homeassistant.helpers import config_entry_flow import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from .const import ( ATTR_ACCURACY, ATTR_ACTIVITY, ATTR_ALTITUDE, ATTR_DEVICE, ATTR_DIRECTION, ATTR_PROVIDER, ATTR_SPEED, DOMAIN, ) PLATFORMS = [DEVICE_TRACKER] TRACKER_UPDATE = f"{DOMAIN}_tracker_update" DEFAULT_ACCURACY = 200 DEFAULT_BATTERY = -1 def _id(value: str) -> str: """Coerce id by removing '-'.""" return value.replace("-", "") WEBHOOK_SCHEMA = vol.Schema( { vol.Required(ATTR_DEVICE): _id, vol.Required(ATTR_LATITUDE): cv.latitude, vol.Required(ATTR_LONGITUDE): cv.longitude, vol.Optional(ATTR_ACCURACY, default=DEFAULT_ACCURACY): vol.Coerce(float), vol.Optional(ATTR_ACTIVITY): cv.string, vol.Optional(ATTR_ALTITUDE): vol.Coerce(float), vol.Optional(ATTR_BATTERY, default=DEFAULT_BATTERY): vol.Coerce(float), vol.Optional(ATTR_DIRECTION): vol.Coerce(float), vol.Optional(ATTR_PROVIDER): cv.string, vol.Optional(ATTR_SPEED): vol.Coerce(float), } ) async def async_setup(hass, hass_config): """Set up the GPSLogger component.""" hass.data[DOMAIN] = {"devices": set(), "unsub_device_tracker": {}} return True async def handle_webhook(hass, webhook_id, request): """Handle incoming webhook with GPSLogger request.""" try: data = WEBHOOK_SCHEMA(dict(await request.post())) except vol.MultipleInvalid as error: return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY) attrs = { ATTR_SPEED: data.get(ATTR_SPEED), ATTR_DIRECTION: data.get(ATTR_DIRECTION), ATTR_ALTITUDE: data.get(ATTR_ALTITUDE), ATTR_PROVIDER: data.get(ATTR_PROVIDER), ATTR_ACTIVITY: data.get(ATTR_ACTIVITY), } device = data[ATTR_DEVICE] async_dispatcher_send( hass, TRACKER_UPDATE, device, (data[ATTR_LATITUDE], data[ATTR_LONGITUDE]), data[ATTR_BATTERY], data[ATTR_ACCURACY], attrs, ) return web.Response(text=f"Setting location for {device}", status=HTTP_OK) async def async_setup_entry(hass, entry): """Configure based on config entry.""" hass.components.webhook.async_register( DOMAIN, "GPSLogger", entry.data[CONF_WEBHOOK_ID], handle_webhook ) hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID]) hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)() return await hass.config_entries.async_unload_platforms(entry, PLATFORMS) async_remove_entry = config_entry_flow.webhook_async_remove_entry
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/gpslogger/__init__.py
"""Twilio Call platform for notify component.""" import logging import urllib from twilio.base.exceptions import TwilioRestException import voluptuous as vol from homeassistant.components.notify import ( ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService, ) from homeassistant.components.twilio import DATA_TWILIO import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_FROM_NUMBER = "from_number" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_FROM_NUMBER): vol.All( cv.string, vol.Match(r"^\+?[1-9]\d{1,14}$") ) } ) def get_service(hass, config, discovery_info=None): """Get the Twilio Call notification service.""" return TwilioCallNotificationService( hass.data[DATA_TWILIO], config[CONF_FROM_NUMBER] ) class TwilioCallNotificationService(BaseNotificationService): """Implement the notification service for the Twilio Call service.""" def __init__(self, twilio_client, from_number): """Initialize the service.""" self.client = twilio_client self.from_number = from_number def send_message(self, message="", **kwargs): """Call to specified target users.""" targets = kwargs.get(ATTR_TARGET) if not targets: _LOGGER.info("At least 1 target is required") return if message.startswith(("http://", "https://")): twimlet_url = message else: twimlet_url = "http://twimlets.com/message?Message=" twimlet_url += urllib.parse.quote(message, safe="") for target in targets: try: self.client.calls.create( to=target, url=twimlet_url, from_=self.from_number ) except TwilioRestException as exc: _LOGGER.error(exc)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/twilio_call/notify.py
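The test_discover_duplicates case above asserts that the discovery component dispatches a repeated (service, info) pair exactly once. A minimal, stdlib-only sketch of that suppression pattern follows; dispatch_once and already_discovered are invented names for illustration, not the actual homeassistant.components.discovery implementation.

already_discovered = set()

def dispatch_once(service, info, handler):
    """Invoke handler(service, info) only the first time the pair is seen."""
    key = (service, tuple(sorted(info.items())))
    if key in already_discovered:
        return False  # duplicate: swallow it, mirroring call_count == 1
    already_discovered.add(key)
    handler(service, info)
    return True

calls = []
dispatch_once("netgear_router", {"key": "value"}, lambda s, i: calls.append(s))
dispatch_once("netgear_router", {"key": "value"}, lambda s, i: calls.append(s))
assert calls == ["netgear_router"]  # the second, identical discovery is ignored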
"""Support for Coinbase sensors.""" from homeassistant.components.sensor import SensorEntity from homeassistant.const import ATTR_ATTRIBUTION ATTR_NATIVE_BALANCE = "Balance in native currency" CURRENCY_ICONS = { "BTC": "mdi:currency-btc", "ETH": "mdi:currency-eth", "EUR": "mdi:currency-eur", "LTC": "mdi:litecoin", "USD": "mdi:currency-usd", } DEFAULT_COIN_ICON = "mdi:currency-usd-circle" ATTRIBUTION = "Data provided by coinbase.com" DATA_COINBASE = "coinbase_cache" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Coinbase sensors.""" if discovery_info is None: return if "account" in discovery_info: account = discovery_info["account"] sensor = AccountSensor( hass.data[DATA_COINBASE], account["name"], account["balance"]["currency"] ) if "exchange_currency" in discovery_info: sensor = ExchangeRateSensor( hass.data[DATA_COINBASE], discovery_info["exchange_currency"], discovery_info["native_currency"], ) add_entities([sensor], True) class AccountSensor(SensorEntity): """Representation of a Coinbase.com sensor.""" def __init__(self, coinbase_data, name, currency): """Initialize the sensor.""" self._coinbase_data = coinbase_data self._name = f"Coinbase {name}" self._state = None self._unit_of_measurement = currency self._native_balance = None self._native_currency = None @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement this sensor expresses itself in.""" return self._unit_of_measurement @property def icon(self): """Return the icon to use in the frontend, if any.""" return CURRENCY_ICONS.get(self._unit_of_measurement, DEFAULT_COIN_ICON) @property def extra_state_attributes(self): """Return the state attributes of the sensor.""" return { ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_NATIVE_BALANCE: f"{self._native_balance} {self._native_currency}", } def update(self): """Get the latest state of the sensor.""" self._coinbase_data.update() for account in self._coinbase_data.accounts: if self._name == f"Coinbase {account['name']}": self._state = account["balance"]["amount"] self._native_balance = account["native_balance"]["amount"] self._native_currency = account["native_balance"]["currency"] class ExchangeRateSensor(SensorEntity): """Representation of a Coinbase.com sensor.""" def __init__(self, coinbase_data, exchange_currency, native_currency): """Initialize the sensor.""" self._coinbase_data = coinbase_data self.currency = exchange_currency self._name = f"{exchange_currency} Exchange Rate" self._state = None self._unit_of_measurement = native_currency @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement this sensor expresses itself in.""" return self._unit_of_measurement @property def icon(self): """Return the icon to use in the frontend, if any.""" return CURRENCY_ICONS.get(self.currency, DEFAULT_COIN_ICON) @property def extra_state_attributes(self): """Return the state attributes of the sensor.""" return {ATTR_ATTRIBUTION: ATTRIBUTION} def update(self): """Get the latest state of the sensor.""" self._coinbase_data.update() rate = self._coinbase_data.exchange_rates.rates[self.currency] self._state = round(1 / float(rate), 2)
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/coinbase/sensor.py
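One hazard in the coinbase setup_platform above: if discovery_info contains neither "account" nor "exchange_currency", the name sensor is never bound and add_entities([sensor], True) raises NameError. A hedged sketch of a defensive variant; build_sensor is a hypothetical helper, and AccountSensor and ExchangeRateSensor are the classes defined in the file above.

def build_sensor(coinbase_data, discovery_info):
    """Return the matching sensor, or None when nothing is recognized."""
    if "account" in discovery_info:
        account = discovery_info["account"]
        return AccountSensor(
            coinbase_data, account["name"], account["balance"]["currency"]
        )
    if "exchange_currency" in discovery_info:
        return ExchangeRateSensor(
            coinbase_data,
            discovery_info["exchange_currency"],
            discovery_info["native_currency"],
        )
    return None  # caller should skip add_entities in this case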
"""Config flow for Mikrotik.""" import voluptuous as vol from homeassistant import config_entries from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, CONF_VERIFY_SSL, ) from homeassistant.core import callback from .const import ( CONF_ARP_PING, CONF_DETECTION_TIME, CONF_FORCE_DHCP, DEFAULT_API_PORT, DEFAULT_DETECTION_TIME, DEFAULT_NAME, DOMAIN, ) from .errors import CannotConnect, LoginError from .hub import get_api class MikrotikFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Handle a Mikrotik config flow.""" VERSION = 1 @staticmethod @callback def async_get_options_flow(config_entry): """Get the options flow for this handler.""" return MikrotikOptionsFlowHandler(config_entry) async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" errors = {} if user_input is not None: for entry in self._async_current_entries(): if entry.data[CONF_HOST] == user_input[CONF_HOST]: return self.async_abort(reason="already_configured") if entry.data[CONF_NAME] == user_input[CONF_NAME]: errors[CONF_NAME] = "name_exists" break try: await self.hass.async_add_executor_job(get_api, self.hass, user_input) except CannotConnect: errors["base"] = "cannot_connect" except LoginError: errors[CONF_USERNAME] = "invalid_auth" errors[CONF_PASSWORD] = "invalid_auth" if not errors: return self.async_create_entry( title=user_input[CONF_NAME], data=user_input ) return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_NAME, default=DEFAULT_NAME): str, vol.Required(CONF_HOST): str, vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str, vol.Optional(CONF_PORT, default=DEFAULT_API_PORT): int, vol.Optional(CONF_VERIFY_SSL, default=False): bool, } ), errors=errors, ) async def async_step_import(self, import_config): """Import Miktortik from config.""" import_config[CONF_DETECTION_TIME] = import_config[ CONF_DETECTION_TIME ].total_seconds() return await self.async_step_user(user_input=import_config) class MikrotikOptionsFlowHandler(config_entries.OptionsFlow): """Handle Mikrotik options.""" def __init__(self, config_entry): """Initialize Mikrotik options flow.""" self.config_entry = config_entry async def async_step_init(self, user_input=None): """Manage the Mikrotik options.""" return await self.async_step_device_tracker() async def async_step_device_tracker(self, user_input=None): """Manage the device tracker options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) options = { vol.Optional( CONF_FORCE_DHCP, default=self.config_entry.options.get(CONF_FORCE_DHCP, False), ): bool, vol.Optional( CONF_ARP_PING, default=self.config_entry.options.get(CONF_ARP_PING, False), ): bool, vol.Optional( CONF_DETECTION_TIME, default=self.config_entry.options.get( CONF_DETECTION_TIME, DEFAULT_DETECTION_TIME ), ): int, } return self.async_show_form( step_id="device_tracker", data_schema=vol.Schema(options) )
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/mikrotik/config_flow.py
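A small illustration of how the voluptuous options schema in async_step_device_tracker behaves: vol.Optional defaults are filled in when the user submits an empty form, and those defaults are seeded from the existing entry options. The literal keys and the 300-second fallback below are stand-ins for the CONF_* constants and a real config entry, not values taken from the integration.

import voluptuous as vol

existing_options = {"arp_ping": True}  # pretend config_entry.options
schema = vol.Schema(
    {
        vol.Optional("force_dhcp", default=existing_options.get("force_dhcp", False)): bool,
        vol.Optional("arp_ping", default=existing_options.get("arp_ping", False)): bool,
        vol.Optional("detection_time", default=existing_options.get("detection_time", 300)): int,
    }
)
# An empty submission picks up the stored option plus the fallbacks.
assert schema({}) == {"force_dhcp": False, "arp_ping": True, "detection_time": 300}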
"""Support for the PRT Heatmiser themostats using the V3 protocol.""" from __future__ import annotations import logging from heatmiserV3 import connection, heatmiser import voluptuous as vol from homeassistant.components.climate import ( HVAC_MODE_HEAT, HVAC_MODE_OFF, PLATFORM_SCHEMA, ClimateEntity, ) from homeassistant.components.climate.const import SUPPORT_TARGET_TEMPERATURE from homeassistant.const import ( ATTR_TEMPERATURE, CONF_HOST, CONF_ID, CONF_NAME, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_THERMOSTATS = "tstats" TSTATS_SCHEMA = vol.Schema( vol.All( cv.ensure_list, [{vol.Required(CONF_ID): cv.positive_int, vol.Required(CONF_NAME): cv.string}], ) ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PORT): cv.string, vol.Optional(CONF_THERMOSTATS, default=[]): TSTATS_SCHEMA, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the heatmiser thermostat.""" heatmiser_v3_thermostat = heatmiser.HeatmiserThermostat host = config[CONF_HOST] port = config[CONF_PORT] thermostats = config[CONF_THERMOSTATS] uh1_hub = connection.HeatmiserUH1(host, port) add_entities( [ HeatmiserV3Thermostat(heatmiser_v3_thermostat, thermostat, uh1_hub) for thermostat in thermostats ], True, ) class HeatmiserV3Thermostat(ClimateEntity): """Representation of a HeatmiserV3 thermostat.""" def __init__(self, therm, device, uh1): """Initialize the thermostat.""" self.therm = therm(device[CONF_ID], "prt", uh1) self.uh1 = uh1 self._name = device[CONF_NAME] self._current_temperature = None self._target_temperature = None self._id = device self.dcb = None self._hvac_mode = HVAC_MODE_HEAT self._temperature_unit = None @property def supported_features(self): """Return the list of supported features.""" return SUPPORT_TARGET_TEMPERATURE @property def name(self): """Return the name of the thermostat, if any.""" return self._name @property def temperature_unit(self): """Return the unit of measurement which this thermostat uses.""" return self._temperature_unit @property def hvac_mode(self) -> str: """Return hvac operation ie. heat, cool mode. Need to be one of HVAC_MODE_*. """ return self._hvac_mode @property def hvac_modes(self) -> list[str]: """Return the list of available hvac operation modes. Need to be a subset of HVAC_MODES. """ return [HVAC_MODE_HEAT, HVAC_MODE_OFF] @property def current_temperature(self): """Return the current temperature.""" return self._current_temperature @property def target_temperature(self): """Return the temperature we try to reach.""" return self._target_temperature def set_temperature(self, **kwargs): """Set new target temperature.""" temperature = kwargs.get(ATTR_TEMPERATURE) self._target_temperature = int(temperature) self.therm.set_target_temp(self._target_temperature) def update(self): """Get the latest data.""" self.uh1.reopen() if not self.uh1.status: _LOGGER.error("Failed to update device %s", self._name) return self.dcb = self.therm.read_dcb() self._temperature_unit = ( TEMP_CELSIUS if (self.therm.get_temperature_format() == "C") else TEMP_FAHRENHEIT ) self._current_temperature = int(self.therm.get_floor_temp()) self._target_temperature = int(self.therm.get_target_temp()) self._hvac_mode = ( HVAC_MODE_OFF if (int(self.therm.get_current_state()) == 0) else HVAC_MODE_HEAT )
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/heatmiser/climate.py
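TSTATS_SCHEMA above accepts either a single thermostat mapping or a list of them, because cv.ensure_list normalizes the input before the per-item schema runs. A hedged, self-contained sketch of that normalization; the lambda stands in for Home Assistant's cv.ensure_list, and plain string keys replace CONF_ID and CONF_NAME.

import voluptuous as vol

tstats_schema = vol.Schema(
    vol.All(
        lambda value: value if isinstance(value, list) else [value],  # ~ cv.ensure_list
        [{vol.Required("id"): int, vol.Required("name"): str}],
    )
)

single = {"id": 1, "name": "Living Room"}
# Both shapes validate to the same normalized list of mappings.
assert tstats_schema(single) == tstats_schema([single]) == [single]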
"""Support for Ecovacs Deebot vacuums.""" import logging import random import string from sucks import EcoVacsAPI, VacBot import voluptuous as vol from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DOMAIN = "ecovacs" CONF_COUNTRY = "country" CONF_CONTINENT = "continent" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_COUNTRY): vol.All(vol.Lower, cv.string), vol.Required(CONF_CONTINENT): vol.All(vol.Lower, cv.string), } ) }, extra=vol.ALLOW_EXTRA, ) ECOVACS_DEVICES = "ecovacs_devices" # Generate a random device ID on each bootup ECOVACS_API_DEVICEID = "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(8) ) def setup(hass, config): """Set up the Ecovacs component.""" _LOGGER.debug("Creating new Ecovacs component") hass.data[ECOVACS_DEVICES] = [] ecovacs_api = EcoVacsAPI( ECOVACS_API_DEVICEID, config[DOMAIN].get(CONF_USERNAME), EcoVacsAPI.md5(config[DOMAIN].get(CONF_PASSWORD)), config[DOMAIN].get(CONF_COUNTRY), config[DOMAIN].get(CONF_CONTINENT), ) devices = ecovacs_api.devices() _LOGGER.debug("Ecobot devices: %s", devices) for device in devices: _LOGGER.info( "Discovered Ecovacs device on account: %s with nickname %s", device["did"], device["nick"], ) vacbot = VacBot( ecovacs_api.uid, ecovacs_api.REALM, ecovacs_api.resource, ecovacs_api.user_access_token, device, config[DOMAIN].get(CONF_CONTINENT).lower(), monitor=True, ) hass.data[ECOVACS_DEVICES].append(vacbot) def stop(event: object) -> None: """Shut down open connections to Ecovacs XMPP server.""" for device in hass.data[ECOVACS_DEVICES]: _LOGGER.info( "Shutting down connection to Ecovacs device %s", device.vacuum["did"] ) device.disconnect() # Listen for HA stop to disconnect. hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop) if hass.data[ECOVACS_DEVICES]: _LOGGER.debug("Starting vacuum components") discovery.load_platform(hass, "vacuum", DOMAIN, {}, config) return True
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/ecovacs/__init__.py
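ECOVACS_API_DEVICEID above is rebuilt on every boot from eight random uppercase letters and digits. The same expression in isolation, plus a secrets-based variant for cryptographic-strength randomness; the variant is an assumption for illustration, not what the integration uses.

import random
import secrets
import string

ALPHABET = string.ascii_uppercase + string.digits

device_id = "".join(random.choice(ALPHABET) for _ in range(8))  # as in the module above
strong_id = "".join(secrets.choice(ALPHABET) for _ in range(8))  # hardened alternative
print(device_id, strong_id)  # e.g. K3P0Q9ZB 7XA2MNE4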
"""The Ruckus Unleashed integration.""" from pyruckus import Ruckus from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import device_registry from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC from .const import ( API_AP, API_DEVICE_NAME, API_ID, API_MAC, API_MODEL, API_SYSTEM_OVERVIEW, API_VERSION, COORDINATOR, DOMAIN, MANUFACTURER, PLATFORMS, UNDO_UPDATE_LISTENERS, ) from .coordinator import RuckusUnleashedDataUpdateCoordinator async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up Ruckus Unleashed from a config entry.""" try: ruckus = await hass.async_add_executor_job( Ruckus, entry.data[CONF_HOST], entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], ) except ConnectionError as error: raise ConfigEntryNotReady from error coordinator = RuckusUnleashedDataUpdateCoordinator(hass, ruckus=ruckus) await coordinator.async_config_entry_first_refresh() system_info = await hass.async_add_executor_job(ruckus.system_info) registry = await device_registry.async_get_registry(hass) ap_info = await hass.async_add_executor_job(ruckus.ap_info) for device in ap_info[API_AP][API_ID].values(): registry.async_get_or_create( config_entry_id=entry.entry_id, connections={(CONNECTION_NETWORK_MAC, device[API_MAC])}, identifiers={(CONNECTION_NETWORK_MAC, device[API_MAC])}, manufacturer=MANUFACTURER, name=device[API_DEVICE_NAME], model=device[API_MODEL], sw_version=system_info[API_SYSTEM_OVERVIEW][API_VERSION], ) hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][entry.entry_id] = { COORDINATOR: coordinator, UNDO_UPDATE_LISTENERS: [], } hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) if unload_ok: for listener in hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENERS]: listener() hass.data[DOMAIN].pop(entry.entry_id) return unload_ok
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/ruckus_unleashed/__init__.py
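The try/except in async_setup_entry above is the standard retry pattern: the blocking Ruckus constructor runs in the executor, and a ConnectionError is chained into ConfigEntryNotReady so Home Assistant retries setup later. A stdlib-only sketch of the exception chaining; fake_ruckus and the stub ConfigEntryNotReady class are stand-ins.

class ConfigEntryNotReady(Exception):
    """Stand-in for homeassistant.exceptions.ConfigEntryNotReady."""

def fake_ruckus(host, username, password):
    raise ConnectionError(f"{host} unreachable")

def setup(host, username, password):
    try:
        return fake_ruckus(host, username, password)
    except ConnectionError as error:
        # "raise ... from error" keeps the original failure as __cause__,
        # so the retry log still shows why the controller was unreachable.
        raise ConfigEntryNotReady from error

try:
    setup("192.0.2.1", "admin", "secret")
except ConfigEntryNotReady as err:
    assert isinstance(err.__cause__, ConnectionError)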
"""Support for Neato Connected Vacuums.""" from datetime import timedelta import logging from pybotvac.exceptions import NeatoRobotException import voluptuous as vol from homeassistant.components.vacuum import ( ATTR_STATUS, STATE_CLEANING, STATE_DOCKED, STATE_ERROR, STATE_IDLE, STATE_PAUSED, STATE_RETURNING, SUPPORT_BATTERY, SUPPORT_CLEAN_SPOT, SUPPORT_LOCATE, SUPPORT_MAP, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_START, SUPPORT_STATE, SUPPORT_STOP, StateVacuumEntity, ) from homeassistant.const import ATTR_MODE from homeassistant.helpers import config_validation as cv, entity_platform from .const import ( ACTION, ALERTS, ERRORS, MODE, NEATO_DOMAIN, NEATO_LOGIN, NEATO_MAP_DATA, NEATO_PERSISTENT_MAPS, NEATO_ROBOTS, SCAN_INTERVAL_MINUTES, ) _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(minutes=SCAN_INTERVAL_MINUTES) SUPPORT_NEATO = ( SUPPORT_BATTERY | SUPPORT_PAUSE | SUPPORT_RETURN_HOME | SUPPORT_STOP | SUPPORT_START | SUPPORT_CLEAN_SPOT | SUPPORT_STATE | SUPPORT_MAP | SUPPORT_LOCATE ) ATTR_CLEAN_START = "clean_start" ATTR_CLEAN_STOP = "clean_stop" ATTR_CLEAN_AREA = "clean_area" ATTR_CLEAN_BATTERY_START = "battery_level_at_clean_start" ATTR_CLEAN_BATTERY_END = "battery_level_at_clean_end" ATTR_CLEAN_SUSP_COUNT = "clean_suspension_count" ATTR_CLEAN_SUSP_TIME = "clean_suspension_time" ATTR_CLEAN_PAUSE_TIME = "clean_pause_time" ATTR_CLEAN_ERROR_TIME = "clean_error_time" ATTR_LAUNCHED_FROM = "launched_from" ATTR_NAVIGATION = "navigation" ATTR_CATEGORY = "category" ATTR_ZONE = "zone" async def async_setup_entry(hass, entry, async_add_entities): """Set up Neato vacuum with config entry.""" dev = [] neato = hass.data.get(NEATO_LOGIN) mapdata = hass.data.get(NEATO_MAP_DATA) persistent_maps = hass.data.get(NEATO_PERSISTENT_MAPS) for robot in hass.data[NEATO_ROBOTS]: dev.append(NeatoConnectedVacuum(neato, robot, mapdata, persistent_maps)) if not dev: return _LOGGER.debug("Adding vacuums %s", dev) async_add_entities(dev, True) platform = entity_platform.async_get_current_platform() assert platform is not None platform.async_register_entity_service( "custom_cleaning", { vol.Optional(ATTR_MODE, default=2): cv.positive_int, vol.Optional(ATTR_NAVIGATION, default=1): cv.positive_int, vol.Optional(ATTR_CATEGORY, default=4): cv.positive_int, vol.Optional(ATTR_ZONE): cv.string, }, "neato_custom_cleaning", ) class NeatoConnectedVacuum(StateVacuumEntity): """Representation of a Neato Connected Vacuum.""" def __init__(self, neato, robot, mapdata, persistent_maps): """Initialize the Neato Connected Vacuum.""" self.robot = robot self._available = neato is not None self._mapdata = mapdata self._name = f"{self.robot.name}" self._robot_has_map = self.robot.has_persistent_maps self._robot_maps = persistent_maps self._robot_serial = self.robot.serial self._status_state = None self._clean_state = None self._state = None self._clean_time_start = None self._clean_time_stop = None self._clean_area = None self._clean_battery_start = None self._clean_battery_end = None self._clean_susp_charge_count = None self._clean_susp_time = None self._clean_pause_time = None self._clean_error_time = None self._launched_from = None self._battery_level = None self._robot_boundaries = [] self._robot_stats = None def update(self): """Update the states of Neato Vacuums.""" _LOGGER.debug("Running Neato Vacuums update for '%s'", self.entity_id) try: if self._robot_stats is None: self._robot_stats = self.robot.get_general_info().json().get("data") except NeatoRobotException: _LOGGER.warning("Couldn't fetch robot information 
of %s", self.entity_id) try: self._state = self.robot.state except NeatoRobotException as ex: if self._available: # print only once when available _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) self._state = None self._available = False return self._available = True _LOGGER.debug("self._state=%s", self._state) if "alert" in self._state: robot_alert = ALERTS.get(self._state["alert"]) else: robot_alert = None if self._state["state"] == 1: if self._state["details"]["isCharging"]: self._clean_state = STATE_DOCKED self._status_state = "Charging" elif ( self._state["details"]["isDocked"] and not self._state["details"]["isCharging"] ): self._clean_state = STATE_DOCKED self._status_state = "Docked" else: self._clean_state = STATE_IDLE self._status_state = "Stopped" if robot_alert is not None: self._status_state = robot_alert elif self._state["state"] == 2: if robot_alert is None: self._clean_state = STATE_CLEANING self._status_state = ( f"{MODE.get(self._state['cleaning']['mode'])} " f"{ACTION.get(self._state['action'])}" ) if ( "boundary" in self._state["cleaning"] and "name" in self._state["cleaning"]["boundary"] ): self._status_state += ( f" {self._state['cleaning']['boundary']['name']}" ) else: self._status_state = robot_alert elif self._state["state"] == 3: self._clean_state = STATE_PAUSED self._status_state = "Paused" elif self._state["state"] == 4: self._clean_state = STATE_ERROR self._status_state = ERRORS.get(self._state["error"]) self._battery_level = self._state["details"]["charge"] if not self._mapdata.get(self._robot_serial, {}).get("maps", []): return mapdata = self._mapdata[self._robot_serial]["maps"][0] self._clean_time_start = mapdata["start_at"] self._clean_time_stop = mapdata["end_at"] self._clean_area = mapdata["cleaned_area"] self._clean_susp_charge_count = mapdata["suspended_cleaning_charging_count"] self._clean_susp_time = mapdata["time_in_suspended_cleaning"] self._clean_pause_time = mapdata["time_in_pause"] self._clean_error_time = mapdata["time_in_error"] self._clean_battery_start = mapdata["run_charge_at_start"] self._clean_battery_end = mapdata["run_charge_at_end"] self._launched_from = mapdata["launched_from"] if ( self._robot_has_map and self._state["availableServices"]["maps"] != "basic-1" and self._robot_maps[self._robot_serial] ): allmaps = self._robot_maps[self._robot_serial] _LOGGER.debug( "Found the following maps for '%s': %s", self.entity_id, allmaps ) self._robot_boundaries = [] # Reset boundaries before refreshing boundaries for maps in allmaps: try: robot_boundaries = self.robot.get_map_boundaries(maps["id"]).json() except NeatoRobotException as ex: _LOGGER.error( "Could not fetch map boundaries for '%s': %s", self.entity_id, ex, ) return _LOGGER.debug( "Boundaries for robot '%s' in map '%s': %s", self.entity_id, maps["name"], robot_boundaries, ) if "boundaries" in robot_boundaries["data"]: self._robot_boundaries += robot_boundaries["data"]["boundaries"] _LOGGER.debug( "List of boundaries for '%s': %s", self.entity_id, self._robot_boundaries, ) @property def name(self): """Return the name of the device.""" return self._name @property def supported_features(self): """Flag vacuum cleaner robot features that are supported.""" return SUPPORT_NEATO @property def battery_level(self): """Return the battery level of the vacuum cleaner.""" return self._battery_level @property def available(self): """Return if the robot is available.""" return self._available @property def icon(self): """Return neato specific icon.""" return 
"mdi:robot-vacuum-variant" @property def state(self): """Return the status of the vacuum cleaner.""" return self._clean_state @property def unique_id(self): """Return a unique ID.""" return self._robot_serial @property def extra_state_attributes(self): """Return the state attributes of the vacuum cleaner.""" data = {} if self._status_state is not None: data[ATTR_STATUS] = self._status_state if self._clean_time_start is not None: data[ATTR_CLEAN_START] = self._clean_time_start if self._clean_time_stop is not None: data[ATTR_CLEAN_STOP] = self._clean_time_stop if self._clean_area is not None: data[ATTR_CLEAN_AREA] = self._clean_area if self._clean_susp_charge_count is not None: data[ATTR_CLEAN_SUSP_COUNT] = self._clean_susp_charge_count if self._clean_susp_time is not None: data[ATTR_CLEAN_SUSP_TIME] = self._clean_susp_time if self._clean_pause_time is not None: data[ATTR_CLEAN_PAUSE_TIME] = self._clean_pause_time if self._clean_error_time is not None: data[ATTR_CLEAN_ERROR_TIME] = self._clean_error_time if self._clean_battery_start is not None: data[ATTR_CLEAN_BATTERY_START] = self._clean_battery_start if self._clean_battery_end is not None: data[ATTR_CLEAN_BATTERY_END] = self._clean_battery_end if self._launched_from is not None: data[ATTR_LAUNCHED_FROM] = self._launched_from return data @property def device_info(self): """Device info for neato robot.""" info = {"identifiers": {(NEATO_DOMAIN, self._robot_serial)}, "name": self._name} if self._robot_stats: info["manufacturer"] = self._robot_stats["battery"]["vendor"] info["model"] = self._robot_stats["model"] info["sw_version"] = self._robot_stats["firmware"] return info def start(self): """Start cleaning or resume cleaning.""" try: if self._state["state"] == 1: self.robot.start_cleaning() elif self._state["state"] == 3: self.robot.resume_cleaning() except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) def pause(self): """Pause the vacuum.""" try: self.robot.pause_cleaning() except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) def return_to_base(self, **kwargs): """Set the vacuum cleaner to return to the dock.""" try: if self._clean_state == STATE_CLEANING: self.robot.pause_cleaning() self._clean_state = STATE_RETURNING self.robot.send_to_base() except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) def stop(self, **kwargs): """Stop the vacuum cleaner.""" try: self.robot.stop_cleaning() except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) def locate(self, **kwargs): """Locate the robot by making it emit a sound.""" try: self.robot.locate() except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) def clean_spot(self, **kwargs): """Run a spot cleaning starting from the base.""" try: self.robot.start_spot_cleaning() except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex ) def neato_custom_cleaning(self, mode, navigation, category, zone=None): """Zone cleaning service call.""" boundary_id = None if zone is not None: for boundary in self._robot_boundaries: if zone in boundary["name"]: boundary_id = boundary["id"] if boundary_id is None: _LOGGER.error( "Zone '%s' was not found for the robot '%s'", zone, self.entity_id ) return _LOGGER.info("Start cleaning zone '%s' with robot %s", zone, 
self.entity_id) self._clean_state = STATE_CLEANING try: self.robot.start_cleaning(mode, navigation, category, boundary_id) except NeatoRobotException as ex: _LOGGER.error( "Neato vacuum connection error for '%s': %s", self.entity_id, ex )
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/neato/vacuum.py
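The zone lookup inside neato_custom_cleaning above is a substring match over boundary names, and because the loop has no break, the last matching boundary wins. Extracted here as a hedged standalone helper with invented sample data; find_boundary_id is not a name from the integration.

def find_boundary_id(zone, boundaries):
    """Return the id of the last boundary whose name contains zone, else None."""
    boundary_id = None
    for boundary in boundaries:
        if zone in boundary["name"]:
            boundary_id = boundary["id"]
    return boundary_id

boundaries = [
    {"id": "b1", "name": "Kitchen"},
    {"id": "b2", "name": "Kitchen North"},
]
assert find_boundary_id("Kitchen", boundaries) == "b2"  # last match wins
assert find_boundary_id("Garage", boundaries) is None   # caller logs an error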
"""Support for Netgear LTE modems.""" import asyncio from datetime import timedelta import logging import aiohttp import attr import eternalegypt import voluptuous as vol from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.const import ( CONF_HOST, CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_PASSWORD, CONF_RECIPIENT, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import callback from homeassistant.helpers import config_validation as cv, discovery from homeassistant.helpers.aiohttp_client import async_create_clientsession from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval from . import sensor_types _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=10) DISPATCHER_NETGEAR_LTE = "netgear_lte_update" DOMAIN = "netgear_lte" DATA_KEY = "netgear_lte" EVENT_SMS = "netgear_lte_sms" SERVICE_DELETE_SMS = "delete_sms" SERVICE_SET_OPTION = "set_option" SERVICE_CONNECT_LTE = "connect_lte" SERVICE_DISCONNECT_LTE = "disconnect_lte" ATTR_HOST = "host" ATTR_SMS_ID = "sms_id" ATTR_FROM = "from" ATTR_MESSAGE = "message" ATTR_FAILOVER = "failover" ATTR_AUTOCONNECT = "autoconnect" FAILOVER_MODES = ["auto", "wire", "mobile"] AUTOCONNECT_MODES = ["never", "home", "always"] NOTIFY_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME, default=DOMAIN): cv.string, vol.Optional(CONF_RECIPIENT, default=[]): vol.All(cv.ensure_list, [cv.string]), } ) SENSOR_SCHEMA = vol.Schema( { vol.Optional( CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_SENSORS ): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_SENSORS)]) } ) BINARY_SENSOR_SCHEMA = vol.Schema( { vol.Optional( CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_BINARY_SENSORS ): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_BINARY_SENSORS)]) } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(NOTIFY_DOMAIN, default={}): vol.All( cv.ensure_list, [NOTIFY_SCHEMA] ), vol.Optional(SENSOR_DOMAIN, default={}): SENSOR_SCHEMA, vol.Optional( BINARY_SENSOR_DOMAIN, default={} ): BINARY_SENSOR_SCHEMA, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) DELETE_SMS_SCHEMA = vol.Schema( { vol.Optional(ATTR_HOST): cv.string, vol.Required(ATTR_SMS_ID): vol.All(cv.ensure_list, [cv.positive_int]), } ) SET_OPTION_SCHEMA = vol.Schema( vol.All( cv.has_at_least_one_key(ATTR_FAILOVER, ATTR_AUTOCONNECT), { vol.Optional(ATTR_HOST): cv.string, vol.Optional(ATTR_FAILOVER): vol.In(FAILOVER_MODES), vol.Optional(ATTR_AUTOCONNECT): vol.In(AUTOCONNECT_MODES), }, ) ) CONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string}) DISCONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string}) @attr.s class ModemData: """Class for modem state.""" hass = attr.ib() host = attr.ib() modem = attr.ib() data = attr.ib(init=False, default=None) connected = attr.ib(init=False, default=True) async def async_update(self): """Call the API to update the data.""" try: self.data = await self.modem.information() if not self.connected: _LOGGER.warning("Connected to %s", self.host) self.connected = True except eternalegypt.Error: if self.connected: _LOGGER.warning("Lost connection to %s", self.host) self.connected = False 
self.data = None async_dispatcher_send(self.hass, DISPATCHER_NETGEAR_LTE) @attr.s class LTEData: """Shared state.""" websession = attr.ib() modem_data = attr.ib(init=False, factory=dict) def get_modem_data(self, config): """Get modem_data for the host in config.""" if config[CONF_HOST] is not None: return self.modem_data.get(config[CONF_HOST]) if len(self.modem_data) != 1: return None return next(iter(self.modem_data.values())) async def async_setup(hass, config): """Set up Netgear LTE component.""" if DATA_KEY not in hass.data: websession = async_create_clientsession( hass, cookie_jar=aiohttp.CookieJar(unsafe=True) ) hass.data[DATA_KEY] = LTEData(websession) async def service_handler(service): """Apply a service.""" host = service.data.get(ATTR_HOST) conf = {CONF_HOST: host} modem_data = hass.data[DATA_KEY].get_modem_data(conf) if not modem_data: _LOGGER.error("%s: host %s unavailable", service.service, host) return if service.service == SERVICE_DELETE_SMS: for sms_id in service.data[ATTR_SMS_ID]: await modem_data.modem.delete_sms(sms_id) elif service.service == SERVICE_SET_OPTION: failover = service.data.get(ATTR_FAILOVER) if failover: await modem_data.modem.set_failover_mode(failover) autoconnect = service.data.get(ATTR_AUTOCONNECT) if autoconnect: await modem_data.modem.set_autoconnect_mode(autoconnect) elif service.service == SERVICE_CONNECT_LTE: await modem_data.modem.connect_lte() elif service.service == SERVICE_DISCONNECT_LTE: await modem_data.modem.disconnect_lte() service_schemas = { SERVICE_DELETE_SMS: DELETE_SMS_SCHEMA, SERVICE_SET_OPTION: SET_OPTION_SCHEMA, SERVICE_CONNECT_LTE: CONNECT_LTE_SCHEMA, SERVICE_DISCONNECT_LTE: DISCONNECT_LTE_SCHEMA, } for service, schema in service_schemas.items(): hass.services.async_register( DOMAIN, service, service_handler, schema=schema ) netgear_lte_config = config[DOMAIN] # Set up each modem tasks = [_setup_lte(hass, lte_conf) for lte_conf in netgear_lte_config] await asyncio.wait(tasks) # Load platforms for each modem for lte_conf in netgear_lte_config: # Notify for notify_conf in lte_conf[NOTIFY_DOMAIN]: discovery_info = { CONF_HOST: lte_conf[CONF_HOST], CONF_NAME: notify_conf.get(CONF_NAME), NOTIFY_DOMAIN: notify_conf, } hass.async_create_task( discovery.async_load_platform( hass, NOTIFY_DOMAIN, DOMAIN, discovery_info, config ) ) # Sensor sensor_conf = lte_conf.get(SENSOR_DOMAIN) discovery_info = {CONF_HOST: lte_conf[CONF_HOST], SENSOR_DOMAIN: sensor_conf} hass.async_create_task( discovery.async_load_platform( hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config ) ) # Binary Sensor binary_sensor_conf = lte_conf.get(BINARY_SENSOR_DOMAIN) discovery_info = { CONF_HOST: lte_conf[CONF_HOST], BINARY_SENSOR_DOMAIN: binary_sensor_conf, } hass.async_create_task( discovery.async_load_platform( hass, BINARY_SENSOR_DOMAIN, DOMAIN, discovery_info, config ) ) return True async def _setup_lte(hass, lte_config): """Set up a Netgear LTE modem.""" host = lte_config[CONF_HOST] password = lte_config[CONF_PASSWORD] websession = hass.data[DATA_KEY].websession modem = eternalegypt.Modem(hostname=host, websession=websession) modem_data = ModemData(hass, host, modem) try: await _login(hass, modem_data, password) except eternalegypt.Error: retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password)) @callback def cleanup_retry(event): """Clean up retry task resources.""" if not retry_task.done(): retry_task.cancel() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry) async def _login(hass, modem_data, password): """Log in and complete 
setup.""" await modem_data.modem.login(password=password) def fire_sms_event(sms): """Send an SMS event.""" data = { ATTR_HOST: modem_data.host, ATTR_SMS_ID: sms.id, ATTR_FROM: sms.sender, ATTR_MESSAGE: sms.message, } hass.bus.async_fire(EVENT_SMS, data) await modem_data.modem.add_sms_listener(fire_sms_event) await modem_data.async_update() hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data async def _update(now): """Periodic update.""" await modem_data.async_update() update_unsub = async_track_time_interval(hass, _update, SCAN_INTERVAL) async def cleanup(event): """Clean up resources.""" update_unsub() await modem_data.modem.logout() del hass.data[DATA_KEY].modem_data[modem_data.host] hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup) async def _retry_login(hass, modem_data, password): """Sleep and retry setup.""" _LOGGER.warning("Could not connect to %s. Will keep trying", modem_data.host) modem_data.connected = False delay = 15 while not modem_data.connected: await asyncio.sleep(delay) try: await _login(hass, modem_data, password) except eternalegypt.Error: delay = min(2 * delay, 300) @attr.s class LTEEntity(Entity): """Base LTE entity.""" modem_data = attr.ib() sensor_type = attr.ib() _unique_id = attr.ib(init=False) @_unique_id.default def _init_unique_id(self): """Register unique_id while we know data is valid.""" return f"{self.sensor_type}_{self.modem_data.data.serial_number}" async def async_added_to_hass(self): """Register callback.""" self.async_on_remove( async_dispatcher_connect( self.hass, DISPATCHER_NETGEAR_LTE, self.async_write_ha_state ) ) async def async_update(self): """Force update of state.""" await self.modem_data.async_update() @property def should_poll(self): """Return that the sensor should not be polled.""" return False @property def available(self): """Return the availability of the sensor.""" return self.modem_data.data is not None @property def unique_id(self): """Return a unique ID like 'usage_5TG365AB0078V'.""" return self._unique_id @property def name(self): """Return the name of the sensor.""" return f"Netgear LTE {self.sensor_type}"
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/netgear_lte/__init__.py
"""Support for monitoring the Syncthing instance.""" import logging import aiosyncthing from homeassistant.components.sensor import SensorEntity from homeassistant.core import callback from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.event import async_track_time_interval from .const import ( DOMAIN, FOLDER_PAUSED_RECEIVED, FOLDER_SENSOR_ALERT_ICON, FOLDER_SENSOR_DEFAULT_ICON, FOLDER_SENSOR_ICONS, FOLDER_SUMMARY_RECEIVED, SCAN_INTERVAL, SERVER_AVAILABLE, SERVER_UNAVAILABLE, STATE_CHANGED_RECEIVED, ) _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Syncthing sensors.""" syncthing = hass.data[DOMAIN][config_entry.entry_id] try: config = await syncthing.system.config() version = await syncthing.system.version() except aiosyncthing.exceptions.SyncthingError as exception: raise PlatformNotReady from exception server_id = syncthing.server_id entities = [ FolderSensor( syncthing, server_id, folder["id"], folder["label"], version["version"], ) for folder in config["folders"] ] async_add_entities(entities) class FolderSensor(SensorEntity): """A Syncthing folder sensor.""" STATE_ATTRIBUTES = { "errors": "errors", "globalBytes": "global_bytes", "globalDeleted": "global_deleted", "globalDirectories": "global_directories", "globalFiles": "global_files", "globalSymlinks": "global_symlinks", "globalTotalItems": "global_total_items", "ignorePatterns": "ignore_patterns", "inSyncBytes": "in_sync_bytes", "inSyncFiles": "in_sync_files", "invalid": "invalid", "localBytes": "local_bytes", "localDeleted": "local_deleted", "localDirectories": "local_directories", "localFiles": "local_files", "localSymlinks": "local_symlinks", "localTotalItems": "local_total_items", "needBytes": "need_bytes", "needDeletes": "need_deletes", "needDirectories": "need_directories", "needFiles": "need_files", "needSymlinks": "need_symlinks", "needTotalItems": "need_total_items", "pullErrors": "pull_errors", "state": "state", } def __init__(self, syncthing, server_id, folder_id, folder_label, version): """Initialize the sensor.""" self._syncthing = syncthing self._server_id = server_id self._folder_id = folder_id self._folder_label = folder_label self._state = None self._unsub_timer = None self._version = version self._short_server_id = server_id.split("-")[0] @property def name(self): """Return the name of the sensor.""" return f"{self._short_server_id} {self._folder_id} {self._folder_label}" @property def unique_id(self): """Return the unique id of the entity.""" return f"{self._short_server_id}-{self._folder_id}" @property def state(self): """Return the state of the sensor.""" return self._state["state"] @property def available(self): """Could the device be accessed during the last update call.""" return self._state is not None @property def icon(self): """Return the icon for this sensor.""" if self._state is None: return FOLDER_SENSOR_DEFAULT_ICON if self.state in FOLDER_SENSOR_ICONS: return FOLDER_SENSOR_ICONS[self.state] return FOLDER_SENSOR_ALERT_ICON @property def extra_state_attributes(self): """Return the state attributes.""" return self._state @property def should_poll(self): """Return the polling requirement for this sensor.""" return False @property def device_info(self): """Return device information.""" return { "identifiers": {(DOMAIN, self._server_id)}, "name": f"Syncthing ({self._syncthing.url})", "manufacturer": "Syncthing Team", "sw_version": 
self._version, "entry_type": "service", } async def async_update_status(self): """Request folder status and update state.""" try: state = await self._syncthing.database.status(self._folder_id) except aiosyncthing.exceptions.SyncthingError: self._state = None else: self._state = self._filter_state(state) self.async_write_ha_state() def subscribe(self): """Start polling syncthing folder status.""" if self._unsub_timer is None: async def refresh(event_time): """Get the latest data from Syncthing.""" await self.async_update_status() self._unsub_timer = async_track_time_interval( self.hass, refresh, SCAN_INTERVAL ) @callback def unsubscribe(self): """Stop polling syncthing folder status.""" if self._unsub_timer is not None: self._unsub_timer() self._unsub_timer = None async def async_added_to_hass(self): """Handle entity which will be added.""" @callback def handle_folder_summary(event): if self._state is not None: self._state = self._filter_state(event["data"]["summary"]) self.async_write_ha_state() self.async_on_remove( async_dispatcher_connect( self.hass, f"{FOLDER_SUMMARY_RECEIVED}-{self._server_id}-{self._folder_id}", handle_folder_summary, ) ) @callback def handle_state_changed(event): if self._state is not None: self._state["state"] = event["data"]["to"] self.async_write_ha_state() self.async_on_remove( async_dispatcher_connect( self.hass, f"{STATE_CHANGED_RECEIVED}-{self._server_id}-{self._folder_id}", handle_state_changed, ) ) @callback def handle_folder_paused(event): if self._state is not None: self._state["state"] = "paused" self.async_write_ha_state() self.async_on_remove( async_dispatcher_connect( self.hass, f"{FOLDER_PAUSED_RECEIVED}-{self._server_id}-{self._folder_id}", handle_folder_paused, ) ) @callback def handle_server_unavailable(): self._state = None self.unsubscribe() self.async_write_ha_state() self.async_on_remove( async_dispatcher_connect( self.hass, f"{SERVER_UNAVAILABLE}-{self._server_id}", handle_server_unavailable, ) ) async def handle_server_available(): self.subscribe() await self.async_update_status() self.async_on_remove( async_dispatcher_connect( self.hass, f"{SERVER_AVAILABLE}-{self._server_id}", handle_server_available, ) ) self.subscribe() self.async_on_remove(self.unsubscribe) await self.async_update_status() def _filter_state(self, state): # Select only needed state attributes and map their names state = { self.STATE_ATTRIBUTES[key]: value for key, value in state.items() if key in self.STATE_ATTRIBUTES } # A workaround, for some reason, state of paused folders is an empty string if state["state"] == "": state["state"] = "paused" # Add some useful attributes state["id"] = self._folder_id state["label"] = self._folder_label return state
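The `_filter_state` helper above is a plain key-remapping dict comprehension: keep only the keys listed in `STATE_ATTRIBUTES` and rename them to snake_case. A self-contained sketch with a trimmed map and a hypothetical API payload:

# Trimmed copy of the mapping; `raw` is a hypothetical Syncthing payload.
STATE_ATTRIBUTES = {"globalBytes": "global_bytes", "needFiles": "need_files"}

raw = {"globalBytes": 1024, "needFiles": 3, "version": 2}
filtered = {
    STATE_ATTRIBUTES[key]: value
    for key, value in raw.items()
    if key in STATE_ATTRIBUTES
}
assert filtered == {"global_bytes": 1024, "need_files": 3}  # "version" is dropped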
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/syncthing/sensor.py
"""The Compensation integration.""" import logging import warnings import numpy as np import voluptuous as vol from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.const import ( CONF_ATTRIBUTE, CONF_SOURCE, CONF_UNIQUE_ID, CONF_UNIT_OF_MEASUREMENT, ) from homeassistant.helpers import config_validation as cv from homeassistant.helpers.discovery import async_load_platform from .const import ( CONF_COMPENSATION, CONF_DATAPOINTS, CONF_DEGREE, CONF_POLYNOMIAL, CONF_PRECISION, DATA_COMPENSATION, DEFAULT_DEGREE, DEFAULT_PRECISION, DOMAIN, ) _LOGGER = logging.getLogger(__name__) def datapoints_greater_than_degree(value: dict) -> dict: """Validate data point list is greater than polynomial degrees.""" if len(value[CONF_DATAPOINTS]) <= value[CONF_DEGREE]: raise vol.Invalid( f"{CONF_DATAPOINTS} must have at least {value[CONF_DEGREE]+1} {CONF_DATAPOINTS}" ) return value COMPENSATION_SCHEMA = vol.Schema( { vol.Required(CONF_SOURCE): cv.entity_id, vol.Required(CONF_DATAPOINTS): [ vol.ExactSequence([vol.Coerce(float), vol.Coerce(float)]) ], vol.Optional(CONF_UNIQUE_ID): cv.string, vol.Optional(CONF_ATTRIBUTE): cv.string, vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): cv.positive_int, vol.Optional(CONF_DEGREE, default=DEFAULT_DEGREE): vol.All( vol.Coerce(int), vol.Range(min=1, max=7), ), vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( {cv.slug: vol.All(COMPENSATION_SCHEMA, datapoints_greater_than_degree)} ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Compensation sensor.""" hass.data[DATA_COMPENSATION] = {} for compensation, conf in config.get(DOMAIN).items(): _LOGGER.debug("Setup %s.%s", DOMAIN, compensation) degree = conf[CONF_DEGREE] # get x values and y values from the x,y point pairs x_values, y_values = zip(*conf[CONF_DATAPOINTS]) # try to get valid coefficients for a polynomial coefficients = None with np.errstate(all="raise"): with warnings.catch_warnings(record=True) as all_warnings: warnings.simplefilter("always") try: coefficients = np.polyfit(x_values, y_values, degree) except FloatingPointError as error: _LOGGER.error( "Setup of %s encountered an error, %s", compensation, error, ) for warning in all_warnings: _LOGGER.warning( "Setup of %s encountered a warning, %s", compensation, str(warning.message).lower(), ) if coefficients is not None: data = { k: v for k, v in conf.items() if k not in [CONF_DEGREE, CONF_DATAPOINTS] } data[CONF_POLYNOMIAL] = np.poly1d(coefficients) hass.data[DATA_COMPENSATION][compensation] = data hass.async_create_task( async_load_platform( hass, SENSOR_DOMAIN, DOMAIN, {CONF_COMPENSATION: compensation}, config, ) ) return True
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/compensation/__init__.py
"""Support for Tado sensors for each zone.""" import logging from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE, PERCENTAGE, TEMP_CELSIUS, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .const import ( CONDITIONS_MAP, DATA, DOMAIN, SIGNAL_TADO_UPDATE_RECEIVED, TYPE_AIR_CONDITIONING, TYPE_HEATING, TYPE_HOT_WATER, ) from .entity import TadoHomeEntity, TadoZoneEntity _LOGGER = logging.getLogger(__name__) HOME_SENSORS = { "outdoor temperature", "solar percentage", "weather condition", } ZONE_SENSORS = { TYPE_HEATING: [ "temperature", "humidity", "heating", "tado mode", ], TYPE_AIR_CONDITIONING: [ "temperature", "humidity", "ac", "tado mode", ], TYPE_HOT_WATER: ["tado mode"], } def format_condition(condition: str) -> str: """Return condition from dict CONDITIONS_MAP.""" for key, value in CONDITIONS_MAP.items(): if condition in value: return key return condition async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities ): """Set up the Tado sensor platform.""" tado = hass.data[DOMAIN][entry.entry_id][DATA] zones = tado.zones entities = [] # Create home sensors entities.extend([TadoHomeSensor(tado, variable) for variable in HOME_SENSORS]) # Create zone sensors for zone in zones: zone_type = zone["type"] if zone_type not in ZONE_SENSORS: _LOGGER.warning("Unknown zone type skipped: %s", zone_type) continue entities.extend( [ TadoZoneSensor(tado, zone["name"], zone["id"], variable) for variable in ZONE_SENSORS[zone_type] ] ) if entities: async_add_entities(entities, True) class TadoHomeSensor(TadoHomeEntity, SensorEntity): """Representation of a Tado Sensor.""" def __init__(self, tado, home_variable): """Initialize of the Tado Sensor.""" super().__init__(tado) self._tado = tado self.home_variable = home_variable self._unique_id = f"{home_variable} {tado.home_id}" self._state = None self._state_attributes = None self._tado_weather_data = self._tado.data["weather"] async def async_added_to_hass(self): """Register for sensor updates.""" self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_TADO_UPDATE_RECEIVED.format( self._tado.home_id, "weather", "data" ), self._async_update_callback, ) ) self._async_update_home_data() @property def unique_id(self): """Return the unique id.""" return self._unique_id @property def name(self): """Return the name of the sensor.""" return f"{self._tado.home_name} {self.home_variable}" @property def state(self): """Return the state of the sensor.""" return self._state @property def extra_state_attributes(self): """Return the state attributes.""" return self._state_attributes @property def unit_of_measurement(self): """Return the unit of measurement.""" if self.home_variable == "temperature": return TEMP_CELSIUS if self.home_variable == "solar percentage": return PERCENTAGE if self.home_variable == "weather condition": return None @property def device_class(self): """Return the device class.""" if self.home_variable == "outdoor temperature": return DEVICE_CLASS_TEMPERATURE return None @callback def _async_update_callback(self): """Update and write state.""" self._async_update_home_data() self.async_write_ha_state() @callback def _async_update_home_data(self): """Handle update callbacks.""" try: self._tado_weather_data = self._tado.data["weather"] except KeyError: return if self.home_variable == "outdoor 
temperature": self._state = self.hass.config.units.temperature( self._tado_weather_data["outsideTemperature"]["celsius"], TEMP_CELSIUS, ) self._state_attributes = { "time": self._tado_weather_data["outsideTemperature"]["timestamp"], } elif self.home_variable == "solar percentage": self._state = self._tado_weather_data["solarIntensity"]["percentage"] self._state_attributes = { "time": self._tado_weather_data["solarIntensity"]["timestamp"], } elif self.home_variable == "weather condition": self._state = format_condition( self._tado_weather_data["weatherState"]["value"] ) self._state_attributes = { "time": self._tado_weather_data["weatherState"]["timestamp"] } class TadoZoneSensor(TadoZoneEntity, SensorEntity): """Representation of a tado Sensor.""" def __init__(self, tado, zone_name, zone_id, zone_variable): """Initialize of the Tado Sensor.""" self._tado = tado super().__init__(zone_name, tado.home_id, zone_id) self.zone_variable = zone_variable self._unique_id = f"{zone_variable} {zone_id} {tado.home_id}" self._state = None self._state_attributes = None self._tado_zone_data = None async def async_added_to_hass(self): """Register for sensor updates.""" self.async_on_remove( async_dispatcher_connect( self.hass, SIGNAL_TADO_UPDATE_RECEIVED.format( self._tado.home_id, "zone", self.zone_id ), self._async_update_callback, ) ) self._async_update_zone_data() @property def unique_id(self): """Return the unique id.""" return self._unique_id @property def name(self): """Return the name of the sensor.""" return f"{self.zone_name} {self.zone_variable}" @property def state(self): """Return the state of the sensor.""" return self._state @property def extra_state_attributes(self): """Return the state attributes.""" return self._state_attributes @property def unit_of_measurement(self): """Return the unit of measurement.""" if self.zone_variable == "temperature": return self.hass.config.units.temperature_unit if self.zone_variable == "humidity": return PERCENTAGE if self.zone_variable == "heating": return PERCENTAGE if self.zone_variable == "ac": return None @property def device_class(self): """Return the device class.""" if self.zone_variable == "humidity": return DEVICE_CLASS_HUMIDITY if self.zone_variable == "temperature": return DEVICE_CLASS_TEMPERATURE return None @callback def _async_update_callback(self): """Update and write state.""" self._async_update_zone_data() self.async_write_ha_state() @callback def _async_update_zone_data(self): """Handle update callbacks.""" try: self._tado_zone_data = self._tado.data["zone"][self.zone_id] except KeyError: return if self.zone_variable == "temperature": self._state = self.hass.config.units.temperature( self._tado_zone_data.current_temp, TEMP_CELSIUS ) self._state_attributes = { "time": self._tado_zone_data.current_temp_timestamp, "setting": 0, # setting is used in climate device } elif self.zone_variable == "humidity": self._state = self._tado_zone_data.current_humidity self._state_attributes = { "time": self._tado_zone_data.current_humidity_timestamp } elif self.zone_variable == "heating": self._state = self._tado_zone_data.heating_power_percentage self._state_attributes = { "time": self._tado_zone_data.heating_power_timestamp } elif self.zone_variable == "ac": self._state = self._tado_zone_data.ac_power self._state_attributes = {"time": self._tado_zone_data.ac_power_timestamp} elif self.zone_variable == "tado mode": self._state = self._tado_zone_data.tado_mode
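`format_condition` above does a reverse lookup: `CONDITIONS_MAP` maps each canonical condition to the set of raw Tado values it covers, and unknown raw values fall through unchanged. A self-contained sketch with a hypothetical two-entry map:

# Hypothetical map: canonical condition -> raw Tado weather values.
CONDITIONS_MAP = {"sunny": {"SUN"}, "cloudy": {"CLOUDY", "CLOUDY_MOSTLY"}}


def format_condition(condition):
    for key, values in CONDITIONS_MAP.items():
        if condition in values:
            return key
    return condition


assert format_condition("CLOUDY_MOSTLY") == "cloudy"
assert format_condition("UNKNOWN") == "UNKNOWN"  # unmapped values pass through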
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/tado/sensor.py
"""The JuiceNet integration.""" from datetime import timedelta import logging import aiohttp from pyjuicenet import Api, TokenError import voluptuous as vol from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import CONF_ACCESS_TOKEN from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import config_validation as cv from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR from .device import JuiceNetApi _LOGGER = logging.getLogger(__name__) PLATFORMS = ["sensor", "switch"] CONFIG_SCHEMA = vol.Schema( vol.All( cv.deprecated(DOMAIN), {DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})}, ), extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistant, config: dict): """Set up the JuiceNet component.""" conf = config.get(DOMAIN) hass.data.setdefault(DOMAIN, {}) if not conf: return True hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=conf ) ) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry): """Set up JuiceNet from a config entry.""" config = entry.data session = async_get_clientsession(hass) access_token = config[CONF_ACCESS_TOKEN] api = Api(access_token, session) juicenet = JuiceNetApi(api) try: await juicenet.setup() except TokenError as error: _LOGGER.error("JuiceNet Error %s", error) return False except aiohttp.ClientError as error: _LOGGER.error("Could not reach the JuiceNet API %s", error) raise ConfigEntryNotReady from error if not juicenet.devices: _LOGGER.error("No JuiceNet devices found for this account") return False _LOGGER.info("%d JuiceNet device(s) found", len(juicenet.devices)) async def async_update_data(): """Update all device states from the JuiceNet API.""" for device in juicenet.devices: await device.update_state(True) return True coordinator = DataUpdateCoordinator( hass, _LOGGER, name="JuiceNet", update_method=async_update_data, update_interval=timedelta(seconds=30), ) hass.data[DOMAIN][entry.entry_id] = { JUICENET_API: juicenet, JUICENET_COORDINATOR: coordinator, } await coordinator.async_config_entry_first_refresh() hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok
"""The tests for the discovery component.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.bootstrap import async_setup_component from homeassistant.components import discovery from homeassistant.const import EVENT_HOMEASSISTANT_STARTED from homeassistant.util.dt import utcnow from tests.common import async_fire_time_changed, mock_coro # One might consider to "mock" services, but it's easy enough to just use # what is already available. SERVICE = "yamaha" SERVICE_COMPONENT = "media_player" SERVICE_NO_PLATFORM = "netgear_router" SERVICE_NO_PLATFORM_COMPONENT = "device_tracker" SERVICE_INFO = {"key": "value"} # Can be anything UNKNOWN_SERVICE = "this_service_will_never_be_supported" BASE_CONFIG = {discovery.DOMAIN: {"ignore": [], "enable": []}} IGNORE_CONFIG = {discovery.DOMAIN: {"ignore": [SERVICE_NO_PLATFORM]}} @pytest.fixture(autouse=True) def netdisco_mock(): """Mock netdisco.""" with patch.dict("sys.modules", {"netdisco.discovery": MagicMock()}): yield async def mock_discovery(hass, discoveries, config=BASE_CONFIG): """Mock discoveries.""" with patch("homeassistant.components.zeroconf.async_get_instance"), patch( "homeassistant.components.zeroconf.async_setup", return_value=True ), patch.object(discovery, "_discover", discoveries), patch( "homeassistant.components.discovery.async_discover" ) as mock_discover, patch( "homeassistant.components.discovery.async_load_platform", return_value=mock_coro(), ) as mock_platform: assert await async_setup_component(hass, "discovery", config) await hass.async_block_till_done() await hass.async_start() hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED) await hass.async_block_till_done() async_fire_time_changed(hass, utcnow()) # Work around an issue where our loop.call_soon not get caught await hass.async_block_till_done() await hass.async_block_till_done() return mock_discover, mock_platform async def test_unknown_service(hass): """Test that unknown service is ignored.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("this_service_will_never_be_supported", {"info": "some"})] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert not mock_platform.called async def test_load_platform(hass): """Test load a platform.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert not mock_discover.called assert mock_platform.called mock_platform.assert_called_with( hass, SERVICE_COMPONENT, SERVICE, SERVICE_INFO, BASE_CONFIG ) async def test_load_component(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_ignore_service(hass): """Test ignore service.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [(SERVICE_NO_PLATFORM, SERVICE_INFO)] mock_discover, mock_platform = await mock_discovery(hass, discover, IGNORE_CONFIG) assert not mock_discover.called assert not mock_platform.called async def test_discover_duplicates(hass): """Test load a component.""" def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [ 
(SERVICE_NO_PLATFORM, SERVICE_INFO), (SERVICE_NO_PLATFORM, SERVICE_INFO), ] mock_discover, mock_platform = await mock_discovery(hass, discover) assert mock_discover.called assert mock_discover.call_count == 1 assert not mock_platform.called mock_discover.assert_called_with( hass, SERVICE_NO_PLATFORM, SERVICE_INFO, SERVICE_NO_PLATFORM_COMPONENT, BASE_CONFIG, ) async def test_discover_config_flow(hass): """Test discovery triggering a config flow.""" discovery_info = {"hello": "world"} def discover(netdisco, zeroconf_instance): """Fake discovery.""" return [("mock-service", discovery_info)] with patch.dict( discovery.CONFIG_ENTRY_HANDLERS, {"mock-service": "mock-component"} ), patch("homeassistant.data_entry_flow.FlowManager.async_init") as m_init: await mock_discovery(hass, discover) assert len(m_init.mock_calls) == 1 args, kwargs = m_init.mock_calls[0][1:] assert args == ("mock-component",) assert kwargs["context"]["source"] == config_entries.SOURCE_DISCOVERY assert kwargs["data"] == discovery_info
home-assistant/home-assistant
tests/components/discovery/test_init.py
homeassistant/components/juicenet/__init__.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TaskReschedule tracks rescheduled task instances."""

from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, String, asc, desc

from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime


class TaskReschedule(Base):
    """TaskReschedule tracks rescheduled task instances."""

    __tablename__ = "task_reschedule"

    id = Column(Integer, primary_key=True)
    task_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
    dag_id = Column(String(ID_LEN, **COLLATION_ARGS), nullable=False)
    execution_date = Column(UtcDateTime, nullable=False)
    try_number = Column(Integer, nullable=False)
    start_date = Column(UtcDateTime, nullable=False)
    end_date = Column(UtcDateTime, nullable=False)
    duration = Column(Integer, nullable=False)
    reschedule_date = Column(UtcDateTime, nullable=False)

    __table_args__ = (
        Index('idx_task_reschedule_dag_task_date', dag_id, task_id, execution_date, unique=False),
        ForeignKeyConstraint(
            [task_id, dag_id, execution_date],
            ['task_instance.task_id', 'task_instance.dag_id', 'task_instance.execution_date'],
            name='task_reschedule_dag_task_date_fkey',
            ondelete='CASCADE',
        ),
    )

    def __init__(self, task, execution_date, try_number, start_date, end_date, reschedule_date):
        self.dag_id = task.dag_id
        self.task_id = task.task_id
        self.execution_date = execution_date
        self.try_number = try_number
        self.start_date = start_date
        self.end_date = end_date
        self.reschedule_date = reschedule_date
        self.duration = (self.end_date - self.start_date).total_seconds()

    @staticmethod
    @provide_session
    def query_for_task_instance(task_instance, descending=False, session=None):
        """
        Returns query for task reschedules for the given task instance.

        :param session: the database session object
        :type session: sqlalchemy.orm.session.Session
        :param task_instance: the task instance to find task reschedules for
        :type task_instance: airflow.models.TaskInstance
        :param descending: If True then records are returned in descending order
        :type descending: bool
        """
        TR = TaskReschedule
        qry = session.query(TR).filter(
            TR.dag_id == task_instance.dag_id,
            TR.task_id == task_instance.task_id,
            TR.execution_date == task_instance.execution_date,
            TR.try_number == task_instance.try_number,
        )
        if descending:
            return qry.order_by(desc(TR.id))
        else:
            return qry.order_by(asc(TR.id))

    @staticmethod
    @provide_session
    def find_for_task_instance(task_instance, session=None):
        """
        Returns all task reschedules for the task instance and try number,
        in ascending order.

        :param session: the database session object
        :type session: sqlalchemy.orm.session.Session
        :param task_instance: the task instance to find task reschedules for
        :type task_instance: airflow.models.TaskInstance
        """
        return TaskReschedule.query_for_task_instance(
            task_instance, session=session
        ).all()
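The `duration` column above is derived, not passed in: `__init__` subtracts the two timestamps and stores whole seconds. The same arithmetic in isolation, with hypothetical timestamps:

from datetime import datetime, timedelta, timezone

start_date = datetime(2021, 1, 1, 12, 0, tzinfo=timezone.utc)
end_date = start_date + timedelta(minutes=5)

# Same computation TaskReschedule.__init__ performs for `duration`.
duration = (end_date - start_date).total_seconds()
assert duration == 300.0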
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
from unittest import mock

import pytest

from airflow.models import Connection
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AWS_KEY
from tests.test_utils.gcp_system_helpers import GoogleSystemTest, provide_gcp_context

ROLE_ANR = os.environ.get('GCP_AWS_ROLE_ANR', "arn:aws:iam::123456:role/role_arn")
AUDIENCE = os.environ.get('GCP_AWS_AUDIENCE', 'aws-federation.airflow.apache.org')


@pytest.mark.system("google.cloud")
@pytest.mark.credential_file(GCP_AWS_KEY)
class AwsBaseHookSystemTest(GoogleSystemTest):
    @provide_gcp_context(GCP_AWS_KEY)
    def test_run_example_gcp_vision_autogenerated_id_dag(self):
        mock_connection = Connection(
            conn_type="aws",
            extra=json.dumps(
                {
                    "role_arn": ROLE_ANR,
                    "assume_role_method": "assume_role_with_web_identity",
                    "assume_role_with_web_identity_federation": 'google',
                    "assume_role_with_web_identity_federation_audience": AUDIENCE,
                }
            ),
        )

        with mock.patch.dict('os.environ', AIRFLOW_CONN_AWS_DEFAULT=mock_connection.get_uri()):
            hook = AwsBaseHook(client_type='s3')
            client = hook.get_conn()
            response = client.list_buckets()
            self.assertIn('Buckets', response)
DinoCow/airflow
tests/providers/amazon/aws/hooks/test_base_aws_system.py
airflow/models/taskreschedule.py
import os
import errno


class DeleteOnError(object):
    def __init__(self, where):
        self.where = where
        self.f = None
        self.remove_regardless = False

    def __enter__(self):
        self.f = open(self.where, 'wb')
        return self

    def __exit__(self, typ, value, traceback):
        try:
            if typ is not None or self.remove_regardless:
                os.unlink(self.where)
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
        finally:
            if self.f:
                self.f.close()
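`DeleteOnError` opens the target for writing and unlinks it again if the block exits with an exception (or if `remove_regardless` was set), ignoring a missing file. A usage sketch with a hypothetical path, reusing the class defined above:

import os

where = "/tmp/example.partial"  # hypothetical target path

try:
    with DeleteOnError(where) as handle:
        handle.f.write(b"partial download...")
        raise IOError("simulated mid-write failure")
except IOError:
    pass

assert not os.path.exists(where)  # removed because the block raised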
import gevent
import pytest

from fast_wait import fast_wait
from wal_e import worker
from wal_e.exception import UserCritical

assert fast_wait


class Explosion(Exception):
    """Marker type for fault injection."""
    pass


class FakeWalSegment(object):
    def __init__(self, seg_path, explicit=False,
                 upload_explosive=False,
                 mark_done_explosive=False):
        self.explicit = explicit
        self._upload_explosive = upload_explosive
        self._mark_done_explosive = mark_done_explosive

        self._marked = False
        self._uploaded = False

    def mark_done(self):
        if self._mark_done_explosive:
            raise self._mark_done_explosive

        self._marked = True


class FakeWalUploader(object):
    def __call__(self, segment):
        if segment._upload_explosive:
            raise segment._upload_explosive

        segment._uploaded = True
        return segment


def failed(seg):
    """Returns true if a segment could be a failed upload.

    Or in progress, the two are not distinguished.
    """
    return seg._marked is False and seg._uploaded is False


def success(seg):
    """Returns true if a segment has been successfully uploaded.

    Checks that mark_done was not called if this is an 'explicit' wal
    segment from Postgres.
    """
    if seg.explicit:
        assert seg._marked is False

    return seg._uploaded


def indeterminate(seg):
    """Returns true as long as the segment is internally consistent.

    Checks invariants of mark_done, depending on whether the segment
    has been uploaded. This is useful in cases with tests with
    failures and concurrent execution, and calls out the state of the
    segment in any case to the reader.
    """
    if seg._uploaded:
        if seg.explicit:
            assert seg._marked is False
        else:
            assert seg._marked is True
    else:
        assert seg._marked is False

    return True


def prepare_multi_upload_segments():
    """Prepare a handful of fake segments for upload."""
    # The first segment is special, being explicitly passed by
    # Postgres.
    yield FakeWalSegment('0' * 8 * 3, explicit=True)

    # Additional segments are non-explicit, which means they will have
    # their metadata manipulated by wal-e rather than relying on the
    # Postgres archiver.
    for i in xrange(1, 5):
        yield FakeWalSegment(str(i) * 8 * 3, explicit=False)


def test_simple_upload():
    """Model a case where there is no concurrency while uploading."""
    group = worker.WalTransferGroup(FakeWalUploader())
    seg = FakeWalSegment('1' * 8 * 3, explicit=True)
    group.start(seg)
    group.join()

    assert success(seg)


def test_multi_upload():
    """Model a case with upload concurrency."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segments = list(prepare_multi_upload_segments())

    # "Start" fake uploads
    for seg in segments:
        group.start(seg)

    group.join()

    # Check invariants on the non-explicit segments.
    for seg in segments:
        assert success(seg)


def test_simple_fail():
    """Model a simple failure in the non-concurrent case."""
    group = worker.WalTransferGroup(FakeWalUploader())

    exp = Explosion('fail')
    seg = FakeWalSegment('1' * 8 * 3, explicit=True, upload_explosive=exp)

    group.start(seg)

    with pytest.raises(Explosion) as e:
        group.join()

    assert e.value is exp
    assert failed(seg)


def test_multi_explicit_fail():
    """Model a failure of the explicit segment under concurrency."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segments = list(prepare_multi_upload_segments())

    exp = Explosion('fail')
    segments[0]._upload_explosive = exp

    for seg in segments:
        group.start(seg)

    with pytest.raises(Explosion) as e:
        group.join()

    assert e.value is exp

    assert failed(segments[0])

    for seg in segments[1:]:
        assert success(seg)


def test_multi_pipeline_fail():
    """Model a failure of the pipelined segments under concurrency."""
    group = worker.WalTransferGroup(FakeWalUploader())
    segments = list(prepare_multi_upload_segments())

    exp = Explosion('fail')
    fail_idx = 2
    segments[fail_idx]._upload_explosive = exp

    for seg in segments:
        group.start(seg)

    with pytest.raises(Explosion) as e:
        group.join()

    assert e.value is exp

    for i, seg in enumerate(segments):
        if i == fail_idx:
            assert failed(seg)
        else:
            # Given race conditions in conjunction with exceptions --
            # which will abort waiting for other greenlets to finish
            # -- one can't know very much about the final state of
            # segment.
            assert indeterminate(seg)


def test_finally_execution():
    """When one segment fails ensure parallel segments clean up."""
    segBad = FakeWalSegment('1' * 8 * 3)
    segOK = FakeWalSegment('2' * 8 * 3)

    class CleanupCheckingUploader(object):
        def __init__(self):
            self.cleaned_up = False

        def __call__(self, segment):
            if segment is segOK:
                try:
                    while True:
                        gevent.sleep(0.1)
                finally:
                    self.cleaned_up = True
            elif segment is segBad:
                raise Explosion('fail')
            else:
                assert False, 'Expect only two segments'

            segment._uploaded = True
            return segment

    uploader = CleanupCheckingUploader()
    group = worker.WalTransferGroup(uploader)
    group.start(segOK)
    group.start(segBad)

    with pytest.raises(Explosion):
        group.join()

    assert uploader.cleaned_up is True


def test_start_after_join():
    """Break an invariant by adding transfers after .join."""
    group = worker.WalTransferGroup(FakeWalUploader())
    group.join()
    seg = FakeWalSegment('arbitrary')

    with pytest.raises(UserCritical):
        group.start(seg)


def test_mark_done_fault():
    """Exercise exception handling from .mark_done()"""
    group = worker.WalTransferGroup(FakeWalUploader())
    exp = Explosion('boom')
    seg = FakeWalSegment('arbitrary', mark_done_explosive=exp)
    group.start(seg)

    with pytest.raises(Explosion) as e:
        group.join()

    assert e.value is exp
ArtemZ/wal-e
tests/test_wal_transfer.py
wal_e/files.py
import sys
import distutils.errors

from setuptools.compat import httplib, HTTPError, unicode, pathname2url
import pkg_resources
import setuptools.package_index
from setuptools.tests.server import IndexServer


class TestPackageIndex:

    def test_bad_url_bad_port(self):
        index = setuptools.package_index.PackageIndex()
        url = 'http://127.0.0.1:0/nonesuch/test_package_index'
        try:
            v = index.open_url(url)
        except Exception as v:
            assert url in str(v)
        else:
            assert isinstance(v, HTTPError)

    def test_bad_url_typo(self):
        # issue 16
        # easy_install inquant.contentmirror.plone breaks because of a typo
        # in its home URL
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
        try:
            v = index.open_url(url)
        except Exception as v:
            assert url in str(v)
        else:
            assert isinstance(v, HTTPError)

    def test_bad_url_bad_status_line(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        def _urlopen(*args):
            raise httplib.BadStatusLine('line')

        index.opener = _urlopen
        url = 'http://example.com'
        try:
            v = index.open_url(url)
        except Exception as v:
            assert 'line' in str(v)
        else:
            raise AssertionError('Should have raised here!')

    def test_bad_url_double_scheme(self):
        """
        A bad URL with a double scheme should raise a DistutilsError.
        """
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue 20
        url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
        try:
            index.open_url(url)
        except distutils.errors.DistutilsError as error:
            msg = unicode(error)
            assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
            return
        raise RuntimeError("Did not raise")

    def test_bad_url_screwy_href(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue #160
        if sys.version_info[0] == 2 and sys.version_info[1] == 7:
            # this should not fail
            url = 'http://example.com'
            page = ('<a href="http://www.famfamfam.com]('
                    'http://www.famfamfam.com/">')
            index.process_index(url, page)

    def test_url_ok(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )
        url = 'file:///tmp/test_package_index'
        assert index.url_ok(url, True)

    def test_links_priority(self):
        """
        Download links from the pypi simple index should be used before
        external download links.
        https://bitbucket.org/tarek/distribute/issue/163

        Use case:
        - someone uploads a package on pypi, an md5 is generated
        - someone manually copies this link (with the md5 in the url) onto an
          external page accessible from the package page.
        - someone reuploads the package (with a different md5)
        - while easy_installing, an MD5 error occurs because the external link
          is used
        -> Setuptools should use the link from pypi, not the external one.
""" if sys.platform.startswith('java'): # Skip this test on jython because binding to :0 fails return # start an index server server = IndexServer() server.start() index_url = server.base_url() + 'test_links_priority/simple/' # scan a test index pi = setuptools.package_index.PackageIndex(index_url) requirement = pkg_resources.Requirement.parse('foobar') pi.find_packages(requirement) server.stop() # the distribution has been found assert 'foobar' in pi # we have only one link, because links are compared without md5 assert len(pi['foobar'])==1 # the link should be from the index assert 'correct_md5' in pi['foobar'][0].location def test_parse_bdist_wininst(self): parse = setuptools.package_index.parse_bdist_wininst actual = parse('reportlab-2.5.win32-py2.4.exe') expected = 'reportlab-2.5', '2.4', 'win32' assert actual == expected actual = parse('reportlab-2.5.win32.exe') expected = 'reportlab-2.5', None, 'win32' assert actual == expected actual = parse('reportlab-2.5.win-amd64-py2.7.exe') expected = 'reportlab-2.5', '2.7', 'win-amd64' assert actual == expected actual = parse('reportlab-2.5.win-amd64.exe') expected = 'reportlab-2.5', None, 'win-amd64' assert actual == expected def test__vcs_split_rev_from_url(self): """ Test the basic usage of _vcs_split_rev_from_url """ vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url url, rev = vsrfu('https://example.com/bar@2995') assert url == 'https://example.com/bar' assert rev == '2995' def test_local_index(self, tmpdir): """ local_open should be able to read an index from the file system. """ index_file = tmpdir / 'index.html' with index_file.open('w') as f: f.write('<div>content</div>') url = 'file:' + pathname2url(str(tmpdir)) + '/' res = setuptools.package_index.local_open(url) assert 'content' in res.read() class TestContentCheckers: def test_md5(self): checker = setuptools.package_index.HashChecker.from_url( 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478') checker.feed('You should probably not be using MD5'.encode('ascii')) assert checker.hash.hexdigest() == 'f12895fdffbd45007040d2e44df98478' assert checker.is_valid() def test_other_fragment(self): "Content checks should succeed silently if no hash is present" checker = setuptools.package_index.HashChecker.from_url( 'http://foo/bar#something%20completely%20different') checker.feed('anything'.encode('ascii')) assert checker.is_valid() def test_blank_md5(self): "Content checks should succeed if a hash is empty" checker = setuptools.package_index.HashChecker.from_url( 'http://foo/bar#md5=') checker.feed('anything'.encode('ascii')) assert checker.is_valid() def test_get_hash_name_md5(self): checker = setuptools.package_index.HashChecker.from_url( 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478') assert checker.hash_name == 'md5' def test_report(self): checker = setuptools.package_index.HashChecker.from_url( 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478') rep = checker.report(lambda x: x, 'My message about %s') assert rep == 'My message about md5'
# -*- coding: utf-8 -*- """Easy install Tests """ from __future__ import absolute_import import sys import os import shutil import tempfile import site import contextlib import tarfile import logging import itertools import distutils.errors import pytest try: from unittest import mock except ImportError: import mock from setuptools import sandbox from setuptools import compat from setuptools.compat import StringIO, BytesIO, urlparse from setuptools.sandbox import run_setup import setuptools.command.easy_install as ei from setuptools.command.easy_install import PthDistributions from setuptools.command import easy_install as easy_install_pkg from setuptools.dist import Distribution from pkg_resources import working_set from pkg_resources import Distribution as PRDistribution import setuptools.tests.server import pkg_resources from .py26compat import tarfile_open from . import contexts from .textwrap import DALS class FakeDist(object): def get_entry_map(self, group): if group != 'console_scripts': return {} return {'name': 'ep'} def as_requirement(self): return 'spec' SETUP_PY = DALS(""" from setuptools import setup setup(name='foo') """) class TestEasyInstallTest: def test_install_site_py(self): dist = Distribution() cmd = ei.easy_install(dist) cmd.sitepy_installed = False cmd.install_dir = tempfile.mkdtemp() try: cmd.install_site_py() sitepy = os.path.join(cmd.install_dir, 'site.py') assert os.path.exists(sitepy) finally: shutil.rmtree(cmd.install_dir) def test_get_script_args(self): header = ei.CommandSpec.best().from_environment().as_header() expected = header + DALS(""" # EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name' __requires__ = 'spec' import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('spec', 'console_scripts', 'name')() ) """) dist = FakeDist() args = next(ei.ScriptWriter.get_args(dist)) name, script = itertools.islice(args, 2) assert script == expected def test_no_find_links(self): # new option '--no-find-links', that blocks find-links added at # the project level dist = Distribution() cmd = ei.easy_install(dist) cmd.check_pth_processing = lambda: True cmd.no_find_links = True cmd.find_links = ['link1', 'link2'] cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') cmd.args = ['ok'] cmd.ensure_finalized() assert cmd.package_index.scanned_urls == {} # let's try without it (default behavior) cmd = ei.easy_install(dist) cmd.check_pth_processing = lambda: True cmd.find_links = ['link1', 'link2'] cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') cmd.args = ['ok'] cmd.ensure_finalized() keys = sorted(cmd.package_index.scanned_urls.keys()) assert keys == ['link1', 'link2'] def test_write_exception(self): """ Test that `cant_write_to_target` is rendered as a DistutilsError. 
""" dist = Distribution() cmd = ei.easy_install(dist) cmd.install_dir = os.getcwd() with pytest.raises(distutils.errors.DistutilsError): cmd.cant_write_to_target() class TestPTHFileWriter: def test_add_from_cwd_site_sets_dirty(self): '''a pth file manager should set dirty if a distribution is in site but also the cwd ''' pth = PthDistributions('does-not_exist', [os.getcwd()]) assert not pth.dirty pth.add(PRDistribution(os.getcwd())) assert pth.dirty def test_add_from_site_is_ignored(self): location = '/test/location/does-not-have-to-exist' # PthDistributions expects all locations to be normalized location = pkg_resources.normalize_path(location) pth = PthDistributions('does-not_exist', [location, ]) assert not pth.dirty pth.add(PRDistribution(location)) assert not pth.dirty @pytest.yield_fixture def setup_context(tmpdir): with (tmpdir/'setup.py').open('w') as f: f.write(SETUP_PY) with tmpdir.as_cwd(): yield tmpdir @pytest.mark.usefixtures("user_override") @pytest.mark.usefixtures("setup_context") class TestUserInstallTest: @mock.patch('setuptools.command.easy_install.__file__', None) def test_user_install_implied(self): easy_install_pkg.__file__ = site.USER_SITE site.ENABLE_USER_SITE = True # disabled sometimes #XXX: replace with something meaningfull dist = Distribution() dist.script_name = 'setup.py' cmd = ei.easy_install(dist) cmd.args = ['py'] cmd.ensure_finalized() assert cmd.user, 'user should be implied' def test_multiproc_atexit(self): try: __import__('multiprocessing') except ImportError: # skip the test if multiprocessing is not available return log = logging.getLogger('test_easy_install') logging.basicConfig(level=logging.INFO, stream=sys.stderr) log.info('this should not break') def test_user_install_not_implied_without_usersite_enabled(self): site.ENABLE_USER_SITE = False # usually enabled #XXX: replace with something meaningfull dist = Distribution() dist.script_name = 'setup.py' cmd = ei.easy_install(dist) cmd.args = ['py'] cmd.initialize_options() assert not cmd.user, 'NOT user should be implied' def test_local_index(self): # make sure the local index is used # when easy_install looks for installed # packages new_location = tempfile.mkdtemp() target = tempfile.mkdtemp() egg_file = os.path.join(new_location, 'foo-1.0.egg-info') with open(egg_file, 'w') as f: f.write('Name: foo\n') sys.path.append(target) old_ppath = os.environ.get('PYTHONPATH') os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path) try: dist = Distribution() dist.script_name = 'setup.py' cmd = ei.easy_install(dist) cmd.install_dir = target cmd.args = ['foo'] cmd.ensure_finalized() cmd.local_index.scan([new_location]) res = cmd.easy_install('foo') actual = os.path.normcase(os.path.realpath(res.location)) expected = os.path.normcase(os.path.realpath(new_location)) assert actual == expected finally: sys.path.remove(target) for basedir in [new_location, target, ]: if not os.path.exists(basedir) or not os.path.isdir(basedir): continue try: shutil.rmtree(basedir) except: pass if old_ppath is not None: os.environ['PYTHONPATH'] = old_ppath else: del os.environ['PYTHONPATH'] @contextlib.contextmanager def user_install_setup_context(self, *args, **kwargs): """ Wrap sandbox.setup_context to patch easy_install in that context to appear as user-installed. 
""" with self.orig_context(*args, **kwargs): import setuptools.command.easy_install as ei ei.__file__ = site.USER_SITE yield def patched_setup_context(self): self.orig_context = sandbox.setup_context return mock.patch( 'setuptools.sandbox.setup_context', self.user_install_setup_context, ) def test_setup_requires(self): """Regression test for Distribute issue #318 Ensure that a package with setup_requires can be installed when setuptools is installed in the user site-packages without causing a SandboxViolation. """ test_pkg = create_setup_requires_package(os.getcwd()) test_setup_py = os.path.join(test_pkg, 'setup.py') try: with contexts.quiet(): with self.patched_setup_context(): run_setup(test_setup_py, ['install']) except IndexError: # Test fails in some cases due to bugs in Python # See https://bitbucket.org/pypa/setuptools/issue/201 pass @pytest.yield_fixture def distutils_package(): distutils_setup_py = SETUP_PY.replace( 'from setuptools import setup', 'from distutils.core import setup', ) with contexts.tempdir(cd=os.chdir): with open('setup.py', 'w') as f: f.write(distutils_setup_py) yield class TestDistutilsPackage: def test_bdist_egg_available_on_distutils_pkg(self, distutils_package): run_setup('setup.py', ['bdist_egg']) class TestSetupRequires: def test_setup_requires_honors_fetch_params(self): """ When easy_install installs a source distribution which specifies setup_requires, it should honor the fetch parameters (such as allow-hosts, index-url, and find-links). """ # set up a server which will simulate an alternate package index. p_index = setuptools.tests.server.MockServer() p_index.start() netloc = 1 p_index_loc = urlparse(p_index.url)[netloc] if p_index_loc.endswith(':0'): # Some platforms (Jython) don't find a port to which to bind, # so skip this test for them. return with contexts.quiet(): # create an sdist that has a build-time dependency. with TestSetupRequires.create_sdist() as dist_file: with contexts.tempdir() as temp_install_dir: with contexts.environment(PYTHONPATH=temp_install_dir): ei_params = [ '--index-url', p_index.url, '--allow-hosts', p_index_loc, '--exclude-scripts', '--install-dir', temp_install_dir, dist_file, ] with contexts.argv(['easy_install']): # attempt to install the dist. It should fail because # it doesn't exist. with pytest.raises(SystemExit): easy_install_pkg.main(ei_params) # there should have been two or three requests to the server # (three happens on Python 3.3a) assert 2 <= len(p_index.requests) <= 3 assert p_index.requests[0].path == '/does-not-exist/' @staticmethod @contextlib.contextmanager def create_sdist(): """ Return an sdist with a setup_requires dependency (of something that doesn't exist) """ with contexts.tempdir() as dir: dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz') script = DALS(""" import setuptools setuptools.setup( name="setuptools-test-fetcher", version="1.0", setup_requires = ['does-not-exist'], ) """) make_trivial_sdist(dist_path, script) yield dist_path def test_setup_requires_overrides_version_conflict(self): """ Regression test for issue #323. Ensures that a distribution's setup_requires requirements can still be installed and used locally even if a conflicting version of that requirement is already on the path. 
""" pr_state = pkg_resources.__getstate__() fake_dist = PRDistribution('does-not-matter', project_name='foobar', version='0.0') working_set.add(fake_dist) try: with contexts.tempdir() as temp_dir: test_pkg = create_setup_requires_package(temp_dir) test_setup_py = os.path.join(test_pkg, 'setup.py') with contexts.quiet() as (stdout, stderr): # Don't even need to install the package, just # running the setup.py at all is sufficient run_setup(test_setup_py, ['--name']) lines = stdout.readlines() assert len(lines) > 0 assert lines[-1].strip(), 'test_pkg' finally: pkg_resources.__setstate__(pr_state) def create_setup_requires_package(path): """Creates a source tree under path for a trivial test package that has a single requirement in setup_requires--a tarball for that requirement is also created and added to the dependency_links argument. """ test_setup_attrs = { 'name': 'test_pkg', 'version': '0.0', 'setup_requires': ['foobar==0.1'], 'dependency_links': [os.path.abspath(path)] } test_pkg = os.path.join(path, 'test_pkg') test_setup_py = os.path.join(test_pkg, 'setup.py') os.mkdir(test_pkg) with open(test_setup_py, 'w') as f: f.write(DALS(""" import setuptools setuptools.setup(**%r) """ % test_setup_attrs)) foobar_path = os.path.join(path, 'foobar-0.1.tar.gz') make_trivial_sdist( foobar_path, DALS(""" import setuptools setuptools.setup( name='foobar', version='0.1' ) """)) return test_pkg def make_trivial_sdist(dist_path, setup_py): """Create a simple sdist tarball at dist_path, containing just a setup.py, the contents of which are provided by the setup_py string. """ setup_py_file = tarfile.TarInfo(name='setup.py') try: # Python 3 (StringIO gets converted to io module) MemFile = BytesIO except AttributeError: MemFile = StringIO setup_py_bytes = MemFile(setup_py.encode('utf-8')) setup_py_file.size = len(setup_py_bytes.getvalue()) with tarfile_open(dist_path, 'w:gz') as dist: dist.addfile(setup_py_file, fileobj=setup_py_bytes) class TestScriptHeader: non_ascii_exe = '/Users/José/bin/python' exe_with_spaces = r'C:\Program Files\Python33\python.exe' @pytest.mark.skipif( sys.platform.startswith('java') and ei.is_sh(sys.executable), reason="Test cannot run under java when executable is sh" ) def test_get_script_header(self): expected = '#!%s\n' % ei.nt_quote_arg(os.path.normpath(sys.executable)) actual = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python') assert actual == expected expected = '#!%s -x\n' % ei.nt_quote_arg(os.path.normpath (sys.executable)) actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x') assert actual == expected actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python', executable=self.non_ascii_exe) expected = '#!%s -x\n' % self.non_ascii_exe assert actual == expected actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python', executable='"'+self.exe_with_spaces+'"') expected = '#!"%s"\n' % self.exe_with_spaces assert actual == expected @pytest.mark.xfail( compat.PY3 and os.environ.get("LC_CTYPE") in ("C", "POSIX"), reason="Test fails in this locale on Python 3" ) @mock.patch.dict(sys.modules, java=mock.Mock(lang=mock.Mock(System= mock.Mock(getProperty=mock.Mock(return_value=""))))) @mock.patch('sys.platform', 'java1.5.0_13') def test_get_script_header_jython_workaround(self, tmpdir): # Create a mock sys.executable that uses a shebang line header = DALS(""" #!/usr/bin/python # -*- coding: utf-8 -*- """) exe = tmpdir / 'exe.py' with exe.open('w') as f: f.write(header) exe = str(exe) header = 
ei.ScriptWriter.get_script_header('#!/usr/local/bin/python', executable=exe) assert header == '#!/usr/bin/env %s\n' % exe expect_out = 'stdout' if sys.version_info < (2,7) else 'stderr' with contexts.quiet() as (stdout, stderr): # When options are included, generate a broken shebang line # with a warning emitted candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x', executable=exe) assert candidate == '#!%s -x\n' % exe output = locals()[expect_out] assert 'Unable to adapt shebang line' in output.getvalue() with contexts.quiet() as (stdout, stderr): candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python', executable=self.non_ascii_exe) assert candidate == '#!%s -x\n' % self.non_ascii_exe output = locals()[expect_out] assert 'Unable to adapt shebang line' in output.getvalue() class TestCommandSpec: def test_custom_launch_command(self): """ Show how a custom CommandSpec could be used to specify a #! executable which takes parameters. """ cmd = ei.CommandSpec(['/usr/bin/env', 'python3']) assert cmd.as_header() == '#!/usr/bin/env python3\n' def test_from_param_for_CommandSpec_is_passthrough(self): """ from_param should return an instance of a CommandSpec """ cmd = ei.CommandSpec(['python']) cmd_new = ei.CommandSpec.from_param(cmd) assert cmd is cmd_new def test_from_environment_with_spaces_in_executable(self): with mock.patch('sys.executable', TestScriptHeader.exe_with_spaces): cmd = ei.CommandSpec.from_environment() assert len(cmd) == 1 assert cmd.as_header().startswith('#!"') def test_from_simple_string_uses_shlex(self): """ In order to support `executable = /usr/bin/env my-python`, make sure from_param invokes shlex on that input. """ cmd = ei.CommandSpec.from_param('/usr/bin/env my-python') assert len(cmd) == 2 assert '"' not in cmd.as_header() def test_sys_executable(self): """ CommandSpec.from_string(sys.executable) should contain just that param. """ writer = ei.ScriptWriter.best() cmd = writer.command_spec_class.from_string(sys.executable) assert len(cmd) == 1 assert cmd[0] == sys.executable class TestWindowsScriptWriter: def test_header(self): hdr = ei.WindowsScriptWriter.get_script_header('') assert hdr.startswith('#!') assert hdr.endswith('\n') hdr = hdr.lstrip('#!') hdr = hdr.rstrip('\n') # header should not start with an escaped quote assert not hdr.startswith('\\"')
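The CommandSpec tests above revolve around two behaviors: shlex-splitting an executable specification and quoting the interpreter in the #! header only when needed. A simplified sketch of that pairing follows (assumed behavior for illustration, not setuptools' exact implementation):

import shlex


def as_header(executable_spec):
    # Split "executable = ..." the way a shell would, then re-render,
    # quoting any component that contains spaces.
    parts = shlex.split(executable_spec)
    rendered = ' '.join(
        '"%s"' % part if ' ' in part else part
        for part in parts
    )
    return '#!%s\n' % rendered


assert as_header('/usr/bin/env my-python') == '#!/usr/bin/env my-python\n'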
spirrello/spirrello-pynet-work
applied_python/lib/python2.7/site-packages/setuptools/tests/test_easy_install.py
applied_python/lib/python2.7/site-packages/setuptools/tests/test_packageindex.py
"""Rdb: Remote debugger Given the following configuration in conf/rdb.yaml:: breakpoints: - subject: Brief explanation of a problem exceptions: - cfme.exceptions.ImportableExampleException - BuiltinException (e.g. ValueError) recipients: - user@example.com Any time an exception listed in a breakpoint's "exceptions" list is raised in :py:func:`rdb_catch` context in the course of a test run, a remote debugger will be started on a random port, and the users listed in "recipients" will be emailed instructions to access the remote debugger via telnet. The exceptions will be imported, so their fully-qualified importable path is required. Exceptions without a module path are assumed to be builtins. An Rdb instance can be used just like a :py:class:`Pdb <python:Pdb>` instance. Additionally, a signal handler has been set up to allow for triggering Rdb during a test run. To invoke it, ``kill -USR1`` a test-running process and Rdb will start up. No emails are sent when operating in this mode, so check the py.test console for the endpoint address. By default, Rdb assumes that there is a working MTA available on localhost, but this can be configured in ``conf['env']['smtp']['server']``. Note: This is very insecure, and should be used as a last resort for debugging elusive failures. """ from __future__ import print_function import os import signal import smtplib import socket import sys from contextlib import contextmanager from email.mime.text import MIMEText from importlib import import_module from pdb import Pdb from textwrap import dedent from cfme.fixtures.pytest_store import store from cfme.fixtures.pytest_store import write_line from cfme.utils import conf from cfme.utils.log import logger _breakpoint_exceptions = {} # defaults smtp_conf = { 'server': '127.0.0.1' } # Update defaults from conf smtp_conf.update(conf.env.get('smtp', {})) for breakpoint in (conf.rdb.get('breakpoints') or []): for i, exc_name in enumerate(breakpoint['exceptions']): split_exc = exc_name.rsplit('.', 1) if len(split_exc) == 1: # If no module is given to import from, assume builtin split_exc = ['__builtin__', exc_name] exc = getattr(import_module(split_exc[0]), split_exc[1]) # stash exceptions for easy matching in exception handlers _breakpoint_exceptions[exc] = breakpoint def rdb_handle_signal(signal, frame): # only registered for USR1, no need to inspect the signal, # just hand the frame off to Rdb Rdb('Debugger started on user signal').set_trace(frame) signal.signal(signal.SIGUSR1, rdb_handle_signal) # XXX: Pdb (and its bases) are old-style classobjs, so don't use super class Rdb(Pdb): """Remote Debugger When set_trace is called, it will open a socket on a random unprivileged port connected to a Pdb debugging session. This session can be accessed via telnet, and will end when "continue" is called in the Pdb session. 
""" def __init__(self, prompt_msg=''): self._prompt_msg = str(prompt_msg) self._stdout = sys.stdout self._stdin = sys.stdin self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # bind to random port self.sock.bind(('0.0.0.0', 0)) def do_continue(self, arg): sys.stdout = self._stdout sys.stdin = self._stdin self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() self.set_continue() return 1 do_c = do_cont = do_continue def interaction(self, *args, **kwargs): print(self._prompt_msg, stream=self.stdout) Pdb.interaction(self, *args, **kwargs) def set_trace(self, *args, **kwargs): """Start a pdb debugger available via telnet, and optionally email people the endpoint The endpoint will always be seen in the py.test runner output. Keyword Args: recipients: A list where, if set, an email will be sent to email addresses in this list. subject: If set, an optional custom email subject """ host, port = self.sock.getsockname() endpoint = 'host {} port {}'.format(store.my_ip_address, port) recipients = kwargs.pop('recipients', None) if recipients: # write and send an email subject = kwargs.pop('subject', 'RDB Breakpoint: Manually invoked') body = dedent("""\ A py.test run encountered an error. The remote debugger is running on {} (TCP), waiting for telnet connection. """).format(endpoint) try: smtp_server = smtp_conf['server'] smtp = smtplib.SMTP(smtp_server) msg = MIMEText(body) msg['Subject'] = subject msg['To'] = ', '.join(recipients) smtp.sendmail('rdb-breakpoint@example.com', recipients, msg.as_string()) except socket.error: logger.critical("Couldn't send email") msg = 'Remote debugger listening on {}'.format(endpoint) logger.critical(msg) write_line(msg, red=True, bold=True) self.sock.listen(1) (client_socket, address) = self.sock.accept() client_fh = client_socket.makefile('rw') Pdb.__init__(self, completekey='tab', stdin=client_fh, stdout=client_fh) sys.stdout = sys.stdin = client_fh Pdb.set_trace(self, *args, **kwargs) msg = 'Debugger on {} shut down'.format(endpoint) logger.critical(msg) write_line(msg, green=True, bold=True) def send_breakpoint_email(exctype, msg=''): job_name = os.environ.get('JOB_NAME', 'Non-jenkins') breakpoint = _breakpoint_exceptions[exctype] subject = 'RDB Breakpoint: {} {}'.format(job_name, breakpoint['subject']) rdb = Rdb(msg) rdb.set_trace(subject=subject, recipients=breakpoint['recipients']) def pytest_internalerror(excrepr, excinfo): if excinfo.type in _breakpoint_exceptions: msg = "A py.test internal error has triggered RDB:\n" msg += str(excrepr) send_breakpoint_email(excinfo.type, msg) @contextmanager def rdb_catch(): """Context Manager used to wrap mysterious failures for remote debugging.""" try: yield except tuple(_breakpoint_exceptions) as exc: send_breakpoint_email(type(exc))
# -*- coding: utf-8 -*- from collections import namedtuple from datetime import datetime import fauxfactory import pytest from widgetastic.widget import Text from cfme import test_requirements from cfme.control.explorer import conditions from cfme.control.explorer import ControlExplorerView from cfme.control.explorer.alert_profiles import ServerAlertProfile from cfme.control.explorer.policies import VMCompliancePolicy from cfme.control.explorer.policies import VMControlPolicy from cfme.exceptions import CFMEExceptionOccured from cfme.tests.control.test_basic import CONDITIONS from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.generators import random_vm_name from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for pytestmark = [ test_requirements.control, pytest.mark.tier(3) ] BAD_CONDITIONS = [ conditions.ReplicatorCondition, conditions.PodCondition, conditions.ContainerNodeCondition, conditions.ContainerImageCondition, conditions.ProviderCondition ] def create_policy(request, collection): args = (VMControlPolicy, fauxfactory.gen_alpha()) kwargs = {} policy = collection.create(*args) @request.addfinalizer def _delete(): while policy.exists: policy.delete() return args, kwargs def create_condition(request, collection): args = ( conditions.VMCondition, fauxfactory.gen_alpha(), "fill_field(VM and Instance : Boot Time, BEFORE, Today)" ) kwargs = {} condition = collection.create(*args) @request.addfinalizer def _delete(): while condition.exists: condition.delete() return args, kwargs def create_action(request, collection): args = (fauxfactory.gen_alpha(),) kwargs = { "action_type": "Tag", "action_values": {"tag": ("My Company Tags", "Department", "Accounting")} } action = collection.create(*args, **kwargs) @request.addfinalizer def _delete(): while action.exists: action.delete() return args, kwargs def create_alert(request, collection): args = (fauxfactory.gen_alpha(),) kwargs = {"timeline_event": True, "driving_event": "Hourly Timer"} alert = collection.create(*args, **kwargs) @request.addfinalizer def _delete(): while alert.exists: alert.delete() return args, kwargs ProfileCreateFunction = namedtuple('ProfileCreateFunction', ['name', 'fn']) items = [ ProfileCreateFunction("Policies", create_policy), ProfileCreateFunction("Conditions", create_condition), ProfileCreateFunction("Actions", create_action), ProfileCreateFunction("Alerts", create_alert) ] @pytest.fixture(scope="module") def collections(appliance): return { "Policies": appliance.collections.policies, "Conditions": appliance.collections.conditions, "Actions": appliance.collections.actions, "Alerts": appliance.collections.alerts } @pytest.fixture def vmware_vm(request, virtualcenter_provider): vm = virtualcenter_provider.appliance.collections.infra_vms.instantiate( random_vm_name("control"), virtualcenter_provider ) vm.create_on_provider(find_in_cfme=True) request.addfinalizer(vm.cleanup_on_provider) return vm @pytest.fixture def hardware_reconfigured_alert(appliance): alert = appliance.collections.alerts.create( fauxfactory.gen_alpha(), evaluate=("Hardware Reconfigured", {"hardware_attribute": "RAM"}), timeline_event=True ) yield alert alert.delete() @pytest.fixture def setup_disk_usage_alert(appliance): # get the current time timestamp = datetime.now() # setup the DB query table = appliance.db.client['miq_alert_statuses'] query = appliance.db.client.session.query(table.description, table.evaluated_on) # configure the advanced settings and place a 
large file on the appliance
    # disk usage above 1% will now trigger a disk_usage event
    appliance.update_advanced_settings(
        {"server": {"events": {"disk_usage_gt_percent": 1}}}
    )

    # create a 1 GB file on /var/www/miq/vmdb/log
    result = appliance.ssh_client.run_command(
        "dd if=/dev/zero of=/var/www/miq/vmdb/log/delete_me.txt count=1024 bs=1048576"
    )
    # verify that the command was successful
    assert not result.failed

    # setup the alert for firing
    expression = {"expression": "fill_count(Server.EVM Workers, >, 0)"}
    alert = appliance.collections.alerts.create(
        fauxfactory.gen_alpha(),
        based_on='Server',
        evaluate=("Expression (Custom)", expression),
        driving_event="Appliance Operation: Server High /var/www/miq/vmdb/log Disk Usage",
        notification_frequency="1 Minute",
    )
    alert_profile = appliance.collections.alert_profiles.create(
        ServerAlertProfile,
        "Alert profile for {}".format(alert.description),
        alerts=[alert]
    )
    alert_profile.assign_to("Selected Servers", selections=["Servers", "EVM"])

    yield alert, timestamp, query

    alert_profile.delete()
    alert.delete()
    appliance.update_advanced_settings(
        {"server": {"events": {"disk_usage_gt_percent": "<<reset>>"}}}
    )
    result = appliance.ssh_client.run_command("rm /var/www/miq/vmdb/log/delete_me.txt")
    # verify that the command was successful
    assert not result.failed


@pytest.mark.meta(blockers=[BZ(1155284)])
def test_scope_windows_registry_stuck(request, appliance, infra_provider):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: low
        initialEstimate: 1/6h
    """
    policy = appliance.collections.policies.create(
        VMCompliancePolicy,
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
              r"some value, INCLUDES, some content)"
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    profile = appliance.collections.policy_profiles.create(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy]
    )
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    # Now assign this malformed profile to a VM
    # not assuming that infra_provider is actually an InfraProvider type
    vm = infra_provider.appliance.collections.infra_vms.all()[0]
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    navigate_to(appliance.server, 'Dashboard')
    view = navigate_to(appliance.collections.infra_vms, 'All')
    assert "except" not in view.entities.title.text.lower()
    vm.unassign_policy_profiles(profile.description)


@pytest.mark.meta(blockers=[BZ(1243357)], automates=[1243357])
def test_invoke_custom_automation(request, appliance):
    """This test exercises a bug that caused the ``Invoke Custom Automation``
    fields to disappear.
    Steps:
        * Go create new action, select Invoke Custom Automation
        * The form with additional fields should appear

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/6h
    """
    # The action is to have all possible fields filled, that way we can ensure it is good
    action = appliance.collections.actions.create(
        fauxfactory.gen_alpha(),
        "Invoke a Custom Automation",
        dict(
            message=fauxfactory.gen_alpha(),
            request=fauxfactory.gen_alpha(),
            attribute_1=fauxfactory.gen_alpha(),
            value_1=fauxfactory.gen_alpha(),
            attribute_2=fauxfactory.gen_alpha(),
            value_2=fauxfactory.gen_alpha(),
            attribute_3=fauxfactory.gen_alpha(),
            value_3=fauxfactory.gen_alpha(),
            attribute_4=fauxfactory.gen_alpha(),
            value_4=fauxfactory.gen_alpha(),
            attribute_5=fauxfactory.gen_alpha(),
            value_5=fauxfactory.gen_alpha()
        )
    )
    request.addfinalizer(lambda: action.delete() if action.exists else None)


@pytest.mark.meta(blockers=[BZ(1375093)], automates=[1375093])
def test_check_compliance_history(request, virtualcenter_provider, vmware_vm, appliance):
    """This test checks that the compliance history link in a VM details
    screen works.

    Steps:
        * Create any VM compliance policy
        * Assign it to a policy profile
        * Assign the policy profile to any VM
        * Perform the compliance check for the VM
        * Go to the VM details screen
        * Click on "History" row in Compliance InfoBox

    Result:
        Compliance history screen with last 10 checks should be opened

    Polarion:
        assignee: jdupuy
        initialEstimate: 1/4h
        casecomponent: Control
    """
    policy = appliance.collections.policies.create(
        VMCompliancePolicy,
        "Check compliance history policy {}".format(fauxfactory.gen_alpha()),
        active=True,
        scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vmware_vm.name)
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy_profile = appliance.collections.policy_profiles.create(
        policy.description, policies=[policy]
    )
    request.addfinalizer(lambda: policy_profile.delete() if policy_profile.exists else None)
    virtualcenter_provider.assign_policy_profiles(policy_profile.description)
    request.addfinalizer(lambda: virtualcenter_provider.unassign_policy_profiles(
        policy_profile.description))
    vmware_vm.check_compliance()
    vmware_vm.open_details(["Compliance", "History"])
    history_screen_title = Text(appliance.browser.widgetastic,
                                "//span[@id='explorer_title_text']").text
    assert history_screen_title == '"Compliance History" for Virtual Machine "{}"'.format(
        vmware_vm.name)


@pytest.mark.meta(blockers=[BZ(1395965), BZ(1491576)])
def test_delete_all_actions_from_compliance_policy(request, appliance):
    """We should not allow a compliance policy to be saved if there are no
    actions on the compliance event.

    Steps:
        * Create a compliance policy
        * Remove all actions

    Result:
        The policy shouldn't be saved.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        caseposneg: negative
        initialEstimate: 1/12h
    """
    policy = appliance.collections.policies.create(
        VMCompliancePolicy,
        fauxfactory.gen_alphanumeric()
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    with pytest.raises(AssertionError):
        policy.assign_actions_to_event("VM Compliance Check", [])


@pytest.mark.parametrize("create_function", items, ids=[item.name for item in items])
def test_control_identical_descriptions(request, create_function, collections, appliance):
    """CFME should not allow policies, alerts, profiles, actions and other
    items to be created if an item with the same description already exists.
    Steps:
        * Create an item
        * Create the same item again

    Result:
        The item shouldn't be created.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: low
        initialEstimate: 1/12h
    """
    args, kwargs = create_function.fn(request, collections[create_function.name])
    flash = appliance.browser.create_view(ControlExplorerView).flash
    try:
        collections[create_function.name].create(*args, **kwargs)
    except (TimedOutError, AssertionError):
        flash.assert_message("Description has already been taken")
        # force navigation away from the page so the browser is not stuck on the edit page
        navigate_to(appliance.server, 'ControlExplorer', force=True)


@pytest.mark.meta(blockers=[BZ(1231889)], automates=[1231889])
def test_vmware_alarm_selection_does_not_fail(request, appliance):
    """Test the bug that causes CFME UI to explode when VMware Alarm type is
    selected. We assert that the alert using this type is simply created. Then
    we destroy the alert.

    Metadata:
        test_flag: alerts

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: low
        initialEstimate: 1/12h
    """
    try:
        alert = appliance.collections.alerts.create(
            "Trigger by CPU {}".format(fauxfactory.gen_alpha(length=4)),
            active=True,
            based_on="VM and Instance",
            evaluate=("VMware Alarm", {}),
            notification_frequency="5 Minutes"
        )
        request.addfinalizer(lambda: alert.delete() if alert.exists else None)
    except CFMEExceptionOccured as e:
        pytest.fail("CFME has thrown an error: {}".format(str(e)))


def test_alert_ram_reconfigured(hardware_reconfigured_alert):
    """Tests the bug when it was not possible to save an alert with RAM
    option in hardware attributes.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/12h
    """
    view = navigate_to(hardware_reconfigured_alert, "Details")
    attr = view.hardware_reconfigured_parameters.get_text_of("Hardware Attribute")
    assert attr == "RAM Increased"


@pytest.mark.tier(2)
@test_requirements.alert
def test_alert_for_disk_usage(setup_disk_usage_alert):
    """
    Bugzilla:
        1658670
        1672698

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/6hr
        testSteps:
            1. Go to Control > Explorer > Alerts
            2. Configuration > Add new alert
            3. Based on = Server
            4. What to evaluate = Expression (Custom)
            5. Driving Event = "Appliance Operation: Server High /var/www/miq/vmdb/log Disk Usage"
            6. Assign the alert to an Alert Profile
            7. Assign the Alert Profile to the Server
            8. In advanced config, change:
                events:
                  :disk_usage_gt_percent: 80
               to:
                events:
                  :disk_usage_gt_percent: 1
            9. dd a file in /var/www/miq/vmdb/log large enough to trigger 1% disk usage
        expectedResults:
            1.
            2.
            3.
            4.
            5.
            6.
            7.
            8.
            9.
the alert should fire, and the event of type
                "evm_server_log_disk_usage" should trigger
    """
    alert, timestamp, query = setup_disk_usage_alert

    def _check_query():
        query_result = query.all()
        if query_result:
            # here query_result[0][0] and query_result[0][1] correspond to the description and
            # timestamp pulled from the database, respectively
            return alert.description == query_result[0][0] and timestamp < query_result[0][1]
        else:
            return False

    # wait for the alert to appear in the miq_alert_statuses table
    wait_for(
        _check_query,
        delay=5,
        num_sec=600,
        message="Waiting for alert {} to appear in DB".format(alert.description)
    )


@pytest.mark.parametrize(
    "condition_class", CONDITIONS, ids=lambda condition_class: condition_class.__name__
)
def test_accordion_after_condition_creation(appliance, condition_class):
    """
    Bugzilla:
        1683697

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/12hr

    For this test, we must make a condition 'manually' so that we can access
    the view during the condition creation.
    """
    if BZ(1683697).blocks and condition_class in BAD_CONDITIONS:
        pytest.skip("Skipping because {} conditions are impacted by BZ 1683697"
                    .format(condition_class.__name__))
    condition = appliance.collections.conditions.create(condition_class,
        fauxfactory.gen_alpha(),
        expression="fill_field({} : Name, IS NOT EMPTY)".format(
            condition_class.FIELD_VALUE)
    )
    view = condition.create_view(conditions.ConditionDetailsView, wait="10s")
    assert view.conditions.tree.currently_selected == [
        "All Conditions",
        "{} Conditions".format(condition_class.TREE_NODE),
        condition.description
    ]
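The _check_query/wait_for pairing used above is a generic poll-until-true pattern. A minimal stand-in for cfme.utils.wait.wait_for (a sketch only; the real helper supports handlers, fail conditions, and richer timing) could look like:

import time


class TimedOut(Exception):
    """Stand-in for cfme.utils.wait.TimedOutError."""


def wait_until(predicate, delay=5, num_sec=600, message=''):
    # Re-check the predicate every `delay` seconds until it returns a
    # truthy value or `num_sec` seconds have elapsed.
    deadline = time.time() + num_sec
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(delay)
    raise TimedOut('Timed out after {}s: {}'.format(num_sec, message))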
RedHatQE/cfme_tests
cfme/tests/control/test_bugs.py
cfme/fixtures/rdb.py
import attr
from navmazing import NavigateToAttribute

from .definition_views import GenericObjectActionsDetailsView
from .definition_views import GenericObjectAddButtonView
from .definition_views import GenericObjectButtonGroupAddView
from .definition_views import GenericObjectButtonGroupDetailsView
from .definition_views import GenericObjectDefinitionAllView
from .definition_views import GenericObjectDefinitionDetailsView
from cfme.exceptions import OptionNotAvailable
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator


@attr.s
class GenericObjectButton(BaseEntity):
    name = attr.ib()
    description = attr.ib()
    image = attr.ib()
    request = attr.ib()
    button_type = attr.ib(default='Default')
    display = attr.ib(default=True)
    dialog = attr.ib(default=None)
    open_url = attr.ib(default=None)
    display_for = attr.ib(default=None)
    submit_version = attr.ib(default=None)
    system_message = attr.ib(default=None)
    attributes = attr.ib(default=None)
    role = attr.ib(default=None)
    button_group = attr.ib(default=None)

    def delete(self, cancel=False):
        """Delete generic object button

        Args:
            cancel(bool): By default button will be deleted, pass True to cancel deletion
        """
        view = navigate_to(self, 'Details')
        view.configuration.item_select('Remove this Button from Inventory',
                                       handle_alert=not cancel)
        view = self.create_view(GenericObjectDefinitionAllView)
        assert view.is_displayed
        view.flash.assert_no_error()


@attr.s
class GenericObjectButtonsCollection(BaseCollection):

    ENTITY = GenericObjectButton

    def create(self, name, description, request, image, button_type='Default',
               display=True, dialog=None, open_url=None, display_for=None,
               submit_version=None, system_message=None, attributes=None,
               role=None, cancel=False):
        """Add button to generic object definition or button group

        Args:
            name(str): button name
            description(str): button description
            request(str): button request type
            image(str): button image
            button_type(str): button type
            display(bool): parameter to display button on UI or not, default is True
            dialog(str): button dialog
            open_url(str): button open_url
            display_for(str): for which item this button should be displayed
            submit_version(str): how this button should be submitted, ex. 'One by one'
            system_message(str): button submit message
            attributes(dict): button attributes ex.
{'address': 'string'}
            role: role used for button
            cancel(bool): cancel button creation, default is False

        Returns: button object
        """
        view = navigate_to(self, 'Add')
        view.fill({
            'button_type': button_type,
            'name': name,
            'description': description,
            'display': display,
            'image': image,
            'dialog': dialog,
            'open_url': open_url,
            'display_for': display_for,
            'request': request,
            'submit_version': submit_version,
            'system_message': system_message
        })
        if attributes:
            for name, type in attributes.items():
                view.attribute_value_table.fill([{'Name': name}, {'Value': type}])
        if isinstance(role, dict):
            view.role.select('<By Role>')
            # todo select roles
        if cancel:
            view.cancel.click()
        else:
            view.add.click()
        view.flash.assert_no_error()

        return self.instantiate(name=name, description=description, request=request,
                                image=image, button_type=button_type, display=display,
                                dialog=dialog, open_url=open_url, display_for=display_for,
                                submit_version=submit_version, system_message=system_message,
                                attributes=attributes, role=role)

    def all(self):
        """All existing buttons

        Returns: list of buttons objects
        """
        buttons = []
        view = navigate_to(self, 'All')
        for row in view.button_table:
            image_class = view.browser.get_attribute(
                'class', view.browser.element('./i', parent=row[0].__locator__()))
            buttons.append(self.instantiate(name=row.text.text,
                                            description=row.hover_text.text,
                                            image=image_class.split(' ')[1]))
        return buttons


@navigator.register(GenericObjectButtonsCollection, 'Add')
class ButtonAdd(CFMENavigateStep):
    VIEW = GenericObjectAddButtonView

    prerequisite = NavigateToAttribute('parent', 'Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.configuration.item_select('Add a new Button')


@navigator.register(GenericObjectButtonsCollection, 'All')
class ButtonAll(CFMENavigateStep):
    @property
    def VIEW(self):  # noqa
        if isinstance(self.obj.parent, GenericObjectButtonGroup):
            return GenericObjectButtonGroupDetailsView
        else:
            return GenericObjectActionsDetailsView

    prerequisite = NavigateToAttribute('parent', 'Details')

    def step(self, *args, **kwargs):
        if not isinstance(self.obj.parent, GenericObjectButtonGroup):
            self.prerequisite_view.accordion.classes.tree.click_path(
                'All Generic Object Classes', self.obj.parent.name, 'Actions')


@navigator.register(GenericObjectButton, 'Details')
class ButtonDetails(CFMENavigateStep):
    VIEW = GenericObjectAddButtonView

    prerequisite = NavigateToAttribute('parent', 'Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.configuration.item_select('Add a new Button')


@attr.s
class GenericObjectButtonGroup(BaseEntity):
    name = attr.ib()
    description = attr.ib()
    image = attr.ib()
    display = attr.ib(default=True)

    _collections = {'generic_object_buttons': GenericObjectButtonsCollection}

    def delete(self, cancel=False):
        """Delete generic object button group

        Args:
            cancel(bool): By default group will be deleted, pass True to cancel deletion
        """
        view = navigate_to(self, 'Details')
        if not view.toolbar.configuration.item_enabled('Remove this Button Group from Inventory'):
            raise OptionNotAvailable(
                "Remove this Button Group is not enabled, there are buttons assigned to this group")
        else:
            view.configuration.item_select(
                'Remove this Button Group from Inventory', handle_alert=not cancel)
        view = self.create_view(GenericObjectDefinitionAllView)
        assert view.is_displayed
        view.flash.assert_no_error()


@attr.s
class GenericObjectButtonGroupsCollection(BaseCollection):

    ENTITY = GenericObjectButtonGroup

    def create(self, name, description, image, display=True, cancel=False):
        """Add button group for generic object
definition

        Args:
            name(str): button group name
            description(str): button group description
            image(str): button group image
            display(bool): parameter to display button group on UI or not, default is True
            cancel(bool): cancel button creation, default is False

        Returns: button group object
        """
        view = navigate_to(self, 'Add')
        view.fill({
            'image': image,
            'name': name,
            'description': description,
            'display': display,
        })
        if cancel:
            view.cancel.click()
        else:
            view.add.click()
        view = self.parent.create_view(GenericObjectDefinitionDetailsView)
        assert view.is_displayed
        view.flash.assert_no_error()
        group = self.instantiate(name=name, description=description, image=image,
                                 display=display)
        return group

    def all(self):
        """All existing button groups

        Returns: list of button groups objects
        """
        groups = []
        view = navigate_to(self, 'All')
        all_groups = view.group_table
        for row in all_groups:
            image_class = view.browser.get_attribute(
                'class', view.browser.element('./i', parent=row[0].__locator__()))
            groups.append(self.instantiate(name=row.text.text,
                                           description=row.hover_text.text,
                                           image=image_class.split(' ')[1]))
        return groups


@navigator.register(GenericObjectButtonGroupsCollection, 'All')
class ButtonGroupAll(CFMENavigateStep):
    VIEW = GenericObjectActionsDetailsView

    prerequisite = NavigateToAttribute('parent', 'Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.accordion.classes.tree.click_path(
            'All Generic Object Classes', self.obj.parent.name, 'Actions')


@navigator.register(GenericObjectButtonGroupsCollection, 'Add')
class ButtonGroupAdd(CFMENavigateStep):
    VIEW = GenericObjectButtonGroupAddView

    prerequisite = NavigateToAttribute('parent', 'Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.configuration.item_select('Add a new Button Group')


@navigator.register(GenericObjectButtonGroup, 'Details')
class ButtonGroupDetails(CFMENavigateStep):
    VIEW = GenericObjectButtonGroupDetailsView

    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self, *args, **kwargs):
        self.prerequisite_view.accordion.classes.tree.click_path(
            'All Generic Object Classes', self.obj.parent.parent.name, 'Actions',
            '{} (Group)'.format(self.obj.name))
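The entity/collection split used throughout this module follows one pattern: the collection's create() drives the UI, then instantiate() returns a lightweight attrs model. Stripped of all UI plumbing, the shape is roughly the following (hypothetical classes, for illustration only):

import attr


@attr.s
class SketchButton(object):
    name = attr.ib()
    description = attr.ib()
    image = attr.ib()
    display = attr.ib(default=True)


@attr.s
class SketchButtonCollection(object):
    ENTITY = SketchButton

    def instantiate(self, *args, **kwargs):
        # Build the in-memory model of an entity that already exists.
        return self.ENTITY(*args, **kwargs)

    def create(self, name, description, image, **kwargs):
        # ... UI interaction would happen here in the real collection ...
        return self.instantiate(name, description, image, **kwargs)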
# -*- coding: utf-8 -*- from collections import namedtuple from datetime import datetime import fauxfactory import pytest from widgetastic.widget import Text from cfme import test_requirements from cfme.control.explorer import conditions from cfme.control.explorer import ControlExplorerView from cfme.control.explorer.alert_profiles import ServerAlertProfile from cfme.control.explorer.policies import VMCompliancePolicy from cfme.control.explorer.policies import VMControlPolicy from cfme.exceptions import CFMEExceptionOccured from cfme.tests.control.test_basic import CONDITIONS from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.generators import random_vm_name from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for pytestmark = [ test_requirements.control, pytest.mark.tier(3) ] BAD_CONDITIONS = [ conditions.ReplicatorCondition, conditions.PodCondition, conditions.ContainerNodeCondition, conditions.ContainerImageCondition, conditions.ProviderCondition ] def create_policy(request, collection): args = (VMControlPolicy, fauxfactory.gen_alpha()) kwargs = {} policy = collection.create(*args) @request.addfinalizer def _delete(): while policy.exists: policy.delete() return args, kwargs def create_condition(request, collection): args = ( conditions.VMCondition, fauxfactory.gen_alpha(), "fill_field(VM and Instance : Boot Time, BEFORE, Today)" ) kwargs = {} condition = collection.create(*args) @request.addfinalizer def _delete(): while condition.exists: condition.delete() return args, kwargs def create_action(request, collection): args = (fauxfactory.gen_alpha(),) kwargs = { "action_type": "Tag", "action_values": {"tag": ("My Company Tags", "Department", "Accounting")} } action = collection.create(*args, **kwargs) @request.addfinalizer def _delete(): while action.exists: action.delete() return args, kwargs def create_alert(request, collection): args = (fauxfactory.gen_alpha(),) kwargs = {"timeline_event": True, "driving_event": "Hourly Timer"} alert = collection.create(*args, **kwargs) @request.addfinalizer def _delete(): while alert.exists: alert.delete() return args, kwargs ProfileCreateFunction = namedtuple('ProfileCreateFunction', ['name', 'fn']) items = [ ProfileCreateFunction("Policies", create_policy), ProfileCreateFunction("Conditions", create_condition), ProfileCreateFunction("Actions", create_action), ProfileCreateFunction("Alerts", create_alert) ] @pytest.fixture(scope="module") def collections(appliance): return { "Policies": appliance.collections.policies, "Conditions": appliance.collections.conditions, "Actions": appliance.collections.actions, "Alerts": appliance.collections.alerts } @pytest.fixture def vmware_vm(request, virtualcenter_provider): vm = virtualcenter_provider.appliance.collections.infra_vms.instantiate( random_vm_name("control"), virtualcenter_provider ) vm.create_on_provider(find_in_cfme=True) request.addfinalizer(vm.cleanup_on_provider) return vm @pytest.fixture def hardware_reconfigured_alert(appliance): alert = appliance.collections.alerts.create( fauxfactory.gen_alpha(), evaluate=("Hardware Reconfigured", {"hardware_attribute": "RAM"}), timeline_event=True ) yield alert alert.delete() @pytest.fixture def setup_disk_usage_alert(appliance): # get the current time timestamp = datetime.now() # setup the DB query table = appliance.db.client['miq_alert_statuses'] query = appliance.db.client.session.query(table.description, table.evaluated_on) # configure the advanced settings and place a 
large file on the appliance # disk usage above 1 % will now trigger a disk_usage event appliance.update_advanced_settings( {"server": {"events": {"disk_usage_gt_percent": 1}}} ) # create a 1 GB file on /var/www/miq/vmdb/log result = appliance.ssh_client.run_command( "dd if=/dev/zero of=/var/www/miq/vmdb/log/delete_me.txt count=1024 bs=1048576" ) # verify that the command was successful assert not result.failed # setup the alert for firing expression = {"expression": "fill_count(Server.EVM Workers, >, 0)"} alert = appliance.collections.alerts.create( fauxfactory.gen_alpha(), based_on='Server', evaluate=("Expression (Custom)", expression), driving_event="Appliance Operation: Server High /var/www/miq/vmdb/log Disk Usage", notification_frequency="1 Minute", ) alert_profile = appliance.collections.alert_profiles.create( ServerAlertProfile, "Alert profile for {}".format(alert.description), alerts=[alert] ) alert_profile.assign_to("Selected Servers", selections=["Servers", "EVM"]) yield alert, timestamp, query alert_profile.delete() alert.delete() appliance.update_advanced_settings( {"server": {"events": {"disk_usage_gt_percent": "<<reset>>"}}} ) result = appliance.ssh_client.run_command("rm /var/www/miq/vmdb/log/delete_me.txt") # verify that the command was successful assert not result.failed @pytest.mark.meta(blockers=[BZ(1155284)]) def test_scope_windows_registry_stuck(request, appliance, infra_provider): """If you provide Scope checking windows registry, it messes CFME up. Recoverable. Polarion: assignee: jdupuy casecomponent: Control caseimportance: low initialEstimate: 1/6h """ policy = appliance.collections.policies.create( VMCompliancePolicy, "Windows registry scope glitch testing Compliance Policy", active=True, scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, " r"some value, INCLUDES, some content)" ) request.addfinalizer(lambda: policy.delete() if policy.exists else None) profile = appliance.collections.policy_profiles.create( "Windows registry scope glitch testing Compliance Policy", policies=[policy] ) request.addfinalizer(lambda: profile.delete() if profile.exists else None) # Now assign this malformed profile to a VM # not assuming tht infra_provider is actually an InfraProvider type vm = infra_provider.appliance.collections.infra_vms.all()[0] vm.assign_policy_profiles(profile.description) # It should be screwed here, but do additional check navigate_to(appliance.server, 'Dashboard') view = navigate_to(appliance.collections.infra_vms, 'All') assert "except" not in view.entities.title.text.lower() vm.unassign_policy_profiles(profile.description) @pytest.mark.meta(blockers=[BZ(1243357)], automates=[1243357]) def test_invoke_custom_automation(request, appliance): """This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear. 
Steps: * Go create new action, select Invoke Custom Automation * The form with additional fields should appear Polarion: assignee: jdupuy casecomponent: Control caseimportance: medium initialEstimate: 1/6h """ # The action is to have all possible fields filled, that way we can ensure it is good action = appliance.collections.actions.create( fauxfactory.gen_alpha(), "Invoke a Custom Automation", dict( message=fauxfactory.gen_alpha(), request=fauxfactory.gen_alpha(), attribute_1=fauxfactory.gen_alpha(), value_1=fauxfactory.gen_alpha(), attribute_2=fauxfactory.gen_alpha(), value_2=fauxfactory.gen_alpha(), attribute_3=fauxfactory.gen_alpha(), value_3=fauxfactory.gen_alpha(), attribute_4=fauxfactory.gen_alpha(), value_4=fauxfactory.gen_alpha(), attribute_5=fauxfactory.gen_alpha(), value_5=fauxfactory.gen_alpha() ) ) request.addfinalizer(lambda: action.delete() if action.exists else None) @pytest.mark.meta(blockers=[BZ(1375093)], automates=[1375093]) def test_check_compliance_history(request, virtualcenter_provider, vmware_vm, appliance): """This test checks if compliance history link in a VM details screen work. Steps: * Create any VM compliance policy * Assign it to a policy profile * Assign the policy profile to any VM * Perform the compliance check for the VM * Go to the VM details screen * Click on "History" row in Compliance InfoBox Result: Compliance history screen with last 10 checks should be opened Polarion: assignee: jdupuy initialEstimate: 1/4h casecomponent: Control """ policy = appliance.collections.policies.create( VMCompliancePolicy, "Check compliance history policy {}".format(fauxfactory.gen_alpha()), active=True, scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vmware_vm.name) ) request.addfinalizer(lambda: policy.delete() if policy.exists else None) policy_profile = appliance.collections.policy_profiles.create( policy.description, policies=[policy] ) request.addfinalizer(lambda: policy_profile.delete() if policy_profile.exists else None) virtualcenter_provider.assign_policy_profiles(policy_profile.description) request.addfinalizer(lambda: virtualcenter_provider.unassign_policy_profiles( policy_profile.description)) vmware_vm.check_compliance() vmware_vm.open_details(["Compliance", "History"]) history_screen_title = Text(appliance.browser.widgetastic, "//span[@id='explorer_title_text']").text assert history_screen_title == '"Compliance History" for Virtual Machine "{}"'.format( vmware_vm.name) @pytest.mark.meta(blockers=[BZ(1395965), BZ(1491576)]) def test_delete_all_actions_from_compliance_policy(request, appliance): """We should not allow a compliance policy to be saved if there are no actions on the compliance event. Steps: * Create a compliance policy * Remove all actions Result: The policy shouldn't be saved. Polarion: assignee: jdupuy casecomponent: Control caseimportance: medium caseposneg: negative initialEstimate: 1/12h """ policy = appliance.collections.policies.create( VMCompliancePolicy, fauxfactory.gen_alphanumeric() ) request.addfinalizer(lambda: policy.delete() if policy.exists else None) with pytest.raises(AssertionError): policy.assign_actions_to_event("VM Compliance Check", []) @pytest.mark.parametrize("create_function", items, ids=[item.name for item in items]) def test_control_identical_descriptions(request, create_function, collections, appliance): """CFME should not allow to create policy, alerts, profiles, actions and others to be created if the item with the same description already exists. 
Steps: * Create an item * Create the same item again Result: The item shouldn't be created. Polarion: assignee: jdupuy casecomponent: Control caseimportance: low initialEstimate: 1/12h """ args, kwargs = create_function.fn(request, collections[create_function.name]) flash = appliance.browser.create_view(ControlExplorerView).flash try: collections[create_function.name].create(*args, **kwargs) except (TimedOutError, AssertionError): flash.assert_message("Description has already been taken") # force navigation away from the page so the browser is not stuck on the edit page navigate_to(appliance.server, 'ControlExplorer', force=True) @pytest.mark.meta(blockers=[BZ(1231889)], automates=[1231889]) def test_vmware_alarm_selection_does_not_fail(request, appliance): """Test the bug that causes the CFME UI to explode when the VMware Alarm type is selected. We assert that the alert using this type is simply created. Then we destroy the alert. Metadata: test_flag: alerts Polarion: assignee: jdupuy casecomponent: Control caseimportance: low initialEstimate: 1/12h """ try: alert = appliance.collections.alerts.create( "Trigger by CPU {}".format(fauxfactory.gen_alpha(length=4)), active=True, based_on="VM and Instance", evaluate=("VMware Alarm", {}), notification_frequency="5 Minutes" ) request.addfinalizer(lambda: alert.delete() if alert.exists else None) except CFMEExceptionOccured as e: pytest.fail("CFME has thrown an error: {}".format(str(e))) def test_alert_ram_reconfigured(hardware_reconfigured_alert): """Tests the bug where it was not possible to save an alert with the RAM option in hardware attributes. Polarion: assignee: jdupuy casecomponent: Control caseimportance: medium initialEstimate: 1/12h """ view = navigate_to(hardware_reconfigured_alert, "Details") attr = view.hardware_reconfigured_parameters.get_text_of("Hardware Attribute") assert attr == "RAM Increased" @pytest.mark.tier(2) @test_requirements.alert def test_alert_for_disk_usage(setup_disk_usage_alert): """ Bugzilla: 1658670 1672698 Polarion: assignee: jdupuy casecomponent: Control caseimportance: medium initialEstimate: 1/6h testSteps: 1. Go to Control > Explorer > Alerts 2. Configuration > Add new alert 3. Based on = Server 4. What to evaluate = Expression (Custom) 5. Driving Event = "Appliance Operation: Server High /var/www/miq/vmdb/log Disk Usage" 6. Assign the alert to an Alert Profile 7. Assign the Alert Profile to the Server 8. In advanced config, change: events: :disk_usage_gt_percent: 80 to: events: :disk_usage_gt_percent: 1 9. dd a file in /var/www/miq/vmdb/log large enough to trigger 1% disk usage expectedResults: 1. 2. 3. 4. 5. 6. 7. 8. 9.
the alert should fire, and the event of type "evm_server_log_disk_usage" should trigger """ alert, timestamp, query = setup_disk_usage_alert def _check_query(): query_result = query.all() if query_result: # here query_result[0][0] and query_result[0][1] correspond to the description and # timestamp pulled from the database, respectively return alert.description == query_result[0][0] and timestamp < query_result[0][1] else: return False # wait for the alert to appear in the miq_alert_statuses table wait_for( _check_query, delay=5, num_sec=600, message="Waiting for alert {} to appear in DB".format(alert.description) ) @pytest.mark.parametrize( "condition_class", CONDITIONS, ids=lambda condition_class: condition_class.__name__ ) def test_accordion_after_condition_creation(appliance, condition_class): """ Bugzilla: 1683697 Polarion: assignee: jdupuy casecomponent: Control caseimportance: medium initialEstimate: 1/12h For this test, we must make a condition 'manually' so that we can access the view during the condition creation. """ if BZ(1683697).blocks and condition_class in BAD_CONDITIONS: pytest.skip("Skipping because {} conditions are impacted by BZ 1683697" .format(condition_class.__name__)) condition = appliance.collections.conditions.create(condition_class, fauxfactory.gen_alpha(), expression="fill_field({} : Name, IS NOT EMPTY)".format( condition_class.FIELD_VALUE) ) view = condition.create_view(conditions.ConditionDetailsView, wait="10s") assert view.conditions.tree.currently_selected == [ "All Conditions", "{} Conditions".format(condition_class.TREE_NODE), condition.description ]
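The `_check_query` helper above is an instance of the suite's general polling idiom: wrap the success condition in a zero-argument predicate and hand it to `wait_for`, which re-evaluates it every `delay` seconds until it returns a truthy value or `num_sec` elapses (then raising `TimedOutError`). A minimal standalone sketch of that idiom, assuming a hypothetical `fetch_rows` callable in place of the SQLAlchemy query used in the fixture:

from cfme.utils.wait import wait_for

def wait_for_alert_row(alert_description, fetch_rows, since, timeout=600):
    """Poll until a (description, evaluated_on) row for the alert shows up."""
    def _row_present():
        rows = fetch_rows()  # hypothetical: returns [(description, evaluated_on), ...]
        return bool(rows) and rows[0][0] == alert_description and since < rows[0][1]

    # wait_for retries the predicate until it is truthy or the timeout elapses
    wait_for(
        _row_present,
        delay=5,
        num_sec=timeout,
        message="Waiting for alert {} to appear in DB".format(alert_description),
    )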
RedHatQE/cfme_tests
cfme/tests/control/test_bugs.py
cfme/generic_objects/definition/button_groups.py
# -*- coding: utf-8 -*- import attr from navmazing import NavigateToAttribute from widgetastic.widget import NoSuchElementException from widgetastic.widget import Text from widgetastic.widget import View from widgetastic_patternfly import BootstrapNav from widgetastic_patternfly import BreadCrumb from widgetastic_patternfly import Button from widgetastic_patternfly import Dropdown from cfme.base.ui import BaseLoggedInPage from cfme.common import Taggable from cfme.exceptions import ItemNotFound from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.appliance.implementations.ui import CFMENavigateStep from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.appliance.implementations.ui import navigator from cfme.utils.providers import get_crud_by_name from widgetastic_manageiq import Accordion from widgetastic_manageiq import BaseEntitiesView from widgetastic_manageiq import ItemsToolBarViewSelector from widgetastic_manageiq import ManageIQTree from widgetastic_manageiq import Search from widgetastic_manageiq import SummaryTable class ObjectStoreObjectToolbar(View): """The toolbar on the Object Store Object page""" configuration = Dropdown('Configuration') policy = Dropdown('Policy') download = Dropdown('Download') view_selector = View.nested(ItemsToolBarViewSelector) class ObjectStoreObjectDetailsToolbar(View): """The toolbar on the Object Store Object detail page""" policy = Dropdown('Policy') download = Button(title='Download summary in PDF format') class ObjectStoreObjectDetailsEntities(View): """The entities on the Object Store Object detail page""" breadcrumb = BreadCrumb() properties = SummaryTable('Properties') relationships = SummaryTable('Relationships') smart_management = SummaryTable('Smart Management') class ObjectStoreObjectDetailsSidebar(View): """The sidebar on the Object Store Object details page""" @View.nested class properties(Accordion): # noqa tree = ManageIQTree() @View.nested class relationships(Accordion): # noqa tree = ManageIQTree() class ObjectStoreObjectView(BaseLoggedInPage): """A base view for all the Object Store Object pages""" title = Text('.//div[@id="center_div" or @id="main-content"]//h1') @property def in_object(self): return ( self.logged_in_as_current_user and self.navigation.currently_selected == ['Storage', 'Object Storage', 'Object Store Objects']) class ObjectStoreObjectAllView(ObjectStoreObjectView): """The all Object Store Object page""" toolbar = View.nested(ObjectStoreObjectToolbar) search = View.nested(Search) including_entities = View.include(BaseEntitiesView, use_parent=True) @property def is_displayed(self): return ( self.in_object and self.title.text == 'Cloud Object Store Objects') @View.nested class my_filters(Accordion): # noqa ACCORDION_NAME = "My Filters" navigation = BootstrapNav('.//div/ul') tree = ManageIQTree() class ObjectStoreObjectDetailsView(ObjectStoreObjectView): """The detail Object Store Object page""" @property def is_displayed(self): expected_title = '{} (Summary)'.format(self.context['object'].key) return ( self.title.text == expected_title and self.entities.breadcrumb.active_location == expected_title) toolbar = View.nested(ObjectStoreObjectDetailsToolbar) sidebar = View.nested(ObjectStoreObjectDetailsSidebar) entities = View.nested(ObjectStoreObjectDetailsEntities) @attr.s class ObjectStoreObject(BaseEntity, Taggable): """ Model of a Storage Object Store Object in CFME Args: key: key of the object.
provider: provider object the object belongs to """ key = attr.ib() provider = attr.ib() @attr.s class ObjectStoreObjectCollection(BaseCollection): """Collection object for the :py:class:`cfme.storage.object_store_object.ObjectStoreObject` """ ENTITY = ObjectStoreObject @property def manager(self): coll = self.appliance.collections.object_managers.filter( {"provider": self.filters.get('provider')} ) # each provider has a single object-type storage manager return coll.all()[0] def all(self): """Return all Object Store Objects""" view = navigate_to(self, 'All') view.entities.paginator.set_items_per_page(500) objects = [] try: if 'provider' in self.filters: for item in view.entities.elements.read(): if self.filters['provider'].name in item['Cloud Provider']: objects.append(self.instantiate(key=item['Key'], provider=self.filters['provider'])) else: for item in view.entities.elements.read(): provider_name = item['Cloud Provider'].split()[0] provider = get_crud_by_name(provider_name) objects.append(self.instantiate(key=item['Key'], provider=provider)) return objects except NoSuchElementException: return None def delete(self, *objects): view = navigate_to(self, 'All') for obj in objects: try: view.entities.get_entity(key=obj.key, surf_pages=True).check() except ItemNotFound: raise ItemNotFound('Could not locate object {}'.format(obj.key)) view.toolbar.configuration.item_select('Remove Object Storage Objects', handle_alert=True) view.flash.assert_no_error() @navigator.register(ObjectStoreObjectCollection, 'All') class ObjectStoreObjectAll(CFMENavigateStep): VIEW = ObjectStoreObjectAllView prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn') def step(self, *args, **kwargs): self.prerequisite_view.navigation.select( 'Storage', 'Object Storage', 'Object Store Objects') @navigator.register(ObjectStoreObject, 'Details') class ObjectStoreObjectDetails(CFMENavigateStep): VIEW = ObjectStoreObjectDetailsView prerequisite = NavigateToAttribute('parent', 'All') def step(self, *args, **kwargs): try: self.prerequisite_view.entities.get_entity(key=self.obj.key, surf_pages=True).click() except ItemNotFound: raise ItemNotFound('Could not locate object {}'.format(self.obj.key))
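As a usage sketch (not part of the source file): the collection above is meant to be reached through the appliance's collection registry and scoped with `filter`. The registry name `object_store_objects` and the surrounding `appliance`/`provider` objects are assumptions here, inferred from the patterns in this module rather than confirmed by it:

def delete_objects_for_provider(appliance, provider, keys):
    # Scope the collection to a single provider via the filters dict,
    # mirroring the 'provider' branch in ObjectStoreObjectCollection.all()
    collection = appliance.collections.object_store_objects.filter(
        {"provider": provider}
    )
    objects = [collection.instantiate(key=key, provider=provider) for key in keys]
    # delete() checks each entity on the All page and removes them in one action
    collection.delete(*objects)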
RedHatQE/cfme_tests
cfme/tests/control/test_bugs.py
cfme/storage/object_store_object.py
# -*- coding: utf-8 -*- """This module allows you to update an appliance with the latest RHEL. It has two uses: 1) If only ``--update-appliance`` is specified, it will use the update URL from the YAML configuration. 2) If you also specify one or more ``--update-url``, it will use them instead. """ import pytest def pytest_addoption(parser): group = parser.getgroup('cfme') group.addoption( '--update-appliance', dest='update_appliance', action='store_true', default=False, help="Enable updating an appliance before the first test is run.") group.addoption( '--update-url', dest='update_urls', action='append', default=[], help="URLs to update with. If none are passed, the yaml key is used.") def pytest_sessionstart(session): if pytest.store.parallelizer_role == 'master': return if not session.config.getoption("update_appliance"): return pytest.store.write_line("Initiating appliance update ...") urls = session.config.getoption("update_urls") pytest.store.current_appliance.update_rhel(*urls, reboot=True) pytest.store.write_line("Appliance update finished, waiting for UI ...") pytest.store.current_appliance.wait_for_web_ui() pytest.store.write_line("Appliance update finished ...")
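A sketch of how the two options above combine; the command lines in the comments are illustrative only, and `resolve_update_urls` is a hypothetical helper, not part of the plugin:

# pytest --update-appliance
#     -> update_rhel() is called with no URLs, so the appliance falls back
#        to the update URL configured in its YAML
# pytest --update-appliance --update-url http://example.com/repo1 --update-url http://example.com/repo2
#     -> update_rhel('http://example.com/repo1', 'http://example.com/repo2', reboot=True)
def resolve_update_urls(update_appliance, update_urls):
    """Return the URL tuple for update_rhel, or None when updating is disabled."""
    if not update_appliance:
        return None  # plugin stays inactive
    return tuple(update_urls)  # an empty tuple means the YAML-configured URL is used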
RedHatQE/cfme_tests
cfme/tests/control/test_bugs.py
cfme/fixtures/appliance_update.py
# -*- coding: utf-8 -*- """Page model for Cloud Intel / Reports / Dashboard Widgets / Menus""" import attr from . import BaseDashboardReportWidget from . import BaseDashboardWidgetFormCommon from . import BaseEditDashboardWidgetStep from . import BaseEditDashboardWidgetView from . import BaseNewDashboardWidgetStep from . import BaseNewDashboardWidgetView from cfme.utils.appliance.implementations.ui import navigator from widgetastic_manageiq import MenuShortcutsPicker class MenuWidgetFormCommon(BaseDashboardWidgetFormCommon): menu_shortcuts = MenuShortcutsPicker( "form_filter_div", select_id="add_shortcut", names_locator=".//input[starts-with(@name, 'shortcut_desc_')]" ) class NewMenuWidgetView(BaseNewDashboardWidgetView, MenuWidgetFormCommon): pass class EditMenuWidgetView(BaseEditDashboardWidgetView, MenuWidgetFormCommon): pass @attr.s class MenuWidget(BaseDashboardReportWidget): TYPE = "Menus" TITLE = "Menu" pretty_attrs = ["description", "shortcuts", "visibility"] shortcuts = attr.ib(default=None) @property def fill_dict(self): return { "widget_title": self.title, "description": self.description, "active": self.active, "menu_shortcuts": self.shortcuts, "visibility": self.visibility } @navigator.register(MenuWidget, "Add") class NewMenuWidget(BaseNewDashboardWidgetStep): VIEW = NewMenuWidgetView @navigator.register(MenuWidget, "Edit") class EditMenuWidget(BaseEditDashboardWidgetStep): VIEW = EditMenuWidgetView
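A minimal instantiation sketch for the widget above. It assumes `BaseDashboardReportWidget` declares `title`, `description`, `active` and `visibility` as attrs fields (implied by `fill_dict` but not shown in this file); the parent `collection` and all values below are hypothetical:

def make_menu_widget(collection):
    # parent collection comes first, per the BaseEntity attrs convention (assumed)
    widget = MenuWidget(
        collection,
        title="Quick Links",
        description="Shortcuts to commonly used pages",
        active=True,
        shortcuts={"Services / Requests": "Requests"},  # shortcut -> display name
        visibility="<To All Users>",
    )
    # fill_dict maps these attributes onto the Add/Edit form widgets above
    return widget.fill_dict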
RedHatQE/cfme_tests
cfme/tests/control/test_bugs.py
cfme/intelligence/reports/widgets/menu_widgets.py
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.exceptions import NoSuchElementException
from widgetastic.widget import Text
from widgetastic.widget import View
from widgetastic_patternfly import BootstrapNav
from widgetastic_patternfly import BreadCrumb
from widgetastic_patternfly import Button
from widgetastic_patternfly import Dropdown

from cfme.base.ui import BaseLoggedInPage
from cfme.common import Taggable
from cfme.exceptions import CandidateNotFound
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.pretty import Pretty
from cfme.utils.wait import wait_for
from widgetastic_manageiq import Accordion
from widgetastic_manageiq import BaseEntitiesView
from widgetastic_manageiq import ItemsToolBarViewSelector
from widgetastic_manageiq import ManageIQTree
from widgetastic_manageiq import PaginationPane
from widgetastic_manageiq import Search
from widgetastic_manageiq import SummaryTable
from widgetastic_manageiq import Table


class StackToolbar(View):
    """The toolbar on the stacks page"""
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    lifecycle = Dropdown('Lifecycle')
    download = Dropdown('Download')
    view_selector = View.nested(ItemsToolBarViewSelector)


class StackDetailsToolbar(View):
    """The toolbar on the stacks detail page"""
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    lifecycle = Dropdown('Lifecycle')
    download = Button('Download summary in PDF format')


class StackSubpageToolbar(View):
    """The toolbar on the sub pages, like resources and security groups"""
    show_summary = Button('Show {} Summary')  # TODO How to get name in there?
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    lifecycle = Dropdown('Lifecycle')


class StackDetailsAccordion(View):
    """The accordion on the details page"""
    @View.nested
    class properties(Accordion):  # noqa
        nav = BootstrapNav('//div[@id="stack_prop"]//ul')

    @View.nested
    class relationships(Accordion):  # noqa
        nav = BootstrapNav('//div[@id="stack_rel"]//ul')


class StackEntities(BaseEntitiesView):
    """The entities on the main list page"""
    table = Table("//div[@id='gtl_div']//table")
    # todo: remove table and use entities instead


class StackDetailsEntities(View):
    """The entities on the detail page"""
    breadcrumb = BreadCrumb()
    title = Text('//div[@id="main-content"]//h1')
    properties = SummaryTable(title='Properties')
    lifecycle = SummaryTable(title='Lifecycle')
    relationships = SummaryTable(title='Relationships')
    smart_management = SummaryTable(title='Smart Management')
    # element attributes changed from id to class in upstream-fine+, capture both with locator


class StackSecurityGroupsEntities(View):
    """The entities of the resources page"""
    breadcrumb = BreadCrumb()
    title = Text('//div[@id="main-content"]//h1')
    security_groups = Table('//div[@id="list_grid"]//table')


class StackParametersEntities(View):
    """The entities of the resources page"""
    breadcrumb = BreadCrumb()
    title = Text('//div[@id="main-content"]//h1')
    parameters = Table('//div[@id="list_grid"]//table')


class StackOutputsEntities(View):
    """The entities of the resources page"""
    breadcrumb = BreadCrumb()
    title = Text('//div[@id="main-content"]//h1')
    outputs = Table('//div[@id="gtl_div"]//table')


class StackOutputsDetails(BaseLoggedInPage):
    title = Text('//div[@id="main-content"]//h1')

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        return self.in_stacks and self.entities.title.is_displayed


class StackResourcesEntities(View):
    """The entities of the resources page"""
    breadcrumb = BreadCrumb()
    title = Text('//div[@id="main-content"]//h1')
    resources = Table('//div[@id="list_grid"]//table')


class StackView(BaseLoggedInPage):
    """The base view for header and nav checking"""
    @property
    def in_stacks(self):
        """Determine if the Stacks page is currently open"""
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ['Compute', 'Clouds', 'Stacks']
        )


class StackAllView(StackView):
    """The main list page"""
    toolbar = View.nested(StackToolbar)
    search = View.nested(Search)
    including_entities = View.include(StackEntities, use_parent=True)
    paginator = PaginationPane()

    @View.nested
    class my_filters(Accordion):  # noqa
        ACCORDION_NAME = "My Filters"

        navigation = BootstrapNav('.//div/ul')
        tree = ManageIQTree()

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        return self.in_stacks and self.entities.title.text == 'Orchestration Stacks'


class ProviderStackAllView(StackAllView):

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        msg = '{} (All Orchestration Stacks)'.format(self.context['object'].name)
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
            self.entities.title.text == msg
        )


class StackDetailsView(StackView):
    """The detail page"""
    toolbar = View.nested(StackDetailsToolbar)
    sidebar = View.nested(StackDetailsAccordion)
    entities = View.nested(StackDetailsEntities)

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        expected_title = '{} (Summary)'.format(self.context['object'].name)
        return (
            self.in_stacks and
            self.entities.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)


class StackSecurityGroupsView(StackView):
    """The resources page"""
    toolbar = View.nested(StackSubpageToolbar)
    sidebar = View.nested(StackDetailsAccordion)
    entities = View.nested(StackSecurityGroupsEntities)

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        expected_title = '{} (All Security Groups)'.format(self.context['object'].name)
        return (
            self.in_stacks and
            self.entities.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)


class StackParametersView(StackView):
    """The resources page"""
    toolbar = View.nested(StackSubpageToolbar)
    sidebar = View.nested(StackDetailsAccordion)
    entities = View.nested(StackParametersEntities)

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        expected_title = '{} (Parameters)'.format(self.context['object'].name)
        return (
            self.in_stacks and
            self.entities.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)


class StackOutputsView(StackView):
    """The resources page"""
    toolbar = View.nested(StackSubpageToolbar)
    sidebar = View.nested(StackDetailsAccordion)
    entities = View.nested(StackOutputsEntities)

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        expected_title = '{} (Outputs)'.format(self.context['object'].name)
        return (
            self.in_stacks and
            self.entities.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)


class StackResourcesView(StackView):
    """The resources page"""
    toolbar = View.nested(StackSubpageToolbar)
    sidebar = View.nested(StackDetailsAccordion)
    entities = View.nested(StackResourcesEntities)

    @property
    def is_displayed(self):
        """Is this page currently being displayed"""
        expected_title = '{} (Resources)'.format(self.context['object'].name)
        return (
            self.in_stacks and
            self.entities.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)


@attr.s
class Stack(Pretty, BaseEntity, Taggable):
    _param_name = "Stack"
    pretty_attrs = ['name']

    name = attr.ib()
    provider = attr.ib()
    quad_name = attr.ib(default='stack')

    @property
    def exists(self):
        view = navigate_to(self.parent, 'All')
        view.toolbar.view_selector.select('List View')
        try:
            view.paginator.find_row_on_pages(view.table, name=self.name)
            return True
        except NoSuchElementException:
            return False

    def delete(self):
        """Delete the stack from detail view"""
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select('Remove this Orchestration Stack from Inventory',
                                               handle_alert=True)
        view.flash.assert_success_message('The selected Orchestration Stacks was deleted')

        def refresh():
            """Refresh the view"""
            if self.provider:
                self.provider.refresh_provider_relationships()
            view.browser.selenium.refresh()
            view.flush_widget_cache()

        wait_for(lambda: not self.exists, fail_condition=False, fail_func=refresh,
                 num_sec=15 * 60, delay=30, message='Wait for stack to be deleted')

    def wait_for_exists(self):
        """Wait for the row to show up"""
        view = navigate_to(self.parent, 'All')

        def refresh():
            """Refresh the view"""
            if self.provider:
                self.provider.refresh_provider_relationships()
            view.browser.refresh()
            view.flush_widget_cache()

        wait_for(lambda: self.exists, fail_condition=False, fail_func=refresh,
                 num_sec=15 * 60, delay=30, message='Wait for stack to exist')

    def retire_stack(self, wait=True):
        view = navigate_to(self.parent, 'All')
        view.toolbar.view_selector.select('List View')
        row = view.paginator.find_row_on_pages(view.table, name=self.name)
        row[0].check()
        view.toolbar.lifecycle.item_select('Retire selected Orchestration Stacks',
                                           handle_alert=True)
        view.flash.assert_success_message(
            'Retirement initiated for 1 Orchestration Stack from the CFME Database')
        if wait:
            def refresh():
                """Refresh the view"""
                if self.provider:
                    self.provider.refresh_provider_relationships()
                view.browser.refresh()
                view.flush_widget_cache()

            wait_for(lambda: not self.exists, fail_condition=False, fail_func=refresh,
                     delay=30, num_sec=15 * 60, message='Wait for stack to be deleted')


@attr.s
class StackCollection(BaseCollection):
    """Collection class for cfme.cloud.stack.Stack"""

    ENTITY = Stack

    def delete(self, *stacks):
        stacks = list(stacks)
        stack_names = {stack.name for stack in stacks}
        checked_stack_names = set()

        view = navigate_to(self, 'All')
        view.toolbar.view_selector.select('List View')

        for stack in stacks:
            try:
                view.table.row(name=stack.name)[0].check()
                checked_stack_names.add(stack.name)
            except NoSuchElementException:
                break

        if stack_names == checked_stack_names:
            view.toolbar.configuration.item_select('Remove Orchestration Stacks from Inventory',
                                                   handle_alert=True)
            view.flash.assert_no_error()
            view.flash.assert_success_message(
                'Delete initiated for {} Orchestration Stacks from the CFME Database'
                .format(len(stacks))
            )
            for stack in stacks:
                wait_for(lambda: not stack.exists, num_sec=15 * 60, delay=30,
                         message='Wait for stack to be deleted')
        else:
            raise ValueError(
                'Some Stacks ({!r}) not found in the UI'.format(stack_names - checked_stack_names))


@navigator.register(StackCollection, 'All')
class All(CFMENavigateStep):
    VIEW = StackAllView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    def step(self, *args, **kwargs):
        """Go to the all page"""
        self.prerequisite_view.navigation.select('Compute', 'Clouds', 'Stacks')

    def resetter(self, *args, **kwargs):
        """Reset the view"""
        self.view.toolbar.view_selector.select('Grid View')
        self.view.paginator.reset_selection()


@navigator.register(Stack, 'Details')
class Details(CFMENavigateStep):
    VIEW = StackDetailsView
    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self, *args, **kwargs):
        """Go to the details page"""
        self.prerequisite_view.toolbar.view_selector.select('List View')
        row = self.prerequisite_view.paginator.find_row_on_pages(
            self.prerequisite_view.table, name=self.obj.name)
        row.click()


@navigator.register(Stack, 'RelationshipSecurityGroups')
class RelationshipsSecurityGroups(CFMENavigateStep):
    VIEW = StackSecurityGroupsView
    prerequisite = NavigateToSibling('Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.sidebar.relationships.open()
        try:
            self.prerequisite_view.sidebar.relationships.nav.select(
                title='Show all Security Groups')
        except NoSuchElementException:
            raise CandidateNotFound({'No security groups for stack': 'cannot navigate'})


@navigator.register(Stack, 'RelationshipParameters')
class RelationshipParameters(CFMENavigateStep):
    VIEW = StackParametersView
    prerequisite = NavigateToSibling('Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.sidebar.relationships.open()
        try:
            self.prerequisite_view.sidebar.relationships.nav.select(title='Show all Parameters')
        except NoSuchElementException:
            raise CandidateNotFound({'No parameters for stack': 'cannot navigate'})


@navigator.register(Stack, 'RelationshipOutputs')
class RelationshipOutputs(CFMENavigateStep):
    VIEW = StackOutputsView
    prerequisite = NavigateToSibling('Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.sidebar.relationships.open()
        try:
            self.prerequisite_view.sidebar.relationships.nav.select(title='Show all Outputs')
        except NoSuchElementException:
            raise CandidateNotFound({'No outputs for stack': 'cannot navigate'})


@navigator.register(Stack, 'RelationshipResources')
class RelationshipResources(CFMENavigateStep):
    VIEW = StackResourcesView
    prerequisite = NavigateToSibling('Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.sidebar.relationships.open()
        try:
            self.prerequisite_view.sidebar.relationships.nav.select(title='Show all Resources')
        except NoSuchElementException:
            raise CandidateNotFound({'No resources for stack': 'cannot navigate'})
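A brief illustration of how the entity and navigator classes above are typically exercised. This is a sketch only: the `appliance` object and the `cloud_stacks` collection attribute name are assumptions, not taken from this module.

# Illustrative sketch only -- the collection attribute name is assumed.
from cfme.utils.appliance.implementations.ui import navigate_to

def retire_stack_if_present(appliance, stack_name, provider):
    collection = appliance.collections.cloud_stacks  # hypothetical attribute name
    stack = collection.instantiate(name=stack_name, provider=provider)
    if stack.exists:
        # 'Details' resolves through the navigator classes registered above
        view = navigate_to(stack, 'Details')
        assert view.is_displayed
        stack.retire_stack(wait=True)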
# -*- coding: utf-8 -*-
from collections import namedtuple
from datetime import datetime

import fauxfactory
import pytest
from widgetastic.widget import Text

from cfme import test_requirements
from cfme.control.explorer import conditions
from cfme.control.explorer import ControlExplorerView
from cfme.control.explorer.alert_profiles import ServerAlertProfile
from cfme.control.explorer.policies import VMCompliancePolicy
from cfme.control.explorer.policies import VMControlPolicy
from cfme.exceptions import CFMEExceptionOccured
from cfme.tests.control.test_basic import CONDITIONS
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for

pytestmark = [
    test_requirements.control,
    pytest.mark.tier(3)
]

BAD_CONDITIONS = [
    conditions.ReplicatorCondition,
    conditions.PodCondition,
    conditions.ContainerNodeCondition,
    conditions.ContainerImageCondition,
    conditions.ProviderCondition
]


def create_policy(request, collection):
    args = (VMControlPolicy, fauxfactory.gen_alpha())
    kwargs = {}
    policy = collection.create(*args)

    @request.addfinalizer
    def _delete():
        while policy.exists:
            policy.delete()

    return args, kwargs


def create_condition(request, collection):
    args = (
        conditions.VMCondition,
        fauxfactory.gen_alpha(),
        "fill_field(VM and Instance : Boot Time, BEFORE, Today)"
    )
    kwargs = {}
    condition = collection.create(*args)

    @request.addfinalizer
    def _delete():
        while condition.exists:
            condition.delete()

    return args, kwargs


def create_action(request, collection):
    args = (fauxfactory.gen_alpha(),)
    kwargs = {
        "action_type": "Tag",
        "action_values": {"tag": ("My Company Tags", "Department", "Accounting")}
    }
    action = collection.create(*args, **kwargs)

    @request.addfinalizer
    def _delete():
        while action.exists:
            action.delete()

    return args, kwargs


def create_alert(request, collection):
    args = (fauxfactory.gen_alpha(),)
    kwargs = {"timeline_event": True, "driving_event": "Hourly Timer"}
    alert = collection.create(*args, **kwargs)

    @request.addfinalizer
    def _delete():
        while alert.exists:
            alert.delete()

    return args, kwargs


ProfileCreateFunction = namedtuple('ProfileCreateFunction', ['name', 'fn'])

items = [
    ProfileCreateFunction("Policies", create_policy),
    ProfileCreateFunction("Conditions", create_condition),
    ProfileCreateFunction("Actions", create_action),
    ProfileCreateFunction("Alerts", create_alert)
]


@pytest.fixture(scope="module")
def collections(appliance):
    return {
        "Policies": appliance.collections.policies,
        "Conditions": appliance.collections.conditions,
        "Actions": appliance.collections.actions,
        "Alerts": appliance.collections.alerts
    }


@pytest.fixture
def vmware_vm(request, virtualcenter_provider):
    vm = virtualcenter_provider.appliance.collections.infra_vms.instantiate(
        random_vm_name("control"),
        virtualcenter_provider
    )
    vm.create_on_provider(find_in_cfme=True)
    request.addfinalizer(vm.cleanup_on_provider)
    return vm


@pytest.fixture
def hardware_reconfigured_alert(appliance):
    alert = appliance.collections.alerts.create(
        fauxfactory.gen_alpha(),
        evaluate=("Hardware Reconfigured", {"hardware_attribute": "RAM"}),
        timeline_event=True
    )
    yield alert
    alert.delete()


@pytest.fixture
def setup_disk_usage_alert(appliance):
    # get the current time
    timestamp = datetime.now()
    # setup the DB query
    table = appliance.db.client['miq_alert_statuses']
    query = appliance.db.client.session.query(table.description, table.evaluated_on)
    # configure the advanced settings and place a large file on the appliance
    # disk usage above 1 % will now trigger a disk_usage event
    appliance.update_advanced_settings(
        {"server": {"events": {"disk_usage_gt_percent": 1}}}
    )
    # create a 1 GB file on /var/www/miq/vmdb/log
    result = appliance.ssh_client.run_command(
        "dd if=/dev/zero of=/var/www/miq/vmdb/log/delete_me.txt count=1024 bs=1048576"
    )
    # verify that the command was successful
    assert not result.failed
    # setup the alert for firing
    expression = {"expression": "fill_count(Server.EVM Workers, >, 0)"}
    alert = appliance.collections.alerts.create(
        fauxfactory.gen_alpha(),
        based_on='Server',
        evaluate=("Expression (Custom)", expression),
        driving_event="Appliance Operation: Server High /var/www/miq/vmdb/log Disk Usage",
        notification_frequency="1 Minute",
    )
    alert_profile = appliance.collections.alert_profiles.create(
        ServerAlertProfile,
        "Alert profile for {}".format(alert.description),
        alerts=[alert]
    )
    alert_profile.assign_to("Selected Servers", selections=["Servers", "EVM"])

    yield alert, timestamp, query

    alert_profile.delete()
    alert.delete()
    appliance.update_advanced_settings(
        {"server": {"events": {"disk_usage_gt_percent": "<<reset>>"}}}
    )
    result = appliance.ssh_client.run_command("rm /var/www/miq/vmdb/log/delete_me.txt")
    # verify that the command was successful
    assert not result.failed


@pytest.mark.meta(blockers=[BZ(1155284)])
def test_scope_windows_registry_stuck(request, appliance, infra_provider):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: low
        initialEstimate: 1/6h
    """
    policy = appliance.collections.policies.create(
        VMCompliancePolicy,
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
              r"some value, INCLUDES, some content)"
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    profile = appliance.collections.policy_profiles.create(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy]
    )
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    # Now assign this malformed profile to a VM
    # not assuming that infra_provider is actually an InfraProvider type
    vm = infra_provider.appliance.collections.infra_vms.all()[0]
    vm.assign_policy_profiles(profile.description)
    # It should be screwed here, but do additional check
    navigate_to(appliance.server, 'Dashboard')
    view = navigate_to(appliance.collections.infra_vms, 'All')
    assert "except" not in view.entities.title.text.lower()
    vm.unassign_policy_profiles(profile.description)


@pytest.mark.meta(blockers=[BZ(1243357)], automates=[1243357])
def test_invoke_custom_automation(request, appliance):
    """This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear.

    Steps:
        * Go create new action, select Invoke Custom Automation
        * The form with additional fields should appear

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/6h
    """
    # The action is to have all possible fields filled, that way we can ensure it is good
    action = appliance.collections.actions.create(
        fauxfactory.gen_alpha(),
        "Invoke a Custom Automation",
        dict(
            message=fauxfactory.gen_alpha(),
            request=fauxfactory.gen_alpha(),
            attribute_1=fauxfactory.gen_alpha(),
            value_1=fauxfactory.gen_alpha(),
            attribute_2=fauxfactory.gen_alpha(),
            value_2=fauxfactory.gen_alpha(),
            attribute_3=fauxfactory.gen_alpha(),
            value_3=fauxfactory.gen_alpha(),
            attribute_4=fauxfactory.gen_alpha(),
            value_4=fauxfactory.gen_alpha(),
            attribute_5=fauxfactory.gen_alpha(),
            value_5=fauxfactory.gen_alpha()
        )
    )
    request.addfinalizer(lambda: action.delete() if action.exists else None)


@pytest.mark.meta(blockers=[BZ(1375093)], automates=[1375093])
def test_check_compliance_history(request, virtualcenter_provider, vmware_vm, appliance):
    """This test checks if the compliance history link in a VM details screen works.

    Steps:
        * Create any VM compliance policy
        * Assign it to a policy profile
        * Assign the policy profile to any VM
        * Perform the compliance check for the VM
        * Go to the VM details screen
        * Click on "History" row in Compliance InfoBox

    Result:
        Compliance history screen with last 10 checks should be opened

    Polarion:
        assignee: jdupuy
        initialEstimate: 1/4h
        casecomponent: Control
    """
    policy = appliance.collections.policies.create(
        VMCompliancePolicy,
        "Check compliance history policy {}".format(fauxfactory.gen_alpha()),
        active=True,
        scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vmware_vm.name)
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy_profile = appliance.collections.policy_profiles.create(
        policy.description, policies=[policy]
    )
    request.addfinalizer(lambda: policy_profile.delete() if policy_profile.exists else None)
    virtualcenter_provider.assign_policy_profiles(policy_profile.description)
    request.addfinalizer(lambda: virtualcenter_provider.unassign_policy_profiles(
        policy_profile.description))
    vmware_vm.check_compliance()
    vmware_vm.open_details(["Compliance", "History"])
    history_screen_title = Text(appliance.browser.widgetastic,
                                "//span[@id='explorer_title_text']").text
    assert history_screen_title == '"Compliance History" for Virtual Machine "{}"'.format(
        vmware_vm.name)


@pytest.mark.meta(blockers=[BZ(1395965), BZ(1491576)])
def test_delete_all_actions_from_compliance_policy(request, appliance):
    """We should not allow a compliance policy to be saved if there are no actions on the
    compliance event.

    Steps:
        * Create a compliance policy
        * Remove all actions

    Result:
        The policy shouldn't be saved.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        caseposneg: negative
        initialEstimate: 1/12h
    """
    policy = appliance.collections.policies.create(
        VMCompliancePolicy, fauxfactory.gen_alphanumeric()
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    with pytest.raises(AssertionError):
        policy.assign_actions_to_event("VM Compliance Check", [])


@pytest.mark.parametrize("create_function", items, ids=[item.name for item in items])
def test_control_identical_descriptions(request, create_function, collections, appliance):
    """CFME should not allow policies, alerts, profiles, actions and other items to be
    created if an item with the same description already exists.

    Steps:
        * Create an item
        * Create the same item again

    Result:
        The item shouldn't be created.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: low
        initialEstimate: 1/12h
    """
    args, kwargs = create_function.fn(request, collections[create_function.name])
    flash = appliance.browser.create_view(ControlExplorerView).flash
    try:
        collections[create_function.name].create(*args, **kwargs)
    except (TimedOutError, AssertionError):
        flash.assert_message("Description has already been taken")
        # force navigation away from the page so the browser is not stuck on the edit page
        navigate_to(appliance.server, 'ControlExplorer', force=True)


@pytest.mark.meta(blockers=[BZ(1231889)], automates=[1231889])
def test_vmware_alarm_selection_does_not_fail(request, appliance):
    """Test the bug that causes CFME UI to explode when VMware Alarm type is selected.
    We assert that the alert using this type is simply created. Then we destroy
    the alert.

    Metadata:
        test_flag: alerts

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: low
        initialEstimate: 1/12h
    """
    try:
        alert = appliance.collections.alerts.create(
            "Trigger by CPU {}".format(fauxfactory.gen_alpha(length=4)),
            active=True,
            based_on="VM and Instance",
            evaluate=("VMware Alarm", {}),
            notification_frequency="5 Minutes"
        )
        request.addfinalizer(lambda: alert.delete() if alert.exists else None)
    except CFMEExceptionOccured as e:
        pytest.fail("CFME has thrown an error: {}".format(str(e)))


def test_alert_ram_reconfigured(hardware_reconfigured_alert):
    """Tests the bug when it was not possible to save an alert with the RAM option in
    hardware attributes.

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/12h
    """
    view = navigate_to(hardware_reconfigured_alert, "Details")
    attr = view.hardware_reconfigured_parameters.get_text_of("Hardware Attribute")
    assert attr == "RAM Increased"


@pytest.mark.tier(2)
@test_requirements.alert
def test_alert_for_disk_usage(setup_disk_usage_alert):
    """
    Bugzilla:
        1658670
        1672698

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/6hr
        testSteps:
            1. Go to Control > Explorer > Alerts
            2. Configuration > Add new alert
            3. Based on = Server
            4. What to evaluate = Expression (Custom)
            5. Driving Event =
                "Appliance Operation: Server High /var/www/miq/vmdb/log Disk Usage"
            6. Assign the alert to a Alert Profile
            7. Assign the Alert Profile to the Server
            8. In advanced config, change:
                events:
                  :disk_usage_gt_percent: 80
                to:
                events:
                  :disk_usage_gt_percent: 1
            9. dd a file in /var/www/miq/vmdb/log large enough to trigger 1% disk usage
        expectedResults:
            1.
            2.
            3.
            4.
            5.
            6.
            7.
            8.
            9. the alert should fire, and the event of type
                "evm_server_log_disk_usage" should trigger
    """
    alert, timestamp, query = setup_disk_usage_alert

    def _check_query():
        query_result = query.all()
        if query_result:
            # here query_result[0][0] and query_result[0][1] correspond to the description and
            # timestamp pulled from the database, respectively
            return alert.description == query_result[0][0] and timestamp < query_result[0][1]
        else:
            return False

    # wait for the alert to appear in the miq_alert_statuses table
    wait_for(
        _check_query,
        delay=5,
        num_sec=600,
        message="Waiting for alert {} to appear in DB".format(alert.description)
    )


@pytest.mark.parametrize(
    "condition_class", CONDITIONS, ids=lambda condition_class: condition_class.__name__
)
def test_accordion_after_condition_creation(appliance, condition_class):
    """
    Bugzilla:
        1683697

    Polarion:
        assignee: jdupuy
        casecomponent: Control
        caseimportance: medium
        initialEstimate: 1/12hr

    For this test, we must make a condition 'manually' so that we can access the view
    during the condition creation.
    """
    if BZ(1683697).blocks and condition_class in BAD_CONDITIONS:
        pytest.skip("Skipping because {} conditions are impacted by BZ 1683697"
                    .format(condition_class.__name__))
    condition = appliance.collections.conditions.create(
        condition_class,
        fauxfactory.gen_alpha(),
        expression="fill_field({} : Name, IS NOT EMPTY)".format(condition_class.FIELD_VALUE)
    )
    view = condition.create_view(conditions.ConditionDetailsView, wait="10s")
    assert view.conditions.tree.currently_selected == [
        "All Conditions",
        "{} Conditions".format(condition_class.TREE_NODE),
        condition.description
    ]
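The disk-usage test above polls the miq_alert_statuses table through the appliance's SQLAlchemy client until a matching row shows up. That "poll a DB table until a predicate holds" pattern generalizes; a sketch of it as a standalone helper follows, using only the table and column names the test itself already uses (the helper function is hypothetical, not part of the repository).

# Illustrative sketch only -- the helper name is hypothetical.
from cfme.utils.wait import wait_for

def wait_for_alert_status(appliance, description, since, num_sec=600, delay=5):
    table = appliance.db.client['miq_alert_statuses']
    query = appliance.db.client.session.query(table.description, table.evaluated_on)

    def _seen():
        rows = query.filter(table.description == description).all()
        # evaluated_on must be newer than the timestamp recorded before setup
        return any(evaluated_on > since for _, evaluated_on in rows)

    wait_for(_seen, delay=delay, num_sec=num_sec,
             message="Waiting for alert {} to appear in DB".format(description))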
RedHatQE/cfme_tests
cfme/tests/control/test_bugs.py
cfme/cloud/stack.py
from collections import Mapping
from contextlib import contextmanager
from itertools import izip

from cached_property import cached_property
from sqlalchemy import MetaData, create_engine, event, inspect
from sqlalchemy.exc import ArgumentError, DisconnectionError, InvalidRequestError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import Pool

from fixtures.pytest_store import store
from utils import conf
from utils.log import logger


@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    """ping_connection event hook, used to reconnect db sessions that time out

    Note:

        See also: :ref:`Connection Invalidation <sqlalchemy:pool_connection_invalidation>`

    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except StandardError:
        raise DisconnectionError
    cursor.close()


class Db(Mapping):
    """Helper class for interacting with a CFME database using SQLAlchemy

    Args:
        hostname: base url to be used (default is from current_appliance)
        credentials: name of credentials to use from :py:attr:`utils.conf.credentials`
            (default ``database``)

    Provides convenient attributes to common sqlalchemy objects related to this DB,
    as well as a Mapping interface to access and reflect database tables. Where possible,
    attributes are cached.

    Db objects support getting tables by name via the mapping interface::

        table = db['table_name']

    Usage:

        # Usually used to query the DB for info, here's a common query
        for vm in db.session.query(db['vms']).all():
            print(vm.name)
            print(vm.guid)

        # List comprehension to get all templates
        [(vm.name, vm.guid) for vm in session.query(db['vms']).all() if vm.template is True]

        # Use the transaction manager for write operations:
        with db.transaction:
            db.session.query(db['vms']).all().delete()

    Note:

        Creating a table object requires a call to the database so that SQLAlchemy can do
        reflection to determine the table's structure (columns, keys, indices, etc). On
        a latent connection, this can be extremely slow, which will affect methods that
        return tables, like the mapping interface or :py:meth:`values`.

    """
    def __init__(self, hostname=None, credentials=None, port=None):
        self._table_cache = {}
        self.hostname = hostname or store.current_appliance.db.address
        self.port = port or store.current_appliance.db_port
        self.credentials = credentials or conf.credentials['database']

    def __getitem__(self, table_name):
        """Access tables as items contained in this db

        Usage:

            # To get a table called 'table_name':
            db['table_name']

        This may return ``None`` in the case where a table is found but reflection fails.

        """
        try:
            return self._table(table_name)
        except InvalidRequestError:
            raise KeyError('Table {} could not be found'.format(table_name))

    def __iter__(self):
        """Iterator of table names in this db"""
        return self.keys()

    def __len__(self):
        """Number of tables in this db"""
        return len(self.table_names)

    def __contains__(self, table_name):
        """Whether or not the named table is in this db"""
        return table_name in self.table_names

    def keys(self):
        """Iterator of table names in this db"""
        return (table_name for table_name in self.table_names)

    def items(self):
        """Iterator of ``(table_name, table)`` pairs"""
        return izip(self.keys(), self.values())

    def values(self):
        """Iterator of tables in this db"""
        return (self[table_name] for table_name in self.table_names)

    def get(self, table_name, default=None):
        """table getter

        Args:
            table_name: Name of the table to get
            default: Default value to return if ``table_name`` is not found.

        Returns: a table if ``table_name`` exists, otherwise 'None' or the passed-in default

        """
        try:
            return self[table_name]
        except KeyError:
            return default

    def copy(self):
        """Copy this database instance, keeping the same credentials and hostname"""
        return type(self)(self.hostname, self.credentials)

    def __eq__(self, other):
        """Check if this db is equal to another db"""
        try:
            return self.hostname == other.hostname
        except:
            return False

    def __ne__(self, other):
        """Check if this db is not equal to another db"""
        return not self == other

    @cached_property
    def engine(self):
        """The :py:class:`Engine <sqlalchemy:sqlalchemy.engine.Engine>` for this database

        It uses pessimistic disconnection handling, checking that the database is still
        connected before executing commands.

        """
        return create_engine(self.db_url, echo_pool=True)

    @cached_property
    def sessionmaker(self):
        """A :py:class:`sessionmaker <sqlalchemy:sqlalchemy.orm.session.sessionmaker>`

        Used to make new sessions with this database, as needed.

        """
        return sessionmaker(bind=self.engine)

    @cached_property
    def table_base(self):
        """Base class for all tables returned by this database

        This base class is created using
        :py:class:`declarative_base <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`.

        """
        return declarative_base(metadata=self.metadata)

    @cached_property
    def metadata(self):
        """:py:class:`MetaData <sqlalchemy:sqlalchemy.schema.MetaData>` for this database

        This can be used for introspection of reflected items.

        Note:

            Tables that haven't been reflected won't show up in metadata. To reflect
            a table, use :py:meth:`reflect_table`.

        """
        return MetaData(bind=self.engine)

    @cached_property
    def db_url(self):
        """The connection URL for this database, including credentials"""
        template = "postgresql://{username}:{password}@{host}:{port}/vmdb_production"
        result = template.format(host=self.hostname, port=self.port, **self.credentials)
        logger.info("[DB] db_url is %s", result)
        return result

    @cached_property
    def table_names(self):
        """A sorted list of table names available in this database."""
        # rails table names follow similar rules as pep8 identifiers; expose them as such
        return sorted(inspect(self.engine).get_table_names())

    @cached_property
    def session(self):
        """Returns a :py:class:`Session <sqlalchemy:sqlalchemy.orm.session.Session>`

        This is used for database queries. For writing to the database, start a
        :py:meth:`transaction`.

        Note:

            This attribute is cached. In cases where a new session needs to be explicitly
            created, use :py:meth:`sessionmaker`.

        """
        return self.sessionmaker(autocommit=True)

    @property
    @contextmanager
    def transaction(self):
        """Context manager for simple transaction management

        Sessions understand the concept of transactions, and provide context managers to
        handle conditionally committing or rolling back transactions as needed.

        Note:

            Sessions automatically commit transactions by default. For predictable results
            when writing to the database, use the transaction manager.

        Usage:

            with db.transaction:
                db.session.do_something()

        """
        with self.session.begin():
            yield

    def reflect_table(self, table_name):
        """Populate :py:attr:`metadata` with information on a table

        Args:
            table_name: The name of a table to reflect

        """
        self.metadata.reflect(only=[table_name])

    def _table(self, table_name):
        """Retrieves, reflects, and caches table objects

        Actual implementation of __getitem__
        """
        try:
            return self._table_cache[table_name]
        except KeyError:
            self.reflect_table(table_name)
            table = self.metadata.tables[table_name]
            table_dict = {
                '__table__': table,
                '__tablename__': table_name
            }

            try:
                table_cls = type(str(table_name), (self.table_base,), table_dict)
                self._table_cache[table_name] = table_cls
                return table_cls
            except ArgumentError:
                # This usually happens on join tables with no PKs
                logger.info('Unable to create table class for table "{}"'.format(table_name))
                return None


@contextmanager
def database_on_server(hostname, **kwargs):
    db_obj = Db(hostname=hostname, **kwargs)
    yield db_obj
import pytest

from cfme.common.vm import VM
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from utils import testgen
from utils.wait import wait_for
from utils.generators import random_vm_name

pytest_generate_tests = testgen.generate(
    [VMwareProvider, RHEVMProvider],
    required_fields=['small_template'],
    scope="module")

pytestmark = [
    pytest.mark.usefixtures('setup_provider'),
    pytest.mark.long_running,
    pytest.mark.tier(2)]


@pytest.yield_fixture(scope='module')
def small_vm(provider, small_template_modscope):
    vm = VM.factory(random_vm_name(context='reconfig'), provider, small_template_modscope)
    vm.create_on_provider(find_in_cfme=True, allow_skip="default")
    vm.refresh_relationships()
    yield vm
    vm.delete_from_provider()


@pytest.fixture(scope='function')
def ensure_vm_stopped(small_vm):
    if small_vm.is_pwr_option_available_in_cfme(small_vm.POWER_OFF):
        small_vm.power_control_from_provider(small_vm.POWER_OFF)
        small_vm.wait_for_vm_state_change(small_vm.STATE_OFF)
    else:
        raise Exception("Unknown power state - unable to continue!")


@pytest.fixture(scope='function')
def ensure_vm_running(small_vm):
    if small_vm.is_pwr_option_available_in_cfme(small_vm.POWER_ON):
        small_vm.power_control_from_provider(small_vm.POWER_ON)
        small_vm.wait_for_vm_state_change(small_vm.STATE_ON)
    else:
        raise Exception("Unknown power state - unable to continue!")


@pytest.mark.parametrize('change_type', ['cores_per_socket', 'sockets', 'memory'])
def test_vm_reconfig_add_remove_hw_cold(
        provider, small_vm, ensure_vm_stopped, change_type):
    orig_config = small_vm.configuration.copy()
    new_config = orig_config.copy()
    if change_type == 'cores_per_socket':
        new_config.hw.cores_per_socket = new_config.hw.cores_per_socket + 1
    elif change_type == 'sockets':
        new_config.hw.sockets = new_config.hw.sockets + 1
    elif change_type == 'memory':
        new_config.hw.mem_size = new_config.hw.mem_size_mb + 512
        new_config.hw.mem_size_unit = 'MB'

    small_vm.reconfigure(new_config)

    wait_for(
        lambda: small_vm.configuration == new_config, timeout=360, delay=45,
        fail_func=small_vm.refresh_relationships,
        message="confirm that {} was added".format(change_type))

    small_vm.reconfigure(orig_config)

    wait_for(
        lambda: small_vm.configuration == orig_config, timeout=360, delay=45,
        fail_func=small_vm.refresh_relationships,
        message="confirm that previously-added {} was removed".format(change_type))


@pytest.mark.parametrize('disk_type', ['thin', 'thick'])
@pytest.mark.parametrize(
    'disk_mode', ['persistent', 'independent_persistent', 'independent_nonpersistent'])
@pytest.mark.uncollectif(lambda provider: provider.one_of(RHEVMProvider))
def test_vm_reconfig_add_remove_disk_cold(
        provider, small_vm, ensure_vm_stopped, disk_type, disk_mode):
    orig_config = small_vm.configuration.copy()
    new_config = orig_config.copy()
    new_config.add_disk(
        size=5, size_unit='GB', type=disk_type, mode=disk_mode)

    small_vm.reconfigure(new_config)

    wait_for(
        lambda: small_vm.configuration == new_config, timeout=360, delay=45,
        fail_func=small_vm.refresh_relationships,
        message="confirm that disk was added")

    small_vm.reconfigure(orig_config)

    wait_for(
        lambda: small_vm.configuration == orig_config, timeout=360, delay=45,
        fail_func=small_vm.refresh_relationships,
        message="confirm that previously-added disk was removed")
dajohnso/cfme_tests
cfme/tests/infrastructure/test_vm_reconfigure.py
utils/db.py
"""Precompute coefficients of several series expansions of Wright's generalized Bessel function Phi(a, b, x). See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x. """ from argparse import ArgumentParser, RawTextHelpFormatter import numpy as np from scipy.integrate import quad from scipy.optimize import minimize_scalar, curve_fit from time import time try: import sympy # type: ignore[import] from sympy import EulerGamma, Rational, S, Sum, \ factorial, gamma, gammasimp, pi, polygamma, symbols, zeta from sympy.polys.polyfuncs import horner # type: ignore[import] except ImportError: pass def series_small_a(): """Tylor series expansion of Phi(a, b, x) in a=0 up to order 5. """ order = 5 a, b, x, k = symbols("a b x k") A = [] # terms with a X = [] # terms with x B = [] # terms with b (polygammas) # Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i]) expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity)) expression = gamma(b)/sympy.exp(x) * expression # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0) for n in range(0, order+1): term = expression.diff(a, n).subs(a, 0).simplify().doit() # set the whole bracket involving polygammas to 1 x_part = (term.subs(polygamma(0, b), 1) .replace(polygamma, lambda *args: 0)) # sign convetion: x part always positive x_part *= (-1)**n A.append(a**n/factorial(n)) X.append(horner(x_part)) B.append(horner((term/x_part).simplify())) s = "Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n" s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n" for name, c in zip(['A', 'X', 'B'], [A, X, B]): for i in range(len(c)): s += f"\n{name}[{i}] = " + str(c[i]) return s # expansion of digamma def dg_series(z, n): """Symbolic expansion of digamma(z) in z=0 to order n. See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2 """ k = symbols("k") return -1/z - EulerGamma + \ sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1)) def pg_series(k, z, n): """Symbolic expansion of polygamma(k, z) in z=0 to order n.""" return sympy.diff(dg_series(z, n+k), z, k) def series_small_a_small_b(): """Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5. Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and polygamma functions. digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2) digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2) polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2) and so on. """ order = 5 a, b, x, k = symbols("a b x k") M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3") c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3} A = [] # terms with a X = [] # terms with x B = [] # terms with b (polygammas expanded) C = [] # terms that generate B # Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i]) # B[0] = 1 # B[k] = sum(C[k] * b**k/k!, k=0..) # Note: C[k] can be obtained from a series expansion of 1/gamma(b). expression = gamma(b)/sympy.exp(x) * \ Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity)) # nth term of taylor series in a=0: a^n/n! 
* (d^n Phi(a, b, x)/da^n at a=0) for n in range(0, order+1): term = expression.diff(a, n).subs(a, 0).simplify().doit() # set the whole bracket involving polygammas to 1 x_part = (term.subs(polygamma(0, b), 1) .replace(polygamma, lambda *args: 0)) # sign convetion: x part always positive x_part *= (-1)**n # expansion of polygamma part with 1/gamma(b) pg_part = term/x_part/gamma(b) if n >= 1: # Note: highest term is digamma^n pg_part = pg_part.replace(polygamma, lambda k, x: pg_series(k, x, order+1+n)) pg_part = (pg_part.series(b, 0, n=order+1-n) .removeO() .subs(polygamma(2, 1), -2*zeta(3)) .simplify() ) A.append(a**n/factorial(n)) X.append(horner(x_part)) B.append(pg_part) # Calculate C and put in the k! C = sympy.Poly(B[1].subs(c_subs), b).coeffs() C.reverse() for i in range(len(C)): C[i] = (C[i] * factorial(i)).simplify() s = "Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5." s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n" s += "B[0] = 1\n" s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n" s += "\nM_PI = pi" s += "\nM_EG = EulerGamma" s += "\nM_Z3 = zeta(3)" for name, c in zip(['A', 'X'], [A, X]): for i in range(len(c)): s += f"\n{name}[{i}] = " s += str(c[i]) # For C, do also compute the values numerically for i in range(len(C)): s += f"\n# C[{i}] = " s += str(C[i]) s += f"\nC[{i}] = " s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)}) .evalf(17)) # Does B have the assumed structure? s += "\n\nTest if B[i] does have the assumed structure." s += "\nC[i] are derived from B[1] allone." s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .." test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)]) test = (test - B[2].subs(c_subs)).simplify() s += f"\ntest successful = {test==S(0)}" s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .." test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)]) test = (test - B[3].subs(c_subs)).simplify() s += f"\ntest successful = {test==S(0)}" return s def asymptotic_series(): """Asymptotic expansion for large x. Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k Z = (a*x)^(1/(1+a)) Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and a_1). With slightly different notation, Paris (2017) lists coefficients c_k up to order k=3. Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and C_k = C_0 * (-a/(1+a))^k * c_k """ order = 8 class g(sympy.Function): """Helper function g according to Wright (1935) g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...) Note: Wright (1935) uses square root of above definition. """ nargs = 3 @classmethod def eval(cls, n, rho, v): if not n >= 0: raise ValueError("must have n >= 0") elif n == 0: return 1 else: return g(n-1, rho, v) \ + gammasimp(gamma(rho+2+n)/gamma(rho+2)) \ / gammasimp(gamma(3+n)/gamma(3))*v**n class coef_C(sympy.Function): """Calculate coefficients C_m for integer m. C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b) * g(rho, v)^(-m-1/2) """ nargs = 3 @classmethod def eval(cls, m, rho, beta): if not m >= 0: raise ValueError("must have m >= 0") v = symbols("v") expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2)) res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m) res = res * (gamma(m + Rational(1, 2)) / (2*pi) * (2/(rho+1))**(m + Rational(1, 2))) return res # in order to have nice ordering/sorting of expressions, we set a = xa. 
xa, b, xap1 = symbols("xa b xap1") C0 = coef_C(0, xa, b) # a1 = a(1, rho, beta) s = "Asymptotic expansion for large x\n" s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n" s += " * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n" s += "Z = pow(a * x, 1/(1+a))\n" s += "A[k] = pow(a, k)\n" s += "B[k] = pow(b, k)\n" s += "Ap1[k] = pow(1+a, k)\n\n" s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n" for i in range(1, order+1): expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify() factor = [x.denominator() for x in sympy.Poly(expr).coeffs()] factor = sympy.lcm(factor) expr = (expr * factor).simplify().collect(b, sympy.factor) expr = expr.xreplace({xa+1: xap1}) s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n" s += f"C[{i}] *= {str(expr)}\n\n" import re re_a = re.compile(r'xa\*\*(\d+)') s = re_a.sub(r'A[\1]', s) re_b = re.compile(r'b\*\*(\d+)') s = re_b.sub(r'B[\1]', s) s = s.replace('xap1', 'Ap1[1]') s = s.replace('xa', 'a') # max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10 # or more digits. re_digits = re.compile(r'(\d{10,})') s = re_digits.sub(r'\1.', s) return s def optimal_epsilon_integral(): """Fit optimal choice of epsilon for integral representation. The integrand of int_0^pi P(eps, a, b, x, phi) * dphi can exhibit oscillatory behaviour. It stems from the cosine of P and can be minimized by minimizing the arc length of the argument f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi of cos(f(phi)). We minimize the arc length in eps for a grid of values (a, b, x) and fit a parametric function to it. """ def fp(eps, a, b, x, phi): """Derivative of f w.r.t. phi.""" eps_a = np.power(1. * eps, -a) return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b def arclength(eps, a, b, x, epsrel=1e-2, limit=100): """Compute Arc length of f. 
Note that the arg length of a function f fro t0 to t1 is given by int_t0^t1 sqrt(1 + f'(t)^2) dt """ return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2), 0, np.pi, epsrel=epsrel, limit=100)[0] # grid of minimal arc length values data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8] data_b = [0, 1, 4, 7, 10] data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4] data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x) data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(), data_x.flatten()) best_eps = [] for i in range(data_x.size): best_eps.append( minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i], data_x[i]), bounds=(1e-3, 1000), method='Bounded', options={'xatol': 1e-3}).x ) best_eps = np.array(best_eps) # pandas would be nice, but here a dictionary is enough df = {'a': data_a, 'b': data_b, 'x': data_x, 'eps': best_eps, } def func(data, A0, A1, A2, A3, A4, A5): """Compute parametric function to fit.""" a = data['a'] b = data['b'] x = data['x'] return (A0 * b * np.exp(-0.5 * a) + np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a) + A4 / (1 + np.exp(A5 * a)))) func_params = list(curve_fit(func, df, df['eps'], method='trf')[0]) s = "Fit optimal eps for integrand P via minimal arc length\n" s += "with parametric function:\n" s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n" s += " - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n" s += "Fitted parameters A0 to A5 are:\n" s += ', '.join(['{:.5g}'.format(x) for x in func_params]) return s def main(): t0 = time() parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument('action', type=int, choices=[1, 2, 3, 4], help='chose what expansion to precompute\n' '1 : Series for small a\n' '2 : Series for small a and small b\n' '3 : Asymptotic series for large x\n' ' This may take some time (>4h).\n' '4 : Fit optimal eps for integral representation.' ) args = parser.parse_args() switch = {1: lambda: print(series_small_a()), 2: lambda: print(series_small_a_small_b()), 3: lambda: print(asymptotic_series()), 4: lambda: print(optimal_epsilon_integral()) } switch.get(args.action, lambda: print("Invalid input."))() print("\n{:.1f} minutes elapsed.\n".format((time() - t0)/60)) if __name__ == '__main__': main()
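The expansions precomputed above all approximate the defining series Phi(a, b, x) = sum_k x**k / (k! * Gamma(a*k + b)). A quick numerical cross-check of that definition against scipy's implementation is sketched below; it assumes scipy >= 1.7, where scipy.special.wright_bessel was introduced.

# Illustrative sketch only -- assumes scipy >= 1.7 for wright_bessel.
import numpy as np
from scipy.special import gamma, wright_bessel

def phi_direct(a, b, x, kmax=100):
    # direct truncated summation of the defining series
    k = np.arange(kmax)
    return np.sum(x**k / (gamma(k + 1) * gamma(a * k + b)))

a, b, x = 0.5, 1.5, 2.0
assert np.isclose(phi_direct(a, b, x), wright_bessel(a, b, x), rtol=1e-10)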
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest

import scipy.special as sc


class TestInverseErrorFunction:
    def test_compliment(self):
        # Test erfcinv(1 - x) == erfinv(x)
        x = np.linspace(-1, 1, 101)
        assert_allclose(sc.erfcinv(1 - x), sc.erfinv(x), rtol=0, atol=1e-15)

    def test_literal_values(self):
        # calculated via https://keisan.casio.com/exec/system/1180573448
        # for y = 0, 0.1, ... , 0.9
        actual = sc.erfinv(np.linspace(0, 0.9, 10))
        expected = [
            0,
            0.08885599049425768701574,
            0.1791434546212916764928,
            0.27246271472675435562,
            0.3708071585935579290583,
            0.4769362762044698733814,
            0.5951160814499948500193,
            0.7328690779592168522188,
            0.9061938024368232200712,
            1.163087153676674086726,
        ]
        assert_allclose(actual, expected, rtol=0, atol=1e-15)

    @pytest.mark.parametrize(
        'f, x, y',
        [
            (sc.erfinv, -1, -np.inf),
            (sc.erfinv, 0, 0),
            (sc.erfinv, 1, np.inf),
            (sc.erfinv, -100, np.nan),
            (sc.erfinv, 100, np.nan),
            (sc.erfcinv, 0, np.inf),
            (sc.erfcinv, 1, -0.0),
            (sc.erfcinv, 2, -np.inf),
            (sc.erfcinv, -100, np.nan),
            (sc.erfcinv, 100, np.nan),
        ],
        ids=[
            'erfinv at lower bound',
            'erfinv at midpoint',
            'erfinv at upper bound',
            'erfinv below lower bound',
            'erfinv above upper bound',
            'erfcinv at lower bound',
            'erfcinv at midpoint',
            'erfcinv at upper bound',
            'erfcinv below lower bound',
            'erfcinv above upper bound',
        ]
    )
    def test_domain_bounds(self, f, x, y):
        assert_equal(f(x), y)

    def test_erfinv_asympt(self):
        # regression test for gh-12758: erfinv(x) loses precision at small x
        # expected values precomputed with mpmath:
        # >>> mpmath.dps=100
        # >>> expected = [float(mpmath.erfinv(t)) for t in x]
        x = np.array([1e-20, 1e-15, 1e-14, 1e-10, 1e-8, 0.9e-7, 1.1e-7, 1e-6])
        expected = np.array([8.86226925452758e-21,
                             8.862269254527581e-16,
                             8.86226925452758e-15,
                             8.862269254527581e-11,
                             8.86226925452758e-09,
                             7.97604232907484e-08,
                             9.74849617998037e-08,
                             8.8622692545299e-07])
        assert_allclose(sc.erfinv(x), expected, rtol=1e-10)
        # also test the roundtrip consistency
        assert_allclose(sc.erf(sc.erfinv(x)), x, rtol=1e-10)
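The comment in test_erfinv_asympt sketches how the reference values were generated; spelled out as a runnable snippet (requires mpmath), it looks like this:

# Illustrative sketch only -- reproduces the precomputation recipe from the
# test comment above; requires the mpmath package.
import mpmath

mpmath.mp.dps = 100  # work with 100 significant digits
x = [1e-20, 1e-15, 1e-14, 1e-10, 1e-8, 0.9e-7, 1.1e-7, 1e-6]
expected = [float(mpmath.erfinv(t)) for t in x]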
tylerjereddy/scipy
scipy/special/tests/test_erfinv.py
scipy/special/_precompute/wright_bessel.py
"""Small modules to cope with python 2 vs 3 incompatibilities inside numpy.distutils """ from __future__ import division, absolute_import, print_function import sys def get_exception(): return sys.exc_info()[1]
from __future__ import division, absolute_import, print_function

import sys
import os
import shutil
import mmap
import pytest

from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp

from numpy import (
    memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy.compat import Path

from numpy import arange, allclose, asarray
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, suppress_warnings
    )


class TestMemmap(object):
    def setup(self):
        self.tmpfp = NamedTemporaryFile(prefix='mmap')
        self.tempdir = mkdtemp()
        self.shape = (3, 4)
        self.dtype = 'float32'
        self.data = arange(12, dtype=self.dtype)
        self.data.resize(self.shape)

    def teardown(self):
        self.tmpfp.close()
        shutil.rmtree(self.tempdir)

    def test_roundtrip(self):
        # Write data to file
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp  # Test __del__ machinery, which handles cleanup

        # Read data back from file
        newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
                       shape=self.shape)
        assert_(allclose(self.data, newfp))
        assert_array_equal(self.data, newfp)
        assert_equal(newfp.flags.writeable, False)

    def test_open_with_filename(self):
        tmpname = mktemp('', 'mmap', dir=self.tempdir)
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp

    def test_unnamed_file(self):
        with TemporaryFile() as f:
            fp = memmap(f, dtype=self.dtype, shape=self.shape)
            del fp

    def test_attributes(self):
        offset = 1
        mode = "w+"
        fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
                    shape=self.shape, offset=offset)
        assert_equal(offset, fp.offset)
        assert_equal(mode, fp.mode)
        del fp

    def test_filename(self):
        tmpname = mktemp('', 'mmap', dir=self.tempdir)
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        abspath = os.path.abspath(tmpname)
        fp[:] = self.data[:]
        assert_equal(abspath, fp.filename)
        b = fp[:1]
        assert_equal(abspath, b.filename)
        del b
        del fp

    @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
    def test_path(self):
        tmpname = mktemp('', 'mmap', dir=self.tempdir)
        fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
                    shape=self.shape)
        # os.path.realpath does not resolve symlinks on Windows
        # see: https://bugs.python.org/issue9949
        # use Path.resolve, just as memmap class does internally
        abspath = str(Path(tmpname).resolve())
        fp[:] = self.data[:]
        assert_equal(abspath, str(fp.filename.resolve()))
        b = fp[:1]
        assert_equal(abspath, str(b.filename.resolve()))
        del b
        del fp

    def test_filename_fileobj(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
                    shape=self.shape)
        assert_equal(fp.filename, self.tmpfp.name)

    @pytest.mark.skipif(sys.platform == 'gnu0',
                        reason="Known to fail on hurd")
    def test_flush(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        assert_equal(fp[0], self.data[0])
        fp.flush()

    def test_del(self):
        # Make sure a view does not delete the underlying mmap
        fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                         shape=self.shape)
        fp_base[0] = 5
        fp_view = fp_base[0:1]
        assert_equal(fp_view[0], 5)
        del fp_view
        # Should still be able to access and assign values after
        # deleting the view
        assert_equal(fp_base[0], 5)
        fp_base[0] = 6
        assert_equal(fp_base[0], 6)

    def test_arithmetic_drops_references(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = (fp + 10)
        if isinstance(tmp, memmap):
            assert_(tmp._mmap is not fp._mmap)

    def test_indexing_drops_references(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = fp[(1, 2), (2, 3)]
        if isinstance(tmp, memmap):
            assert_(tmp._mmap is not fp._mmap)

    def test_slicing_keeps_references(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        assert_(fp[:2, :2]._mmap is fp._mmap)

    def test_view(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        new1 = fp.view()
        new2 = new1.view()
        assert_(new1.base is fp)
        assert_(new2.base is fp)
        new_array = asarray(fp)
        assert_(new_array.base is fp)

    def test_ufunc_return_ndarray(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        with suppress_warnings() as sup:
            sup.filter(FutureWarning, "np.average currently does not preserve")
            for unary_op in [sum, average, product]:
                result = unary_op(fp)
                assert_(isscalar(result))
                assert_(result.__class__ is self.data[0, 0].__class__)

                assert_(unary_op(fp, axis=0).__class__ is ndarray)
                assert_(unary_op(fp, axis=1).__class__ is ndarray)

        for binary_op in [add, subtract, multiply]:
            assert_(binary_op(fp, self.data).__class__ is ndarray)
            assert_(binary_op(self.data, fp).__class__ is ndarray)
            assert_(binary_op(fp, fp).__class__ is ndarray)

        fp += 1
        assert(fp.__class__ is memmap)
        add(fp, 1, out=fp)
        assert(fp.__class__ is memmap)

    def test_getitem(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        assert_(fp[1:, :-1].__class__ is memmap)
        # Fancy indexing returns a copy that is not memmapped
        assert_(fp[[0, 1]].__class__ is ndarray)

    def test_memmap_subclass(self):
        class MemmapSubClass(memmap):
            pass

        fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        # We keep previous behavior for subclasses of memmap, i.e. the
        # ufunc and __getitem__ output is never turned into a ndarray
        assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
        assert_(sum(fp).__class__ is MemmapSubClass)
        assert_(fp[1:, :-1].__class__ is MemmapSubClass)
        assert(fp[[0, 1]].__class__ is MemmapSubClass)

    def test_mmap_offset_greater_than_allocation_granularity(self):
        size = 5 * mmap.ALLOCATIONGRANULARITY
        offset = mmap.ALLOCATIONGRANULARITY + 1
        fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
        assert_(fp.offset == offset)

    def test_no_shape(self):
        self.tmpfp.write(b'a'*16)
        mm = memmap(self.tmpfp, dtype='float64')
        assert_equal(mm.shape, (2,))

    def test_empty_array(self):
        # gh-12653
        with pytest.raises(ValueError, match='empty file'):
            memmap(self.tmpfp, shape=(0,4), mode='w+')

        self.tmpfp.write(b'\0')

        # ok now the file is not empty
        memmap(self.tmpfp, shape=(0,4), mode='w+')
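The behaviors these tests pin down (read-only round-trip, basic slicing staying memory-mapped, fancy indexing returning a plain array) can be reproduced in a few lines; the filename below is hypothetical.

# Illustrative sketch only -- 'example.dat' is a hypothetical file.
import numpy as np

fp = np.memmap('example.dat', dtype='float32', mode='w+', shape=(3, 4))
fp[:] = np.arange(12, dtype='float32').reshape(3, 4)
fp.flush()
del fp  # deleting the last reference flushes and closes the mapping

ro = np.memmap('example.dat', dtype='float32', mode='r', shape=(3, 4))
assert ro.flags.writeable is False
assert ro[1:, :-1].__class__ is np.memmap  # basic slicing stays memmapped
assert ro[[0, 1]].__class__ is np.ndarray  # fancy indexing returns a copy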
pizzathief/numpy
numpy/core/tests/test_memmap.py
numpy/distutils/compat.py
""" Back compatibility nosetester module. It will import the appropriate set of tools """ from __future__ import division, absolute_import, print_function import warnings # 2018-04-04, numpy 1.15.0 warnings.warn("Importing from numpy.testing.nosetester is deprecated " "since 1.15.0, import from numpy.testing instead.", DeprecationWarning, stacklevel=2) from ._private.nosetester import * __all__ = ['get_package_name', 'run_module_suite', 'NoseTester', '_numpy_tester', 'get_package_name', 'import_nose', 'suppress_warnings']
from __future__ import division, absolute_import, print_function

import sys
import os
import shutil
import mmap
import pytest
from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp

from numpy import (
    memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy.compat import Path

from numpy import arange, allclose, asarray
from numpy.testing import (
    assert_, assert_equal, assert_array_equal, suppress_warnings
    )

class TestMemmap(object):
    def setup(self):
        self.tmpfp = NamedTemporaryFile(prefix='mmap')
        self.tempdir = mkdtemp()
        self.shape = (3, 4)
        self.dtype = 'float32'
        self.data = arange(12, dtype=self.dtype)
        self.data.resize(self.shape)

    def teardown(self):
        self.tmpfp.close()
        shutil.rmtree(self.tempdir)

    def test_roundtrip(self):
        # Write data to file
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp  # Test __del__ machinery, which handles cleanup

        # Read data back from file
        newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
                       shape=self.shape)
        assert_(allclose(self.data, newfp))
        assert_array_equal(self.data, newfp)
        assert_equal(newfp.flags.writeable, False)

    def test_open_with_filename(self):
        tmpname = mktemp('', 'mmap', dir=self.tempdir)
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp

    def test_unnamed_file(self):
        with TemporaryFile() as f:
            fp = memmap(f, dtype=self.dtype, shape=self.shape)
            del fp

    def test_attributes(self):
        offset = 1
        mode = "w+"
        fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
                    shape=self.shape, offset=offset)
        assert_equal(offset, fp.offset)
        assert_equal(mode, fp.mode)
        del fp

    def test_filename(self):
        tmpname = mktemp('', 'mmap', dir=self.tempdir)
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        abspath = os.path.abspath(tmpname)
        fp[:] = self.data[:]
        assert_equal(abspath, fp.filename)
        b = fp[:1]
        assert_equal(abspath, b.filename)
        del b
        del fp

    @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
    def test_path(self):
        tmpname = mktemp('', 'mmap', dir=self.tempdir)
        fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
                    shape=self.shape)
        # os.path.realpath does not resolve symlinks on Windows
        # see: https://bugs.python.org/issue9949
        # use Path.resolve, just as memmap class does internally
        abspath = str(Path(tmpname).resolve())
        fp[:] = self.data[:]
        assert_equal(abspath, str(fp.filename.resolve()))
        b = fp[:1]
        assert_equal(abspath, str(b.filename.resolve()))
        del b
        del fp

    def test_filename_fileobj(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
                    shape=self.shape)
        assert_equal(fp.filename, self.tmpfp.name)

    @pytest.mark.skipif(sys.platform == 'gnu0',
                        reason="Known to fail on hurd")
    def test_flush(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        assert_equal(fp[0], self.data[0])
        fp.flush()

    def test_del(self):
        # Make sure a view does not delete the underlying mmap
        fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                         shape=self.shape)
        fp_base[0] = 5
        fp_view = fp_base[0:1]
        assert_equal(fp_view[0], 5)
        del fp_view
        # Should still be able to access and assign values after
        # deleting the view
        assert_equal(fp_base[0], 5)
        fp_base[0] = 6
        assert_equal(fp_base[0], 6)

    def test_arithmetic_drops_references(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = (fp + 10)
        if isinstance(tmp, memmap):
            assert_(tmp._mmap is not fp._mmap)

    def test_indexing_drops_references(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = fp[(1, 2), (2, 3)]
        if isinstance(tmp, memmap):
            assert_(tmp._mmap is not fp._mmap)

    def test_slicing_keeps_references(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        assert_(fp[:2, :2]._mmap is fp._mmap)

    def test_view(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        new1 = fp.view()
        new2 = new1.view()
        assert_(new1.base is fp)
        assert_(new2.base is fp)
        new_array = asarray(fp)
        assert_(new_array.base is fp)

    def test_ufunc_return_ndarray(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        with suppress_warnings() as sup:
            sup.filter(FutureWarning, "np.average currently does not preserve")
            for unary_op in [sum, average, product]:
                result = unary_op(fp)
                assert_(isscalar(result))
                assert_(result.__class__ is self.data[0, 0].__class__)

                assert_(unary_op(fp, axis=0).__class__ is ndarray)
                assert_(unary_op(fp, axis=1).__class__ is ndarray)

        for binary_op in [add, subtract, multiply]:
            assert_(binary_op(fp, self.data).__class__ is ndarray)
            assert_(binary_op(self.data, fp).__class__ is ndarray)
            assert_(binary_op(fp, fp).__class__ is ndarray)

        fp += 1
        assert(fp.__class__ is memmap)
        add(fp, 1, out=fp)
        assert(fp.__class__ is memmap)

    def test_getitem(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        assert_(fp[1:, :-1].__class__ is memmap)
        # Fancy indexing returns a copy that is not memmapped
        assert_(fp[[0, 1]].__class__ is ndarray)

    def test_memmap_subclass(self):
        class MemmapSubClass(memmap):
            pass

        fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        # We keep previous behavior for subclasses of memmap, i.e. the
        # ufunc and __getitem__ output is never turned into a ndarray
        assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
        assert_(sum(fp).__class__ is MemmapSubClass)
        assert_(fp[1:, :-1].__class__ is MemmapSubClass)
        assert(fp[[0, 1]].__class__ is MemmapSubClass)

    def test_mmap_offset_greater_than_allocation_granularity(self):
        size = 5 * mmap.ALLOCATIONGRANULARITY
        offset = mmap.ALLOCATIONGRANULARITY + 1
        fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
        assert_(fp.offset == offset)

    def test_no_shape(self):
        self.tmpfp.write(b'a'*16)
        mm = memmap(self.tmpfp, dtype='float64')
        assert_equal(mm.shape, (2,))

    def test_empty_array(self):
        # gh-12653
        with pytest.raises(ValueError, match='empty file'):
            memmap(self.tmpfp, shape=(0,4), mode='w+')

        self.tmpfp.write(b'\0')

        # ok now the file is not empty
        memmap(self.tmpfp, shape=(0,4), mode='w+')
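# The behavior the tests above exercise, condensed into a minimal round-trip
# sketch (the temp file and shape are illustrative, not part of the suite):
import numpy as np
from tempfile import NamedTemporaryFile

with NamedTemporaryFile(prefix='mmap') as f:
    fp = np.memmap(f, dtype='float32', mode='w+', shape=(3, 4))
    fp[:] = np.arange(12, dtype='float32').reshape(3, 4)
    fp.flush()   # push the mapped pages to disk
    del fp       # __del__ also flushes and releases the map

    ro = np.memmap(f, dtype='float32', mode='r', shape=(3, 4))
    assert ro.flags.writeable is False   # 'r' maps are read-only
    assert float(ro[2, 3]) == 11.0       # data round-trips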
pizzathief/numpy
numpy/core/tests/test_memmap.py
numpy/testing/nosetester.py
""" This is only meant to add docs to objects defined in C-extension modules. The purpose is to allow easier editing of the docstrings without requiring a re-compile. NOTE: Many of the methods of ndarray have corresponding functions. If you update these docstrings, please keep also the ones in core/fromnumeric.py, core/defmatrix.py up-to-date. """ from __future__ import division, absolute_import, print_function import sys from numpy.core import numerictypes as _numerictypes from numpy.core import dtype from numpy.core.function_base import add_newdoc ############################################################################### # # flatiter # # flatiter needs a toplevel description # ############################################################################### add_newdoc('numpy.core', 'flatiter', """ Flat iterator object to iterate over arrays. A `flatiter` iterator is returned by ``x.flat`` for any array `x`. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in row-major, C-style order (the last index varying the fastest). The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- ndarray.flat : Return a flat iterator over an array. ndarray.flatten : Returns a flattened copy of an array. Notes ----- A `flatiter` iterator can not be constructed directly from Python code by calling the `flatiter` constructor. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> type(fl) <class 'numpy.flatiter'> >>> for item in fl: ... print(item) ... 0 1 2 3 4 5 >>> fl[2:4] array([2, 3]) """) # flatiter attributes add_newdoc('numpy.core', 'flatiter', ('base', """ A reference to the array that is iterated over. Examples -------- >>> x = np.arange(5) >>> fl = x.flat >>> fl.base is x True """)) add_newdoc('numpy.core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.coords (0, 0) >>> next(fl) 0 >>> fl.coords (0, 1) """)) add_newdoc('numpy.core', 'flatiter', ('index', """ Current flat index into the array. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.index 0 >>> next(fl) 0 >>> fl.index 1 """)) # flatiter functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator """)) add_newdoc('numpy.core', 'flatiter', ('copy', """ copy() Get a copy of the iterator as a 1-D array. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> fl = x.flat >>> fl.copy() array([0, 1, 2, 3, 4, 5]) """)) ############################################################################### # # nditer # ############################################################################### add_newdoc('numpy.core', 'nditer', """ Efficient multi-dimensional iterator object to iterate over arrays. To get started using this object, see the :ref:`introductory guide to array iteration <arrays.nditer>`. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. flags : sequence of str, optional Flags to control the behavior of the iterator. * ``buffered`` enables buffering when required. * ``c_index`` causes a C-order index to be tracked. * ``f_index`` causes a Fortran-order index to be tracked. * ``multi_index`` causes a multi-index, or a tuple of indices with one per iteration dimension, to be tracked. 
* ``common_dtype`` causes all the operands to be converted to a common data type, with copying or buffering as necessary. * ``copy_if_overlap`` causes the iterator to determine if read operands have overlap with write operands, and make temporary copies as necessary to avoid overlap. False positives (needless copying) are possible in some cases. * ``delay_bufalloc`` delays allocation of the buffers until a reset() call is made. Allows ``allocate`` operands to be initialized before their values are copied into the buffers. * ``external_loop`` causes the ``values`` given to be one-dimensional arrays with multiple values instead of zero-dimensional arrays. * ``grow_inner`` allows the ``value`` array sizes to be made larger than the buffer size when both ``buffered`` and ``external_loop`` is used. * ``ranged`` allows the iterator to be restricted to a sub-range of the iterindex values. * ``refs_ok`` enables iteration of reference types, such as object arrays. * ``reduce_ok`` enables iteration of ``readwrite`` operands which are broadcasted, also known as reduction operands. * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional This is a list of flags for each operand. At minimum, one of ``readonly``, ``readwrite``, or ``writeonly`` must be specified. * ``readonly`` indicates the operand will only be read from. * ``readwrite`` indicates the operand will be read from and written to. * ``writeonly`` indicates the operand will only be written to. * ``no_broadcast`` prevents the operand from being broadcasted. * ``contig`` forces the operand data to be contiguous. * ``aligned`` forces the operand data to be aligned. * ``nbo`` forces the operand data to be in native byte order. * ``copy`` allows a temporary read-only copy if required. * ``updateifcopy`` allows a temporary read-write copy if required. * ``allocate`` causes the array to be allocated if it is None in the ``op`` parameter. * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. * ``arraymask`` indicates that this operand is the mask to use for selecting elements when writing to operands with the 'writemasked' flag set. The iterator does not enforce this, but when writing from a buffer back to the array, it only copies those elements indicated by this mask. * ``writemasked`` indicates that only elements where the chosen ``arraymask`` operand is True will be written to. * ``overlap_assume_elementwise`` can be used to mark operands that are accessed only in the iterator order, to allow less conservative copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. order : {'C', 'F', 'A', 'K'}, optional Controls the iteration order. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. This also affects the element memory order of ``allocate`` operands, as they are allocated to be compatible with iteration order. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur when making a copy or buffering. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. 
* 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. op_axes : list of list of ints, optional If provided, is a list of ints or None for each operand. The list of axes for an operand is a mapping from the dimensions of the iterator to the dimensions of the operand. A value of -1 can be placed for entries, causing that dimension to be treated as `newaxis`. itershape : tuple of ints, optional The desired shape of the iterator. This allows ``allocate`` operands with a dimension mapped by op_axes not corresponding to a dimension of a different operand to get a value not equal to 1 for that dimension. buffersize : int, optional When buffering is enabled, controls the size of the temporary buffers. Set to 0 for the default value. Attributes ---------- dtypes : tuple of dtype(s) The data types of the values provided in `value`. This may be different from the operand data types if buffering is enabled. Valid only before the iterator is closed. finished : bool Whether the iteration over the operands is finished or not. has_delayed_bufalloc : bool If True, the iterator was created with the ``delay_bufalloc`` flag, and no reset() function was called on it yet. has_index : bool If True, the iterator was created with either the ``c_index`` or the ``f_index`` flag, and the property `index` can be used to retrieve it. has_multi_index : bool If True, the iterator was created with the ``multi_index`` flag, and the property `multi_index` can be used to retrieve it. index When the ``c_index`` or ``f_index`` flag was used, this property provides access to the index. Raises a ValueError if accessed and ``has_index`` is False. iterationneedsapi : bool Whether iteration requires access to the Python API, for example if one of the operands is an object array. iterindex : int An index which matches the order of iteration. itersize : int Size of the iterator. itviews Structured view(s) of `operands` in memory, matching the reordered and optimized iterator access pattern. Valid only before the iterator is closed. multi_index When the ``multi_index`` flag was used, this property provides access to the index. Raises a ValueError if accessed and ``has_multi_index`` is False. ndim : int The dimensions of the iterator. nop : int The number of iterator operands. operands : tuple of operand(s) The array(s) to be iterated over. Valid only before the iterator is closed. shape : tuple of ints Shape tuple, the shape of the iterator. value Value of ``operands`` at current iteration. Normally, this is a tuple of array scalars, but if the flag ``external_loop`` is used, it is a tuple of one dimensional arrays. Notes ----- `nditer` supersedes `flatiter`. The iterator implementation behind `nditer` is also exposed by the NumPy C API. The Python exposure supplies two iteration interfaces, one which follows the Python iterator protocol, and another which mirrors the C-style do-while pattern. The native Python approach is better in most cases, but if you need the coordinates or index of an iterator, use the C-style pattern. Examples -------- Here is how we might write an ``iter_add`` function, using the Python iterator protocol: >>> def iter_add_py(x, y, out=None): ... addop = np.add ... it = np.nditer([x, y, out], [], ... [['readonly'], ['readonly'], ['writeonly','allocate']]) ... with it: ... for (a, b, c) in it: ... addop(a, b, out=c) ...
return it.operands[2] Here is the same function, but following the C-style pattern: >>> def iter_add(x, y, out=None): ... addop = np.add ... it = np.nditer([x, y, out], [], ... [['readonly'], ['readonly'], ['writeonly','allocate']]) ... with it: ... while not it.finished: ... addop(it[0], it[1], out=it[2]) ... it.iternext() ... return it.operands[2] Here is an example outer product function: >>> def outer_it(x, y, out=None): ... mulop = np.multiply ... it = np.nditer([x, y, out], ['external_loop'], ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, ... [-1] * x.ndim + list(range(y.ndim)), ... None]) ... with it: ... for (a, b, c) in it: ... mulop(a, b, out=c) ... return it.operands[2] >>> a = np.arange(2)+1 >>> b = np.arange(3)+1 >>> outer_it(a,b) array([[1, 2, 3], [2, 4, 6]]) Here is an example function which operates like a "lambda" ufunc: >>> def luf(lamdaexpr, *args, **kwargs): ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' ... nargs = len(args) ... op = (kwargs.get('out',None),) + args ... it = np.nditer(op, ['buffered','external_loop'], ... [['writeonly','allocate','no_broadcast']] + ... [['readonly','nbo','aligned']]*nargs, ... order=kwargs.get('order','K'), ... casting=kwargs.get('casting','safe'), ... buffersize=kwargs.get('buffersize',0)) ... while not it.finished: ... it[0] = lamdaexpr(*it[1:]) ... it.iternext() ... return it.operands[0] >>> a = np.arange(5) >>> b = np.ones(5) >>> luf(lambda i,j:i*i + j/2, a, b) array([ 0.5, 1.5, 4.5, 9.5, 16.5]) If operand flags `"writeonly"` or `"readwrite"` are used the operands may be views into the original data with the `WRITEBACKIFCOPY` flag. In this case nditer must be used as a context manager or the nditer.close method must be called before using the result. The temporary data will be written back to the original data when the `__exit__` function is called but not before: >>> a = np.arange(6, dtype='i4')[::-2] >>> with np.nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', ... op_dtypes=[np.dtype('f4')]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here >>> a, x (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) It is important to note that once the iterator is exited, dangling references (like `x` in the example) may or may not share data with the original data `a`. If writeback semantics were active, i.e. if `x.base.flags.writebackifcopy` is `True`, then exiting the iterator will sever the connection between `x` and `a`, writing to `x` will no longer write to `a`. If writeback semantics are not active, then `x.data` will still point at some part of `a.data`, and writing to one will affect the other. """) # nditer methods add_newdoc('numpy.core', 'nditer', ('copy', """ copy() Get a copy of the iterator in its current state. Examples -------- >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) >>> next(it) (array(0), array(1)) >>> it2 = it.copy() >>> next(it2) (array(1), array(2)) """)) add_newdoc('numpy.core', 'nditer', ('operands', """ operands[`Slice`] The array(s) to be iterated over. Valid only before the iterator is closed. """)) add_newdoc('numpy.core', 'nditer', ('debug_print', """ debug_print() Print the current state of the `nditer` instance and debug info to stdout. 
""")) add_newdoc('numpy.core', 'nditer', ('enable_external_loop', """ enable_external_loop() When the "external_loop" was not used during construction, but is desired, this modifies the iterator to behave as if the flag was specified. """)) add_newdoc('numpy.core', 'nditer', ('iternext', """ iternext() Check whether iterations are left, and perform a single internal iteration without returning the result. Used in the C-style pattern do-while pattern. For an example, see `nditer`. Returns ------- iternext : bool Whether or not there are iterations left. """)) add_newdoc('numpy.core', 'nditer', ('remove_axis', """ remove_axis(i) Removes axis `i` from the iterator. Requires that the flag "multi_index" be enabled. """)) add_newdoc('numpy.core', 'nditer', ('remove_multi_index', """ remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing the internal iteration structure to be optimized further. """)) add_newdoc('numpy.core', 'nditer', ('reset', """ reset() Reset the iterator to its initial state. """)) add_newdoc('numpy.core', 'nested_iters', """ Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the outermost loop, the last in the innermost loop. Advancing one will change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. axes : list of list of int Each item is used as an "op_axes" argument to an nditer flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name Returns ------- iters : tuple of nditer An nditer for each item in `axes`, outermost first See Also -------- nditer Examples -------- Basic usage. Note how y is the "flattened" version of [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified the first iter's axes as [1] >>> a = np.arange(12).reshape(2, 3, 2) >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) >>> for x in i: ... print(i.multi_index) ... for y in j: ... print('', j.multi_index, y) (0,) (0, 0) 0 (0, 1) 1 (1, 0) 6 (1, 1) 7 (1,) (0, 0) 2 (0, 1) 3 (1, 0) 8 (1, 1) 9 (2,) (0, 0) 4 (0, 1) 5 (1, 0) 10 (1, 1) 11 """) add_newdoc('numpy.core', 'nditer', ('close', """ close() Resolve all writeback semantics in writeable operands. See Also -------- :ref:`nditer-context-manager` """)) ############################################################################### # # broadcast # ############################################################################### add_newdoc('numpy.core', 'broadcast', """ Produce an object that mimics broadcasting. Parameters ---------- in1, in2, ... : array_like Input parameters. Returns ------- b : broadcast object Broadcast the input parameters against one another, and return an object that encapsulates the result. Amongst others, it has ``shape`` and ``nd`` properties, and may be used as an iterator. 
See Also -------- broadcast_arrays broadcast_to Examples -------- Manually adding two vectors, using broadcasting: >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) >>> out = np.empty(b.shape) >>> out.flat = [u+v for (u,v) in b] >>> out array([[5., 6., 7.], [6., 7., 8.], [7., 8., 9.]]) Compare against built-in broadcasting: >>> x + y array([[5, 6, 7], [6, 7, 8], [7, 8, 9]]) """) # attributes add_newdoc('numpy.core', 'broadcast', ('index', """ current index in broadcasted result Examples -------- >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) >>> b.index 0 >>> next(b), next(b), next(b) ((1, 4), (1, 5), (1, 6)) >>> b.index 3 """)) add_newdoc('numpy.core', 'broadcast', ('iters', """ tuple of iterators along ``self``'s "components." Returns a tuple of `numpy.flatiter` objects, one for each "component" of ``self``. See Also -------- numpy.flatiter Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> row, col = b.iters >>> next(row), next(col) (1, 4) """)) add_newdoc('numpy.core', 'broadcast', ('ndim', """ Number of dimensions of broadcasted result. Alias for `nd`. .. versionadded:: 1.12.0 Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.ndim 2 """)) add_newdoc('numpy.core', 'broadcast', ('nd', """ Number of dimensions of broadcasted result. For code intended for NumPy 1.12.0 and later the more consistent `ndim` is preferred. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.nd 2 """)) add_newdoc('numpy.core', 'broadcast', ('numiter', """ Number of iterators possessed by the broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.numiter 2 """)) add_newdoc('numpy.core', 'broadcast', ('shape', """ Shape of broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.shape (3, 3) """)) add_newdoc('numpy.core', 'broadcast', ('size', """ Total size of broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.size 9 """)) add_newdoc('numpy.core', 'broadcast', ('reset', """ reset() Reset the broadcasted result's iterator(s). Parameters ---------- None Returns ------- None Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.index 0 >>> next(b), next(b), next(b) ((1, 4), (2, 4), (3, 4)) >>> b.index 3 >>> b.reset() >>> b.index 0 """)) ############################################################################### # # numpy functions # ############################################################################### add_newdoc('numpy.core.multiarray', 'array', """ array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0) Create an array. Parameters ---------- object : array_like An array, any object exposing the array interface, an object whose __array__ method returns an array, or any (nested) sequence. dtype : data-type, optional The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. copy : bool, optional If true (default), then the object is copied. 
Otherwise, a copy will only be made if __array__ returns a copy, if obj is a nested sequence, or if a copy is needed to satisfy any of the other requirements (`dtype`, `order`, etc.). order : {'K', 'A', 'C', 'F'}, optional Specify the memory layout of the array. If object is not an array, the newly created array will be in C order (row major) unless 'F' is specified, in which case it will be in Fortran order (column major). If object is an array the following holds. ===== ========= =================================================== order no copy copy=True ===== ========= =================================================== 'K' unchanged F & C order preserved, otherwise most similar order 'A' unchanged F order if input is F and not C, otherwise C order 'C' C order C order 'F' F order F order ===== ========= =================================================== When ``copy=False`` and a copy is made for other reasons, the result is the same as if ``copy=True``, with some exceptions for `A`, see the Notes section. The default order is 'K'. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will be pre-pended to the shape as needed to meet this requirement. Returns ------- out : ndarray An array object satisfying the specified requirements. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Notes ----- When order is 'A' and `object` is an array in neither 'C' nor 'F' order, and a copy is forced by a change in dtype, then the order of the result is not necessarily 'C' as expected. This is likely a bug. Examples -------- >>> np.array([1, 2, 3]) array([1, 2, 3]) Upcasting: >>> np.array([1, 2, 3.0]) array([ 1., 2., 3.]) More than one dimension: >>> np.array([[1, 2], [3, 4]]) array([[1, 2], [3, 4]]) Minimum dimensions 2: >>> np.array([1, 2, 3], ndmin=2) array([[1, 2, 3]]) Type provided: >>> np.array([1, 2, 3], dtype=complex) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) >>> x['a'] array([1, 3]) Creating an array from sub-classes: >>> np.array(np.mat('1 2; 3 4')) array([[1, 2], [3, 4]]) >>> np.array(np.mat('1 2; 3 4'), subok=True) matrix([[1, 2], [3, 4]]) """) add_newdoc('numpy.core.multiarray', 'empty', """ empty(shape, dtype=float, order='C') Return a new array of given shape and type, without initializing entries. Parameters ---------- shape : int or tuple of int Shape of the empty array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional Desired output data-type for the array, e.g, `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of uninitialized (arbitrary) data of the given shape, dtype, and order. Object arrays will be initialized to None. 
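A quick sketch of the object-dtype case mentioned above:

>>> np.empty(2, dtype=object)   # object arrays start out None-filled
array([None, None], dtype=object)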
See Also -------- empty_like : Return an empty array with shape and type of input. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Notes ----- `empty`, unlike `zeros`, does not set the array values to zero, and may therefore be marginally faster. On the other hand, it requires the user to manually set all the values in the array, and should be used with caution. Examples -------- >>> np.empty([2, 2]) array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized >>> np.empty([2, 2], dtype=int) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized """) add_newdoc('numpy.core.multiarray', 'scalar', """ scalar(dtype, obj) Return a new scalar array of the given type initialized with obj. This function is meant mainly for pickle support. `dtype` must be a valid data-type descriptor. If `dtype` corresponds to an object descriptor, then `obj` can be any object, otherwise `obj` must be a string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. """) add_newdoc('numpy.core.multiarray', 'zeros', """ zeros(shape, dtype=float, order='C') Return a new array of given shape and type, filled with zeros. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of zeros with the given shape, dtype, and order. See Also -------- zeros_like : Return an array of zeros with shape and type of input. empty : Return a new uninitialized array. ones : Return a new array setting values to one. full : Return a new array of given shape filled with value. Examples -------- >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) >>> np.zeros((5,), dtype=int) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) array([[ 0.], [ 0.]]) >>> s = (2,2) >>> np.zeros(s) array([[ 0., 0.], [ 0., 0.]]) >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype array([(0, 0), (0, 0)], dtype=[('x', '<i4'), ('y', '<i4')]) """) add_newdoc('numpy.core.multiarray', 'set_typeDict', """set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. """) add_newdoc('numpy.core.multiarray', 'fromstring', """ fromstring(string, dtype=float, count=-1, sep='') A new 1-D array initialized from text data in a string. Parameters ---------- string : str A string containing the data. dtype : data-type, optional The data type of the array; default: float. For binary input data, the data must be in exactly this format. count : int, optional Read this number of `dtype` elements from the data. If this is negative (the default), the count will be determined from the length of the data. sep : str, optional The string separating numbers in the data; extra whitespace between elements is also ignored. .. deprecated:: 1.14 Passing ``sep=''``, the default, is deprecated since it will trigger the deprecated binary mode of this function. This mode interprets `string` as binary bytes, rather than ASCII text with decimal numbers, an operation which is better spelt ``frombuffer(string, dtype, count)``. 
If `string` contains unicode text, the binary mode of `fromstring` will first encode it into bytes using either utf-8 (python 3) or the default encoding (python 2), neither of which produces sane results. Returns ------- arr : ndarray The constructed array. Raises ------ ValueError If the string is not the correct size to satisfy the requested `dtype` and `count`. See Also -------- frombuffer, fromfile, fromiter Examples -------- >>> np.fromstring('1 2', dtype=int, sep=' ') array([1, 2]) >>> np.fromstring('1, 2', dtype=int, sep=',') array([1, 2]) """) add_newdoc('numpy.core.multiarray', 'compare_chararrays', """ compare_chararrays(a, b, cmp_op, rstrip) Performs element-wise comparison of two string arrays using the comparison operator specified by `cmp_op`. Parameters ---------- a, b : array_like Arrays to be compared. cmp_op : {"<", "<=", "==", ">=", ">", "!="} Type of comparison. rstrip : Boolean If True, the spaces at the end of Strings are removed before the comparison. Returns ------- out : ndarray The output array of type Boolean with the same shape as a and b. Raises ------ ValueError If `cmp_op` is not valid. TypeError If at least one of `a` or `b` is a non-string array Examples -------- >>> a = np.array(["a", "b", "cde"]) >>> b = np.array(["a", "a", "dec"]) >>> np.compare_chararrays(a, b, ">", True) array([False, True, False]) """) add_newdoc('numpy.core.multiarray', 'fromiter', """ fromiter(iterable, dtype, count=-1) Create a new 1-dimensional array from an iterable object. Parameters ---------- iterable : iterable object An iterable object providing data for the array. dtype : data-type The data-type of the returned array. count : int, optional The number of items to read from *iterable*. The default is -1, which means all data is read. Returns ------- out : ndarray The output array. Notes ----- Specify `count` to improve performance. It allows ``fromiter`` to pre-allocate the output array, instead of resizing it on demand. Examples -------- >>> iterable = (x*x for x in range(5)) >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) """) add_newdoc('numpy.core.multiarray', 'fromfile', """ fromfile(file, dtype=float, count=-1, sep='', offset=0) Construct an array from data in a text or binary file. A highly efficient way of reading binary data with a known data-type, as well as parsing simply formatted text files. Data written using the `tofile` method can be read using this function. Parameters ---------- file : file or str or Path Open file object or filename. .. versionchanged:: 1.17.0 `pathlib.Path` objects are now accepted. dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order of the items in the file. count : int Number of items to read. ``-1`` means all items (i.e., the complete file). sep : str Separator between items if file is a text file. Empty ("") separator means the file should be treated as binary. Spaces (" ") in the separator match zero or more whitespace characters. A separator consisting only of spaces must match at least one whitespace. offset : int The offset (in bytes) from the file's current position. Defaults to 0. Only permitted for binary files. .. versionadded:: 1.17.0 See also -------- load, save ndarray.tofile loadtxt : More flexible way of loading data from a text file. Notes ----- Do not rely on the combination of `tofile` and `fromfile` for data storage, as the binary files generated are not platform independent.
In particular, no byte-order or data-type information is saved. Data can be stored in the platform-independent ``.npy`` format using `save` and `load` instead. Examples -------- Construct an ndarray: >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), ... ('temp', float)]) >>> x = np.zeros((1,), dtype=dt) >>> x['time']['min'] = 10; x['temp'] = 98.25 >>> x array([((10, 0), 98.25)], dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) Save the raw data to disk: >>> import tempfile >>> fname = tempfile.mkstemp()[1] >>> x.tofile(fname) Read the raw data from disk: >>> np.fromfile(fname, dtype=dt) array([((10, 0), 98.25)], dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) The recommended way to store and load data: >>> np.save(fname, x) >>> np.load(fname + '.npy') array([((10, 0), 98.25)], dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) """) add_newdoc('numpy.core.multiarray', 'frombuffer', """ frombuffer(buffer, dtype=float, count=-1, offset=0) Interpret a buffer as a 1-dimensional array. Parameters ---------- buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional Data-type of the returned array; default: float. count : int, optional Number of items to read. ``-1`` means all data in the buffer. offset : int, optional Start reading the buffer from this offset (in bytes); default: 0. Notes ----- If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: >>> dt = np.dtype(int) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP The data of the resulting array will not be byteswapped, but will be interpreted correctly. Examples -------- >>> s = b'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) array([1, 2], dtype=uint8) >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) array([1, 2, 3], dtype=uint8) """) add_newdoc('numpy.core', 'fastCopyAndTranspose', """_fastCopyAndTranspose(a)""") add_newdoc('numpy.core.multiarray', 'correlate', """cross_correlate(a,v, mode=0)""") add_newdoc('numpy.core.multiarray', 'arange', """ arange([start,] stop[, step,], dtype=None) Return evenly spaced values within a given interval. Values are generated within the half-open interval ``[start, stop)`` (in other words, the interval including `start` but excluding `stop`). For integer arguments the function is equivalent to the Python built-in `range` function, but returns an ndarray rather than a list. When using a non-integer step, such as 0.1, the results will often not be consistent. It is better to use `numpy.linspace` for these cases. Parameters ---------- start : number, optional Start of interval. The interval includes this value. The default start value is 0. stop : number End of interval. The interval does not include this value, except in some cases where `step` is not an integer and floating point round-off affects the length of `out`. step : number, optional Spacing between values. For any output `out`, this is the distance between two adjacent values, ``out[i+1] - out[i]``. The default step size is 1. If `step` is specified as a positional argument, `start` must also be given. dtype : dtype The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. Returns ------- arange : ndarray Array of evenly spaced values.
For floating point arguments, the length of the result is ``ceil((stop - start)/step)``. Because of floating point overflow, this rule may result in the last element of `out` being greater than `stop`. See Also -------- linspace : Evenly spaced numbers with careful handling of endpoints. ogrid: Arrays of evenly spaced numbers in N-dimensions. mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. Examples -------- >>> np.arange(3) array([0, 1, 2]) >>> np.arange(3.0) array([ 0., 1., 2.]) >>> np.arange(3,7) array([3, 4, 5, 6]) >>> np.arange(3,7,2) array([3, 5]) """) add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() Return the compile time NDARRAY_VERSION number. """) add_newdoc('numpy.core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) Construct an empty array. Used by Pickles. """) add_newdoc('numpy.core.multiarray', 'set_string_function', """ set_string_function(f, repr=1) Internal method to set a function to be used when pretty printing arrays. """) add_newdoc('numpy.core.multiarray', 'set_numeric_ops', """ set_numeric_ops(op1=func1, op2=func2, ...) Set numerical operators for array objects. .. deprecated:: 1.16 For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. For ndarray subclasses, define the ``__array_ufunc__`` method and override the relevant ufunc. Parameters ---------- op1, op2, ... : callable Each ``op = func`` pair describes an operator to be replaced. For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace addition by modulus 5 addition. Returns ------- saved_ops : list of callables A list of all operators, stored before making replacements. Notes ----- .. WARNING:: Use with care! Incorrect usage may lead to memory errors. A function replacing an operator cannot make use of that operator. For example, when replacing add, you may not use ``+``. Instead, directly call ufuncs. Examples -------- >>> def add_mod5(x, y): ... return np.add(x, y) % 5 ... >>> old_funcs = np.set_numeric_ops(add=add_mod5) >>> x = np.arange(12).reshape((3, 4)) >>> x + x array([[0, 2, 4, 1], [3, 0, 2, 4], [1, 3, 0, 2]]) >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators """) add_newdoc('numpy.core.multiarray', 'promote_types', """ promote_types(type1, type2) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. The returned data type is always in native byte order. This function is symmetric, but rarely associative. Parameters ---------- type1 : dtype or dtype specifier First data type. type2 : dtype or dtype specifier Second data type. Returns ------- out : dtype The promoted data type. Notes ----- .. versionadded:: 1.6.0 Starting in NumPy 1.9, promote_types function now returns a valid string length when given an integer or float dtype as one argument and a string dtype as another argument. Previously it always returned the input string dtype, even if it wasn't long enough to store the max integer/float value converted to a string. 
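For instance, a 64-bit integer promotes to a string dtype wide enough to hold any converted value (a quick sketch):

>>> np.promote_types('i8', 'S1')
dtype('S21')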
See Also -------- result_type, dtype, can_cast Examples -------- >>> np.promote_types('f4', 'f8') dtype('float64') >>> np.promote_types('i8', 'f4') dtype('float64') >>> np.promote_types('>i8', '<c8') dtype('complex128') >>> np.promote_types('i4', 'S8') dtype('S11') An example of a non-associative case: >>> p = np.promote_types >>> p('S', p('i1', 'u1')) dtype('S6') >>> p(p('S', 'i1'), 'u1') dtype('S4') """) if sys.version_info.major < 3: add_newdoc('numpy.core.multiarray', 'newbuffer', """ newbuffer(size) Return a new uninitialized buffer object. Parameters ---------- size : int Size in bytes of returned buffer object. Returns ------- newbuffer : buffer object Returned, uninitialized buffer object of `size` bytes. """) add_newdoc('numpy.core.multiarray', 'getbuffer', """ getbuffer(obj [,offset[, size]]) Create a buffer object from the given object referencing a slice of length size starting at offset. Default is the entire buffer. A read-write buffer is attempted followed by a read-only buffer. Parameters ---------- obj : object offset : int, optional size : int, optional Returns ------- buffer_obj : buffer Examples -------- >>> buf = np.getbuffer(np.ones(5), 1, 3) >>> len(buf) 3 >>> buf[0] '\\x00' >>> buf <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0> """) add_newdoc('numpy.core.multiarray', 'c_einsum', """ c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe') *This documentation shadows that of the native python implementation of the `einsum` function, except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In *implicit* mode `einsum` computes these values. In *explicit* mode, `einsum` provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels. See the notes and examples for clarification. Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : list of array_like These are the arrays for the operation. out : ndarray, optional If provided, the calculation is done into this array. dtype : {data-type, None}, optional If provided, forces the calculation to use the data type specified. Note that you may have to also give a more liberal `casting` parameter to allow the conversions. Default is None. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the output. 'C' means it should be C contiguous. 'F' means it should be Fortran contiguous, 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. 'K' means it should be as close to the layout as the inputs as is possible, including arbitrarily permuted axes. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. 
* 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Default is 'safe'. optimize : {False, True, 'greedy', 'optimal'}, optional Controls if intermediate optimization should occur. No optimization will occur if False and True will default to the 'greedy' algorithm. Also accepts an explicit contraction list from the ``np.einsum_path`` function. See ``np.einsum_path`` for more details. Defaults to False. Returns ------- output : ndarray The calculation based on the Einstein summation convention. See Also -------- einsum_path, dot, inner, outer, tensordot, linalg.multi_dot Notes ----- .. versionadded:: 1.6.0 The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`numpy.trace`. * Return a diagonal, :py:func:`numpy.diag`. * Array axis summations, :py:func:`numpy.sum`. * Transpositions and permutations, :py:func:`numpy.transpose`. * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. * Tensor contractions, :py:func:`numpy.tensordot`. * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label appears only once, it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent to :py:func:`np.trace(a) <numpy.trace>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while ``np.einsum('ji', a)`` takes its transpose. Additionally, ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``np.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. 
Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``np.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` produces a view (changed in version 1.10.0). `einsum` also provides an alternative way to provide the subscripts and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. If the output shape is not provided in this format `einsum` will be calculated in implicit mode, otherwise it will be performed explicitly. The examples below have corresponding `einsum` calls with the two parameter methods. .. versionadded:: 1.10.0 Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal of a 2D array. Examples -------- >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) Trace of a matrix: >>> np.einsum('ii', a) 60 >>> np.einsum(a, [0,0]) 60 >>> np.trace(a) 60 Extract the diagonal (requires explicit form): >>> np.einsum('ii->i', a) array([ 0, 6, 12, 18, 24]) >>> np.einsum(a, [0,0], [0]) array([ 0, 6, 12, 18, 24]) >>> np.diag(a) array([ 0, 6, 12, 18, 24]) Sum over an axis (requires explicit form): >>> np.einsum('ij->i', a) array([ 10, 35, 60, 85, 110]) >>> np.einsum(a, [0,1], [0]) array([ 10, 35, 60, 85, 110]) >>> np.sum(a, axis=1) array([ 10, 35, 60, 85, 110]) For higher dimensional arrays summing a single axis can be done with ellipsis: >>> np.einsum('...j->...', a) array([ 10, 35, 60, 85, 110]) >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) array([ 10, 35, 60, 85, 110]) Compute a matrix transpose, or reorder any number of axes: >>> np.einsum('ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum('ij->ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum(c, [1,0]) array([[0, 3], [1, 4], [2, 5]]) >>> np.transpose(c) array([[0, 3], [1, 4], [2, 5]]) Vector inner products: >>> np.einsum('i,i', b, b) 30 >>> np.einsum(b, [0], b, [0]) 30 >>> np.inner(b,b) 30 Matrix vector multiplication: >>> np.einsum('ij,j', a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum(a, [0,1], b, [1]) array([ 30, 80, 130, 180, 230]) >>> np.dot(a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum('...j,j', a, b) array([ 30, 80, 130, 180, 230]) Broadcasting and scalar multiplication: >>> np.einsum('..., ...', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(',ij', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.multiply(3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) Vector outer product: >>> np.einsum('i,j', np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.einsum(np.arange(2)+1, [0], b, [1]) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.outer(np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) Tensor contraction: >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> np.einsum('ijk,jil->kl', a, b) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 
5306.]]) >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) >>> np.tensordot(a,b, axes=([1,0],[0,1])) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) Writeable returned arrays (since version 1.10.0): >>> a = np.zeros((3, 3)) >>> np.einsum('ii->i', a)[:] = 1 >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) Example of ellipsis use: >>> a = np.arange(6).reshape((3,2)) >>> b = np.arange(12).reshape((4,3)) >>> np.einsum('ki,jk->ij', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('ki,...k->i...', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('k...,jk', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) """) ############################################################################## # # Documentation for ndarray attributes and methods # ############################################################################## ############################################################################## # # ndarray object # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', """ ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the format of each element in the array (its byte-order, how many bytes it occupies in memory, whether it is an integer, a floating point number, or something else, etc.) Arrays should be constructed using `array`, `zeros` or `empty` (refer to the See Also section below). The parameters given here refer to a low-level method (`ndarray(...)`) for instantiating an array. For more information, refer to the `numpy` module and examine the methods and attributes of an array. Parameters ---------- (for the __new__ method; see Notes below) shape : tuple of ints Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional Offset of array data in buffer. strides : tuple of ints, optional Strides of data in memory. order : {'C', 'F'}, optional Row-major (C-style) or column-major (Fortran-style) order. Attributes ---------- T : ndarray Transpose of the array. data : buffer The array's elements, in memory. dtype : dtype object Describes the format of the elements in the array. flags : dict Dictionary containing information related to memory use, e.g., 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. flat : numpy.flatiter object Flattened version of the array as an iterator. The iterator allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for assignment examples; TODO). imag : ndarray Imaginary part of the array. real : ndarray Real part of the array. size : int Number of elements in the array. itemsize : int The memory use of each array element in bytes. nbytes : int The total number of bytes required to store the array data, i.e., ``itemsize * size``. ndim : int The array's number of dimensions. shape : tuple of ints Shape of the array. strides : tuple of ints The step-size required to move from one element to the next in memory. For example, a contiguous ``(3, 4)`` array of type ``int16`` in C-order has strides ``(8, 2)``. This implies that to move from element to element in memory requires jumps of 2 bytes. 
To move from row-to-row, one needs to jump 8 bytes at a time (``2 * 4``). ctypes : ctypes object Class containing properties of the array needed for interaction with ctypes. base : ndarray If the array is a view into another array, that array is its `base` (unless that array is also a view). The `base` array is where the array data is actually stored. See Also -------- array : Construct an array. zeros : Create an array, each element of which is zero. empty : Create an array, but leave its allocated memory unchanged (i.e., it contains "garbage"). dtype : Create a data-type. Notes ----- There are two modes of creating an array using ``__new__``: 1. If `buffer` is None, then only `shape`, `dtype`, and `order` are used. 2. If `buffer` is an object exposing the buffer interface, then all keywords are interpreted. No ``__init__`` method is needed because the array is fully initialized after the ``__new__`` method. Examples -------- These examples illustrate the low-level `ndarray` constructor. Refer to the `See Also` section above for easier ways of constructing an ndarray. First mode, `buffer` is None: >>> np.ndarray(shape=(2,2), dtype=float, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) Second mode: >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, ... dtype=int) # offset = 1*itemsize, i.e. skip first element array([2, 3]) """) ############################################################################## # # ndarray attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', """Array protocol: Python side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', """None.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', """Array priority.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('base', """ Base object if memory is from some other object. Examples -------- The base of an array that owns its memory is None: >>> x = np.array([1,2,3,4]) >>> x.base is None True Slicing creates a view, whose memory is shared with x: >>> y = x[2:] >>> y.base is x True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', """ An object to simplify the interaction of the array with the ctypes module. This attribute creates an object that makes it easier to use arrays when calling shared libraries with the ctypes module. The returned object has, among others, data, shape, and strides attributes (see Notes below) which themselves return ctypes objects that can be used as arguments to a shared library. Parameters ---------- None Returns ------- c : Python object Possessing attributes data, shape, strides, etc. See Also -------- numpy.ctypeslib Notes ----- Below are the public attributes of this object which were documented in "Guide to NumPy" (we have omitted undocumented public attributes, as well as documented private attributes): .. autoattribute:: numpy.core._internal._ctypes.data :noindex: .. autoattribute:: numpy.core._internal._ctypes.shape :noindex: .. autoattribute:: numpy.core._internal._ctypes.strides :noindex: .. automethod:: numpy.core._internal._ctypes.data_as :noindex: .. automethod:: numpy.core._internal._ctypes.shape_as :noindex: .. 
automethod:: numpy.core._internal._ctypes.strides_as
        :noindex:

    If the ctypes module is not available, then the ctypes attribute
    of array objects still returns something useful, but ctypes objects
    are not returned and errors may be raised instead. In particular,
    the object will still have the ``_as_parameter_`` attribute which will
    return an integer equal to the data attribute.

    Examples
    --------
    >>> import ctypes
    >>> x
    array([[0, 1],
           [2, 3]])
    >>> x.ctypes.data
    30439712
    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
    <ctypes.LP_c_long object at 0x01F01300>
    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
    c_long(0)
    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
    c_longlong(4294967296)
    >>> x.ctypes.shape
    <numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
    >>> x.ctypes.shape_as(ctypes.c_long)
    <numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
    >>> x.ctypes.strides
    <numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
    >>> x.ctypes.strides_as(ctypes.c_longlong)
    <numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
    """Python buffer object pointing to the start of the array's data."""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
    """
    Data-type of the array's elements.

    Parameters
    ----------
    None

    Returns
    -------
    d : numpy dtype object

    See Also
    --------
    numpy.dtype

    Examples
    --------
    >>> x
    array([[0, 1],
           [2, 3]])
    >>> x.dtype
    dtype('int32')
    >>> type(x.dtype)
    <class 'numpy.dtype'>

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
    """
    The imaginary part of the array.

    Examples
    --------
    >>> x = np.sqrt([1+0j, 0+1j])
    >>> x.imag
    array([ 0.        ,  0.70710678])
    >>> x.imag.dtype
    dtype('float64')

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
    """
    Length of one array element in bytes.

    Examples
    --------
    >>> x = np.array([1,2,3], dtype=np.float64)
    >>> x.itemsize
    8
    >>> x = np.array([1,2,3], dtype=np.complex128)
    >>> x.itemsize
    16

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
    """
    Information about the memory layout of the array.

    Attributes
    ----------
    C_CONTIGUOUS (C)
        The data is in a single, C-style contiguous segment.
    F_CONTIGUOUS (F)
        The data is in a single, Fortran-style contiguous segment.
    OWNDATA (O)
        The array owns the memory it uses or borrows it from another object.
    WRITEABLE (W)
        The data area can be written to.  Setting this to False locks
        the data, making it read-only.  A view (slice, etc.) inherits WRITEABLE
        from its base array at creation time, but a view of a writeable
        array may be subsequently locked while the base array remains writeable.
        (The opposite is not true, in that a view of a locked array may not
        be made writeable.  However, currently, locking a base object does not
        lock any views that already reference it, so under that circumstance it
        is possible to alter the contents of a locked array via a previously
        created writeable view onto it.)  Attempting to change a non-writeable
        array raises a ValueError exception.
    ALIGNED (A)
        The data and all elements are aligned appropriately for the hardware.
    WRITEBACKIFCOPY (X)
        This array is a copy of some other array.  The C-API function
        PyArray_ResolveWritebackIfCopy must be called before deallocating
        so that the base array will be updated with the contents of this array.
    UPDATEIFCOPY (U)
        (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other
        array.  When this array is deallocated, the base array will be updated
        with the contents of this array.
    FNC
        F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
        F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
    BEHAVED (B)
        ALIGNED and WRITEABLE.
    CARRAY (CA)
        BEHAVED and C_CONTIGUOUS.
    FARRAY (FA)
        BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.

    Notes
    -----
    The `flags` object can be accessed dictionary-like (as in
    ``a.flags['WRITEABLE']``), or by using lowercased attribute names (as
    in ``a.flags.writeable``). Short flag names are only supported in
    dictionary access.

    Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
    changed by the user, via direct assignment to the attribute or dictionary
    entry, or by calling `ndarray.setflags`.

    The array flags cannot be set arbitrarily:

    - UPDATEIFCOPY can only be set ``False``.
    - WRITEBACKIFCOPY can only be set ``False``.
    - ALIGNED can only be set ``True`` if the data is truly aligned.
    - WRITEABLE can only be set ``True`` if the array owns its own memory
      or the ultimate owner of the memory exposes a writeable buffer
      interface or is a string.

    Arrays can be both C-style and Fortran-style contiguous simultaneously.
    This is clear for 1-dimensional arrays, but can also be true for higher
    dimensional arrays.

    Even for contiguous arrays a stride for a given dimension
    ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
    or the array has no elements.
    It does *not* generally hold that ``self.strides[-1] == self.itemsize``
    for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
    Fortran-style contiguous arrays.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
    """
    A 1-D iterator over the array.

    This is a `numpy.flatiter` instance, which acts similarly to, but is not
    a subclass of, Python's built-in iterator object.

    See Also
    --------
    flatten : Return a copy of the array collapsed into one dimension.

    flatiter

    Examples
    --------
    >>> x = np.arange(1, 7).reshape(2, 3)
    >>> x
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> x.flat[3]
    4
    >>> x.T
    array([[1, 4],
           [2, 5],
           [3, 6]])
    >>> x.T.flat[3]
    5
    >>> type(x.flat)
    <class 'numpy.flatiter'>

    An assignment example:

    >>> x.flat = 3; x
    array([[3, 3, 3],
           [3, 3, 3]])
    >>> x.flat[[1,4]] = 1; x
    array([[3, 1, 3],
           [3, 1, 3]])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
    """
    Total bytes consumed by the elements of the array.

    Notes
    -----
    Does not include memory consumed by non-element attributes of the
    array object.

    Examples
    --------
    >>> x = np.zeros((3,5,2), dtype=np.complex128)
    >>> x.nbytes
    480
    >>> np.prod(x.shape) * x.itemsize
    480

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
    """
    Number of array dimensions.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> x.ndim
    1
    >>> y = np.zeros((2, 3, 4))
    >>> y.ndim
    3

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
    """
    The real part of the array.

    Examples
    --------
    >>> x = np.sqrt([1+0j, 0+1j])
    >>> x.real
    array([ 1.        ,  0.70710678])
    >>> x.real.dtype
    dtype('float64')

    See Also
    --------
    numpy.real : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
    """
    Tuple of array dimensions.

    The shape property is usually used to get the current shape of an array,
    but may also be used to reshape the array in-place by assigning a tuple
    of array dimensions to it.  As with `numpy.reshape`, one of the new shape
    dimensions can be -1, in which case its value is inferred from the size
    of the array and the remaining dimensions. Reshaping an array in-place
    will fail if a copy is required.
Examples
    --------
    >>> x = np.array([1, 2, 3, 4])
    >>> x.shape
    (4,)
    >>> y = np.zeros((2, 3, 4))
    >>> y.shape
    (2, 3, 4)
    >>> y.shape = (3, 8)
    >>> y
    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
    >>> y.shape = (3, 6)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: total size of new array must be unchanged
    >>> np.zeros((4,2))[::2].shape = (-1,)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    AttributeError: incompatible shape for a non-contiguous array

    See Also
    --------
    numpy.reshape : similar function
    ndarray.reshape : similar method

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
    """
    Number of elements in the array.

    Equal to ``np.prod(a.shape)``, i.e., the product of the array's
    dimensions.

    Notes
    -----
    `a.size` returns a standard arbitrary precision Python integer. This
    may not be the case with other methods of obtaining the same value
    (like the suggested ``np.prod(a.shape)``, which returns an instance
    of ``np.int_``), and may be relevant if the value is used further in
    calculations that may overflow a fixed size integer type.

    Examples
    --------
    >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
    >>> x.size
    30
    >>> np.prod(x.shape)
    30

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
    """
    Tuple of bytes to step in each dimension when traversing an array.

    The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
    is::

        offset = sum(np.array(i) * a.strides)

    A more detailed explanation of strides can be found in the
    "ndarray.rst" file in the NumPy reference guide.

    Notes
    -----
    Imagine an array of 32-bit integers (each 4 bytes)::

        x = np.array([[0, 1, 2, 3, 4],
                      [5, 6, 7, 8, 9]], dtype=np.int32)

    This array is stored in memory as 40 bytes, one after the other
    (known as a contiguous block of memory). The strides of an array tell
    us how many bytes we have to skip in memory to move to the next position
    along a certain axis. For example, we have to skip 4 bytes (1 value) to
    move to the next column, but 20 bytes (5 values) to get to the same
    position in the next row. As such, the strides for the array `x` will be
    ``(20, 4)``.

    See Also
    --------
    numpy.lib.stride_tricks.as_strided

    Examples
    --------
    >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
    >>> y
    array([[[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]],

           [[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]]])
    >>> y.strides
    (48, 16, 4)
    >>> y[1,1,1]
    17
    >>> offset=sum(y.strides * np.array((1,1,1)))
    >>> offset/y.itemsize
    17.0

    >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
    >>> x.strides
    (32, 4, 224, 1344)
    >>> i = np.array([3,5,2,2])
    >>> offset = sum(i * x.strides)
    >>> x[3,5,2,2]
    813
    >>> offset / x.itemsize
    813.0

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
    """
    The transposed array.

    Same as ``self.transpose()``.

    Examples
    --------
    >>> x = np.array([[1.,2.],[3.,4.]])
    >>> x
    array([[ 1.,  2.],
           [ 3.,  4.]])
    >>> x.T
    array([[ 1.,  3.],
           [ 2.,  4.]])
    >>> x = np.array([1.,2.,3.,4.])
    >>> x
    array([ 1.,  2.,  3.,  4.])
    >>> x.T
    array([ 1.,  2.,  3.,  4.])

    See Also
    --------
    transpose

    """))


##############################################################################
#
# ndarray methods
#
##############################################################################


add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
    """ a.__array__([dtype]) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array of provided data type if dtype is different from the current dtype of the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', """a.__array_wrap__(obj) -> Object of same type as ndarray object a. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', """a.__copy__() Used if :func:`copy.copy` is called on an array. Returns a copy of the array. Equivalent to ``a.copy(order='K')``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', """a.__deepcopy__(memo, /) -> Deep copy of array. Used if :func:`copy.deepcopy` is called on an array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', """a.__reduce__() For pickling. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', """a.__setstate__(state, /) For unpickling. The `state` argument must be a sequence that contains the following elements: Parameters ---------- version : int optional pickle version. If omitted defaults to 0. shape : tuple dtype : data-type isFortran : bool rawdata : string or list a binary string with the data (or a list if 'a' is an object array) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('all', """ a.all(axis=None, out=None, keepdims=False) Returns True if all elements evaluate to True. Refer to `numpy.all` for full documentation. See Also -------- numpy.all : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('any', """ a.any(axis=None, out=None, keepdims=False) Returns True if any of the elements of `a` evaluate to True. Refer to `numpy.any` for full documentation. See Also -------- numpy.any : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', """ a.argmax(axis=None, out=None) Return indices of the maximum values along the given axis. Refer to `numpy.argmax` for full documentation. See Also -------- numpy.argmax : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', """ a.argmin(axis=None, out=None) Return indices of the minimum values along the given axis of `a`. Refer to `numpy.argmin` for detailed documentation. See Also -------- numpy.argmin : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', """ a.argsort(axis=-1, kind=None, order=None) Returns the indices that would sort this array. Refer to `numpy.argsort` for full documentation. See Also -------- numpy.argsort : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', """ a.argpartition(kth, axis=-1, kind='introselect', order=None) Returns the indices that would partition this array. Refer to `numpy.argpartition` for full documentation. .. versionadded:: 1.8.0 See Also -------- numpy.argpartition : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. 
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'unsafe'
        for backwards compatibility.

          * 'no' means the data types should not be cast at all.
          * 'equiv' means only byte-order changes are allowed.
          * 'safe' means only casts which can preserve values are allowed.
          * 'same_kind' means only safe casts or casts within a kind,
            like float64 to float32, are allowed.
          * 'unsafe' means any data conversions may be done.
    subok : bool, optional
        If True, then sub-classes will be passed-through (default), otherwise
        the returned array will be forced to be a base-class array.
    copy : bool, optional
        By default, astype always returns a newly allocated array. If this
        is set to false, and the `dtype`, `order`, and `subok`
        requirements are satisfied, the input array is returned instead
        of a copy.

    Returns
    -------
    arr_t : ndarray
        Unless `copy` is False and the other conditions for returning the input
        array are satisfied (see description for `copy` input parameter), `arr_t`
        is a new array of the same shape as the input array, with dtype, order
        given by `dtype`, `order`.

    Notes
    -----
    .. versionchanged:: 1.17.0
       Casting between a simple data type and a structured one is possible only
       for "unsafe" casting.  Casting to multiple fields is allowed, but
       casting from multiple fields is not.

    .. versionchanged:: 1.9.0
       Casting from numeric to string types in 'safe' casting mode requires
       that the string dtype length is long enough to store the max
       integer/float value converted.

    Raises
    ------
    ComplexWarning
        When casting from complex to float or int. To avoid this,
        one should use ``a.real.astype(t)``.

    Examples
    --------
    >>> x = np.array([1, 2, 2.5])
    >>> x
    array([1. , 2. , 2.5])
    >>> x.astype(int)
    array([1, 2, 2])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
    """
    a.byteswap(inplace=False)

    Swap the bytes of the array elements.

    Toggle between little-endian and big-endian data representation by
    returning a byteswapped array, optionally swapped in-place.
    Arrays of byte-strings are not swapped. The real and imaginary
    parts of a complex number are swapped individually.

    Parameters
    ----------
    inplace : bool, optional
        If ``True``, swap bytes in-place, default is ``False``.

    Returns
    -------
    out : ndarray
        The byteswapped array. If `inplace` is ``True``, this is
        a view to self.

    Examples
    --------
    >>> A = np.array([1, 256, 8755], dtype=np.int16)
    >>> list(map(hex, A))
    ['0x1', '0x100', '0x2233']
    >>> A.byteswap(inplace=True)
    array([  256,     1, 13090], dtype=int16)
    >>> list(map(hex, A))
    ['0x100', '0x1', '0x3322']

    Arrays of byte-strings are not swapped

    >>> A = np.array([b'ceg', b'fac'])
    >>> A.byteswap()
    array([b'ceg', b'fac'], dtype='|S3')

    ``A.newbyteorder().byteswap()`` produces an array with the same values
    but different representation in memory

    >>> A = np.array([1, 2, 3])
    >>> A.view(np.uint8)
    array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
           0, 0, 0, 0], dtype=uint8)
    >>> A.newbyteorder().byteswap(inplace=True)
    array([1, 2, 3])
    >>> A.view(np.uint8)
    array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0,
           0, 0, 0, 3], dtype=uint8)

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
    """
    a.choose(choices, out=None, mode='raise')

    Use an index array to construct a new array from a set of choices.

    Refer to `numpy.choose` for full documentation.
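    For example, each element of `a` picks, position-wise, from the
    corresponding choice sequence:

    >>> a = np.array([0, 1, 2, 1])
    >>> a.choose([[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]])
    array([10, 21, 32, 23])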
See Also -------- numpy.choose : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', """ a.clip(min=None, max=None, out=None, **kwargs) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. Refer to `numpy.clip` for full documentation. See Also -------- numpy.clip : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', """ a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. Refer to `numpy.compress` for full documentation. See Also -------- numpy.compress : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', """ a.conj() Complex-conjugate all elements. Refer to `numpy.conjugate` for full documentation. See Also -------- numpy.conjugate : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', """ a.conjugate() Return the complex conjugate, element-wise. Refer to `numpy.conjugate` for full documentation. See Also -------- numpy.conjugate : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', """ a.copy(order='C') Return a copy of the array. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :func:`numpy.copy` are very similar, but have different default values for their order= arguments.) See also -------- numpy.copy numpy.copyto Examples -------- >>> x = np.array([[1,2,3],[4,5,6]], order='F') >>> y = x.copy() >>> x.fill(0) >>> x array([[0, 0, 0], [0, 0, 0]]) >>> y array([[1, 2, 3], [4, 5, 6]]) >>> y.flags['C_CONTIGUOUS'] True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', """ a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. Refer to `numpy.cumprod` for full documentation. See Also -------- numpy.cumprod : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', """ a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. Refer to `numpy.cumsum` for full documentation. See Also -------- numpy.cumsum : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', """ a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. In NumPy 1.9 the returned array is a read-only view instead of a copy as in previous NumPy versions. In a future version the read-only restriction will be removed. Refer to :func:`numpy.diagonal` for full documentation. See Also -------- numpy.diagonal : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', """ a.dot(b, out=None) Dot product of two arrays. Refer to `numpy.dot` for full documentation. See Also -------- numpy.dot : equivalent function Examples -------- >>> a = np.eye(2) >>> b = np.ones((2, 2)) * 2 >>> a.dot(b) array([[2., 2.], [2., 2.]]) This array method can be conveniently chained: >>> a.dot(b).dot(b) array([[8., 8.], [8., 8.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', """a.dump(file) Dump a pickle of the array to the specified file. The array can be read back with pickle.load or numpy.load. Parameters ---------- file : str or Path A string naming the dump file. .. versionchanged:: 1.17.0 `pathlib.Path` objects are now accepted. 
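    Examples
    --------
    A small round trip through a pickle file; the file name here is only
    illustrative, and note that `numpy.load` needs ``allow_pickle=True``
    to read pickled arrays back:

    >>> a = np.array([1, 2, 3])
    >>> a.dump('a_dump.pkl')                      # doctest: +SKIP
    >>> np.load('a_dump.pkl', allow_pickle=True)  # doctest: +SKIP
    array([1, 2, 3])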
""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', """ a.dumps() Returns the pickle of the array as a string. pickle.loads or numpy.loads will convert the string back to an array. Parameters ---------- None """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', """ a.fill(value) Fill the array with a scalar value. Parameters ---------- value : scalar All elements of `a` will be assigned this value. Examples -------- >>> a = np.array([1, 2]) >>> a.fill(0) >>> a array([0, 0]) >>> a = np.empty(2) >>> a.fill(1) >>> a array([1., 1.]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', """ a.flatten(order='C') Return a copy of the array collapsed into one dimension. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran- style) order. 'A' means to flatten in column-major order if `a` is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten `a` in the order the elements occur in memory. The default is 'C'. Returns ------- y : ndarray A copy of the input array, flattened to one dimension. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the array. Examples -------- >>> a = np.array([[1,2], [3,4]]) >>> a.flatten() array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', """ a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. A field is a view of the array data with a given data-type. The values in the view are determined by the given type and the offset into the current array in bytes. The offset needs to be such that the view dtype fits in the array dtype; for example an array of dtype complex128 has 16-byte elements. If taking a view with a 32-bit integer (4 bytes), the offset needs to be between 0 and 12 bytes. Parameters ---------- dtype : str or dtype The data type of the view. The dtype size of the view can not be larger than that of the array itself. offset : int Number of bytes to skip before beginning the element view. Examples -------- >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x array([[1.+1.j, 0.+0.j], [0.+0.j, 2.+4.j]]) >>> x.getfield(np.float64) array([[1., 0.], [0., 2.]]) By choosing an offset of 8 bytes we can select the complex part of the array for our view: >>> x.getfield(np.float64, offset=8) array([[1., 0.], [0., 4.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('item', """ a.item(*args) Copy an element of an array to a standard Python scalar and return it. Parameters ---------- \\*args : Arguments (variable number and type) * none: in this case, the method only works for arrays with one element (`a.size == 1`), which element is copied into a standard Python scalar object and returned. * int_type: this argument is interpreted as a flat index into the array, specifying which element to copy and return. * tuple of int_types: functions as does a single int_type argument, except that the argument is interpreted as an nd-index into the array. Returns ------- z : Standard Python scalar object A copy of the specified element of the array as a suitable Python scalar Notes ----- When the data type of `a` is longdouble or clongdouble, item() returns a scalar array object because there is no available Python scalar that would not lose information. Void arrays return a buffer object for item(), unless fields are defined, in which case a tuple is returned. 
    `item` is very similar to a[args], except, instead of an array scalar,
    a standard Python scalar is returned. This can be useful for speeding up
    access to elements of the array and doing arithmetic on elements of the
    array using Python's optimized math.

    Examples
    --------
    >>> np.random.seed(123)
    >>> x = np.random.randint(9, size=(3, 3))
    >>> x
    array([[2, 2, 6],
           [1, 3, 6],
           [1, 0, 1]])
    >>> x.item(3)
    1
    >>> x.item(7)
    0
    >>> x.item((0, 1))
    2
    >>> x.item((2, 2))
    1

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
    """
    a.itemset(*args)

    Insert scalar into an array (scalar is cast to array's dtype, if possible)

    There must be at least 1 argument, and the last argument is defined as
    *item*.  Then, ``a.itemset(*args)`` is equivalent to but faster
    than ``a[args] = item``.  The item should be a scalar value and `args`
    must select a single item in the array `a`.

    Parameters
    ----------
    \\*args : Arguments
        If one argument: a scalar, only used in case `a` is of size 1.
        If two arguments: the last argument is the value to be set
        and must be a scalar, the first argument specifies a single array
        element location. It is either an int or a tuple.

    Notes
    -----
    Compared to indexing syntax, `itemset` provides some speed increase
    for placing a scalar into a particular location in an `ndarray`,
    if you must do this.  However, generally this is discouraged:
    among other problems, it complicates the appearance of the code.
    Also, when using `itemset` (and `item`) inside a loop, be sure
    to assign the methods to a local variable to avoid the attribute
    look-up at each loop iteration.

    Examples
    --------
    >>> np.random.seed(123)
    >>> x = np.random.randint(9, size=(3, 3))
    >>> x
    array([[2, 2, 6],
           [1, 3, 6],
           [1, 0, 1]])
    >>> x.itemset(4, 0)
    >>> x.itemset((2, 2), 9)
    >>> x
    array([[2, 2, 6],
           [1, 0, 6],
           [1, 0, 9]])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
    """
    a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)

    Return the maximum along a given axis.

    Refer to `numpy.amax` for full documentation.

    See Also
    --------
    numpy.amax : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
    """
    a.mean(axis=None, dtype=None, out=None, keepdims=False)

    Returns the average of the array elements along given axis.

    Refer to `numpy.mean` for full documentation.

    See Also
    --------
    numpy.mean : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
    """
    a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)

    Return the minimum along a given axis.

    Refer to `numpy.amin` for full documentation.

    See Also
    --------
    numpy.amin : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
    """
    arr.newbyteorder(new_order='S')

    Return the array with the same data viewed with a different byte order.

    Equivalent to::

        arr.view(arr.dtype.newbyteorder(new_order))

    Changes are also made in all fields and sub-arrays of the array data
    type.

    Parameters
    ----------
    new_order : string, optional
        Byte order to force; a value from the byte order specifications
        below. `new_order` codes can be any of:

        * 'S' - swap dtype from current to opposite endian
        * {'<', 'L'} - little endian
        * {'>', 'B'} - big endian
        * {'=', 'N'} - native order
        * {'|', 'I'} - ignore (no change to byte order)

        The default value ('S') results in swapping the current
        byte order. The code does a case-insensitive check on the first
        letter of `new_order` for the alternatives above. For example,
        any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns ------- new_arr : array New array object with the dtype reflecting given change to the byte order. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', """ a.nonzero() Return the indices of the elements that are non-zero. Refer to `numpy.nonzero` for full documentation. See Also -------- numpy.nonzero : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', """ a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True) Return the product of the array elements over the given axis Refer to `numpy.prod` for full documentation. See Also -------- numpy.prod : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', """ a.ptp(axis=None, out=None, keepdims=False) Peak to peak (maximum - minimum) value along a given axis. Refer to `numpy.ptp` for full documentation. See Also -------- numpy.ptp : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('put', """ a.put(indices, values, mode='raise') Set ``a.flat[n] = values[n]`` for all `n` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', """ a.ravel([order]) Return a flattened array. Refer to `numpy.ravel` for full documentation. See Also -------- numpy.ravel : equivalent function ndarray.flat : a flat iterator on the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', """ a.repeat(repeats, axis=None) Repeat elements of an array. Refer to `numpy.repeat` for full documentation. See Also -------- numpy.repeat : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', """ a.reshape(shape, order='C') Returns an array containing the same data with a new shape. Refer to `numpy.reshape` for full documentation. See Also -------- numpy.reshape : equivalent function Notes ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. For example, ``a.reshape(10, 11)`` is equivalent to ``a.reshape((10, 11))``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', """ a.resize(new_shape, refcheck=True) Change shape and size of array in-place. Parameters ---------- new_shape : tuple of ints, or `n` ints Shape of resized array. refcheck : bool, optional If False, reference count will not be checked. Default is True. Returns ------- None Raises ------ ValueError If `a` does not own its own data or references or views to it exist, and the data memory must be changed. PyPy only: will always raise if the data memory must be changed, since there is no reliable way to determine if references or views to it exist. SystemError If the `order` keyword argument is specified. This behaviour is a bug in NumPy. See Also -------- resize : Return a new array with the specified shape. Notes ----- This reallocates space for the data area if necessary. Only contiguous arrays (data elements consecutive in memory) can be resized. The purpose of the reference count check is to make sure you do not use this array as a buffer for another Python object and then reallocate the memory. However, reference counts can increase in other ways so if you are sure that you have not shared the memory for this array with another Python object, then you may safely set `refcheck` to False. 
Examples -------- Shrinking an array: array is flattened (in the order that the data are stored in memory), resized, and reshaped: >>> a = np.array([[0, 1], [2, 3]], order='C') >>> a.resize((2, 1)) >>> a array([[0], [1]]) >>> a = np.array([[0, 1], [2, 3]], order='F') >>> a.resize((2, 1)) >>> a array([[0], [2]]) Enlarging an array: as above, but missing entries are filled with zeros: >>> b = np.array([[0, 1], [2, 3]]) >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple >>> b array([[0, 1, 2], [3, 0, 0]]) Referencing an array prevents resizing... >>> c = a >>> a.resize((1, 1)) Traceback (most recent call last): ... ValueError: cannot resize an array that references or is referenced ... Unless `refcheck` is False: >>> a.resize((1, 1), refcheck=False) >>> a array([[0]]) >>> c array([[0]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('round', """ a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. Refer to `numpy.around` for full documentation. See Also -------- numpy.around : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', """ a.searchsorted(v, side='left', sorter=None) Find indices where elements of v should be inserted in a to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', """ a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. Place `val` into `a`'s field defined by `dtype` and beginning `offset` bytes into the field. Parameters ---------- val : object Value to be placed in field. dtype : dtype object Data-type of the field in which to place `val`. offset : int, optional The number of bytes into the field at which to place `val`. Returns ------- None See Also -------- getfield Examples -------- >>> x = np.eye(3) >>> x.getfield(np.float64) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) >>> x.setfield(3, np.int32) >>> x.getfield(np.int32) array([[3, 3, 3], [3, 3, 3], [3, 3, 3]], dtype=int32) >>> x array([[1.0e+000, 1.5e-323, 1.5e-323], [1.5e-323, 1.0e+000, 1.5e-323], [1.5e-323, 1.5e-323, 1.0e+000]]) >>> x.setfield(np.eye(3), np.int32) >>> x array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', """ a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), respectively. These Boolean-valued flags affect how numpy interprets the memory area used by `a` (see Notes below). The ALIGNED flag can only be set to True if the data is actually aligned according to the type. The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set to True. The flag WRITEABLE can only be set to True if the array owns its own memory, or the ultimate owner of the memory exposes a writeable buffer interface, or is a string. (The exception for string is made so that unpickling can be done without copying memory.) Parameters ---------- write : bool, optional Describes whether or not `a` can be written to. align : bool, optional Describes whether or not `a` is aligned properly for its type. uic : bool, optional Describes whether or not `a` is a copy of another "base" array. Notes ----- Array flags provide information about how the memory area used for the array is to be interpreted. 
There are 7 Boolean flags in use, only four of which can be changed by the user: WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. WRITEABLE (W) the data area can be written to; ALIGNED (A) the data and strides are aligned appropriately for the hardware (as determined by the compiler); UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced by .base). When the C-API function PyArray_ResolveWritebackIfCopy is called, the base array will be updated with the contents of this array. All flags can be accessed using the single (upper case) letter as well as the full name. Examples -------- >>> y = np.array([[3, 1, 7], ... [2, 0, 0], ... [8, 5, 9]]) >>> y array([[3, 1, 7], [2, 0, 0], [8, 5, 9]]) >>> y.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(write=0, align=0) >>> y.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : False ALIGNED : False WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(uic=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: cannot set WRITEBACKIFCOPY flag to True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', """ a.sort(axis=-1, kind=None, order=None) Sort an array in-place. Refer to `numpy.sort` for full documentation. Parameters ---------- axis : int, optional Axis along which to sort. Default is -1, which means sort along the last axis. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. The default is 'quicksort'. Note that both 'stable' and 'mergesort' use timsort under the covers and, in general, the actual implementation will vary with datatype. The 'mergesort' option is retained for backwards compatibility. .. versionchanged:: 1.15.0. The 'stable' option was added. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. See Also -------- numpy.sort : Return a sorted copy of an array. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in sorted array. partition: Partial sort. Notes ----- See `numpy.sort` for notes on the different sorting algorithms. Examples -------- >>> a = np.array([[1,4], [3,1]]) >>> a.sort(axis=1) >>> a array([[1, 4], [1, 3]]) >>> a.sort(axis=0) >>> a array([[1, 3], [1, 4]]) Use the `order` keyword to specify a field to use when sorting a structured array: >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) >>> a.sort(order='y') >>> a array([(b'c', 1), (b'a', 2)], dtype=[('x', 'S1'), ('y', '<i8')]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', """ a.partition(kth, axis=-1, kind='introselect', order=None) Rearranges the elements in the array in such a way that the value of the element in kth position is in the position it would be in a sorted array. All elements smaller than the kth element are moved before this element and all equal or greater are moved behind it. The ordering of the elements in the two partitions is undefined. .. versionadded:: 1.8.0 Parameters ---------- kth : int or sequence of ints Element index to partition by. 
    The kth element value will be in its final sorted position and all
        smaller elements will be moved before it and all equal or greater
        elements behind it. The order of all elements in the partitions is
        undefined. If provided with a sequence of kth values, it will
        partition all elements indexed by those kth values into their
        sorted position at once.
    axis : int, optional
        Axis along which to sort. Default is -1, which means sort along the
        last axis.
    kind : {'introselect'}, optional
        Selection algorithm. Default is 'introselect'.
    order : str or list of str, optional
        When `a` is an array with fields defined, this argument specifies
        which fields to compare first, second, etc. A single field can
        be specified as a string, and not all fields need to be specified,
        but unspecified fields will still be used, in the order in which
        they come up in the dtype, to break ties.

    See Also
    --------
    numpy.partition : Return a partitioned copy of an array.
    argpartition : Indirect partition.
    sort : Full sort.

    Notes
    -----
    See ``np.partition`` for notes on the different algorithms.

    Examples
    --------
    >>> a = np.array([3, 4, 2, 1])
    >>> a.partition(3)
    >>> a
    array([2, 1, 3, 4])

    >>> a.partition((1, 3))
    >>> a
    array([1, 2, 3, 4])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
    """
    a.squeeze(axis=None)

    Remove single-dimensional entries from the shape of `a`.

    Refer to `numpy.squeeze` for full documentation.

    See Also
    --------
    numpy.squeeze : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
    """
    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)

    Returns the standard deviation of the array elements along given axis.

    Refer to `numpy.std` for full documentation.

    See Also
    --------
    numpy.std : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
    """
    a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)

    Return the sum of the array elements over the given axis.

    Refer to `numpy.sum` for full documentation.

    See Also
    --------
    numpy.sum : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
    """
    a.swapaxes(axis1, axis2)

    Return a view of the array with `axis1` and `axis2` interchanged.

    Refer to `numpy.swapaxes` for full documentation.

    See Also
    --------
    numpy.swapaxes : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
    """
    a.take(indices, axis=None, out=None, mode='raise')

    Return an array formed from the elements of `a` at the given indices.

    Refer to `numpy.take` for full documentation.

    See Also
    --------
    numpy.take : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
    """
    a.tofile(fid, sep="", format="%s")

    Write array to a file as text or binary (default).

    Data is always written in 'C' order, independent of the order of `a`.
    The data produced by this method can be recovered using the function
    fromfile().

    Parameters
    ----------
    fid : file or str or Path
        An open file object, or a string containing a filename.

        .. versionchanged:: 1.17.0
            `pathlib.Path` objects are now accepted.

    sep : str
        Separator between array items for text output.
        If "" (empty), a binary file is written, equivalent to
        ``file.write(a.tobytes())``.
    format : str
        Format string for text file output.
        Each entry in the array is formatted to text by first converting
        it to the closest Python type, and then using "format" % item.

    Notes
    -----
    This is a convenience function for quick storage of array data.
    Information on endianness and precision is lost, so this method is not a
    good choice for files intended to archive data or transport data between
    machines with different endianness. Some of these problems can be overcome
    by outputting the data as text files, at the expense of speed and file
    size.

    When fid is a file object, array contents are directly written to the
    file, bypassing the file object's ``write`` method. As a result, tofile
    cannot be used with file objects supporting compression (e.g., GzipFile)
    or file-like objects that do not support ``fileno()`` (e.g., BytesIO).

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
    """
    a.tolist()

    Return the array as an ``a.ndim``-levels deep nested list of Python scalars.

    Return a copy of the array data as a (nested) Python list.
    Data items are converted to the nearest compatible builtin Python type,
    via the `~numpy.ndarray.item` function.

    If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
    not be a list at all, but a simple Python scalar.

    Parameters
    ----------
    none

    Returns
    -------
    y : object, or list of object, or list of list of object, or ...
        The possibly nested list of array elements.

    Notes
    -----
    The array may be recreated via ``a = np.array(a.tolist())``, although this
    may sometimes lose precision.

    Examples
    --------
    For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:

    >>> a = np.array([1, 2])
    >>> list(a)
    [1, 2]
    >>> a.tolist()
    [1, 2]

    However, for a 2D array, ``tolist`` applies recursively:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> list(a)
    [array([1, 2]), array([3, 4])]
    >>> a.tolist()
    [[1, 2], [3, 4]]

    The base case for this recursion is a 0D array:

    >>> a = np.array(1)
    >>> list(a)
    Traceback (most recent call last):
      ...
    TypeError: iteration over a 0-d array
    >>> a.tolist()
    1
    """))


tobytesdoc = """
    a.{name}(order='C')

    Construct Python bytes containing the raw data bytes in the array.

    Constructs Python bytes showing a copy of the raw contents of
    data memory. The bytes object can be produced in either 'C' or 'Fortran',
    or 'Any' order (the default is 'C'-order). 'Any' order means C-order
    unless the F_CONTIGUOUS flag in the array is set, in which case it
    means 'Fortran' order.

    {deprecated}

    Parameters
    ----------
    order : {{'C', 'F', None}}, optional
        Order of the data for multidimensional arrays:
        C, Fortran, or the same as for the original array.

    Returns
    -------
    s : bytes
        Python bytes exhibiting a copy of `a`'s raw data.

    Examples
    --------
    >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
    >>> x.tobytes()
    b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
    >>> x.tobytes('C') == x.tobytes()
    True
    >>> x.tobytes('F')
    b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'

    """

add_newdoc('numpy.core.multiarray', 'ndarray',
           ('tostring', tobytesdoc.format(name='tostring',
                                          deprecated=
                                          'This function is a compatibility '
                                          'alias for tobytes. Despite its '
                                          'name it returns bytes not '
                                          'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
           ('tobytes', tobytesdoc.format(name='tobytes',
                                         deprecated='.. versionadded:: 1.9.0')))

add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
    """
    a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)

    Return the sum along diagonals of the array.

    Refer to `numpy.trace` for full documentation.

    See Also
    --------
    numpy.trace : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
    """
    a.transpose(*axes)

    Returns a view of the array with axes transposed.

    For a 1-D array this has no effect, as a transposed vector is simply the
    same vector.
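    For example:

    >>> v = np.array([1, 2, 3, 4])
    >>> v.transpose()
    array([1, 2, 3, 4])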
    To convert a 1-D array into a 2D column vector, an additional
    dimension must be added. ``np.atleast_2d(a).T`` achieves this, as does
    ``a[:, np.newaxis]``.
    For a 2-D array, this is a standard matrix transpose.
    For an n-D array, if axes are given, their order indicates how the
    axes are permuted (see Examples). If axes are not provided and
    ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
    ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.

    Parameters
    ----------
    axes : None, tuple of ints, or `n` ints

     * None or no argument: reverses the order of the axes.

     * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
       `i`-th axis becomes `a.transpose()`'s `j`-th axis.

     * `n` ints: same as an n-tuple of the same ints (this form is
       intended simply as a "convenience" alternative to the tuple form)

    Returns
    -------
    out : ndarray
        View of `a`, with axes suitably permuted.

    See Also
    --------
    ndarray.T : Array property returning the array transposed.
    ndarray.reshape : Give a new shape to an array without changing its data.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> a
    array([[1, 2],
           [3, 4]])
    >>> a.transpose()
    array([[1, 3],
           [2, 4]])
    >>> a.transpose((1, 0))
    array([[1, 3],
           [2, 4]])
    >>> a.transpose(1, 0)
    array([[1, 3],
           [2, 4]])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
    """
    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)

    Returns the variance of the array elements, along given axis.

    Refer to `numpy.var` for full documentation.

    See Also
    --------
    numpy.var : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
    """
    a.view(dtype=None, type=None)

    New view of array with the same data.

    Parameters
    ----------
    dtype : data-type or ndarray sub-class, optional
        Data-type descriptor of the returned view, e.g., float32 or int16.
        The default, None, results in the view having the same data-type
        as `a`. This argument can also be specified as an ndarray sub-class,
        which then specifies the type of the returned object (this is
        equivalent to setting the ``type`` parameter).
    type : Python type, optional
        Type of the returned view, e.g., ndarray or matrix. Again, the
        default None results in type preservation.

    Notes
    -----
    ``a.view()`` is used two different ways:

    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
    of the array's memory with a different data-type. This can cause a
    reinterpretation of the bytes of memory.

    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
    returns an instance of `ndarray_subclass` that looks at the same array
    (same shape, dtype, etc.)  This does not cause a reinterpretation of the
    memory.

    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
    bytes per entry than the previous dtype (for example, converting a regular
    array to a structured array), then the behavior of the view cannot be
    predicted just from the superficial appearance of ``a`` (shown by
    ``print(a)``). It also depends on exactly how ``a`` is stored in memory.
    Therefore if ``a`` is C-ordered versus fortran-ordered, versus defined as
    a slice or transpose, etc., the view may give different results.
Examples -------- >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) Viewing array data using a different type and dtype: >>> y = x.view(dtype=np.int16, type=np.matrix) >>> y matrix([[513]], dtype=int16) >>> print(type(y)) <class 'numpy.matrix'> Creating a view on a structured array so it can be used in calculations >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) >>> xv = x.view(dtype=np.int8).reshape(-1,2) >>> xv array([[1, 2], [3, 4]], dtype=int8) >>> xv.mean(0) array([2., 3.]) Making changes to the view changes the underlying array >>> xv[0,1] = 20 >>> x array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) Using a view to convert an array to a recarray: >>> z = x.view(np.recarray) >>> z.a array([1, 3], dtype=int8) Views share data: >>> x[0] = (9, 10) >>> z[0] (9, 10) Views that change the dtype size (bytes per entry) should normally be avoided on arrays defined by slices, transposes, fortran-ordering, etc.: >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) >>> y = x[:, 0:2] >>> y array([[1, 2], [4, 5]], dtype=int16) >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) Traceback (most recent call last): ... ValueError: To change to a dtype of a different size, the array must be C-contiguous >>> z = y.copy() >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) array([[(1, 2)], [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')]) """)) ############################################################################## # # umath functions # ############################################################################## add_newdoc('numpy.core.umath', 'frompyfunc', """ frompyfunc(func, nin, nout) Takes an arbitrary Python function and returns a NumPy ufunc. Can be used, for example, to add broadcasting to a built-in Python function (see Examples section). Parameters ---------- func : Python function object An arbitrary Python function. nin : int The number of input arguments. nout : int The number of objects returned by `func`. Returns ------- out : ufunc Returns a NumPy universal function (``ufunc``) object. See Also -------- vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy Notes ----- The returned ufunc always returns PyObject arrays. Examples -------- Use frompyfunc to add broadcasting to the Python function ``oct``: >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) array(['0o12', '0o36', '0o144'], dtype=object) >>> np.array((oct(10), oct(30), oct(100))) # for comparison array(['0o12', '0o36', '0o144'], dtype='<U5') """) add_newdoc('numpy.core.umath', 'geterrobj', """ geterrobj() Return the current object that defines floating-point error handling. The error object contains all information that defines the error handling behavior in NumPy. `geterrobj` is used internally by the other functions that get and set error handling behavior (`geterr`, `seterr`, `geterrcall`, `seterrcall`). Returns ------- errobj : list The error object, a list containing three elements: [internal numpy buffer size, error mask, error callback function]. The error mask is a single integer that holds the treatment information on all four floating point errors. The information for each error type is contained in three bits of the integer. If we print it in base 8, we can see what treatment is set for "invalid", "under", "over", and "divide" (in that order). 
The printed string can be interpreted with * 0 : 'ignore' * 1 : 'warn' * 2 : 'raise' * 3 : 'call' * 4 : 'print' * 5 : 'log' See Also -------- seterrobj, seterr, geterr, seterrcall, geterrcall getbufsize, setbufsize Notes ----- For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- >>> np.geterrobj() # first get the defaults [8192, 521, None] >>> def err_handler(type, flag): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> old_bufsize = np.setbufsize(20000) >>> old_err = np.seterr(divide='raise') >>> old_handler = np.seterrcall(err_handler) >>> np.geterrobj() [8192, 521, <function err_handler at 0x91dcaac>] >>> old_err = np.seterr(all='ignore') >>> np.base_repr(np.geterrobj()[1], 8) '0' >>> old_err = np.seterr(divide='warn', over='log', under='call', ... invalid='print') >>> np.base_repr(np.geterrobj()[1], 8) '4351' """) add_newdoc('numpy.core.umath', 'seterrobj', """ seterrobj(errobj) Set the object that defines floating-point error handling. The error object contains all information that defines the error handling behavior in NumPy. `seterrobj` is used internally by the other functions that set error handling behavior (`seterr`, `seterrcall`). Parameters ---------- errobj : list The error object, a list containing three elements: [internal numpy buffer size, error mask, error callback function]. The error mask is a single integer that holds the treatment information on all four floating point errors. The information for each error type is contained in three bits of the integer. If we print it in base 8, we can see what treatment is set for "invalid", "under", "over", and "divide" (in that order). The printed string can be interpreted with * 0 : 'ignore' * 1 : 'warn' * 2 : 'raise' * 3 : 'call' * 4 : 'print' * 5 : 'log' See Also -------- geterrobj, seterr, geterr, seterrcall, geterrcall getbufsize, setbufsize Notes ----- For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- >>> old_errobj = np.geterrobj() # first get the defaults >>> old_errobj [8192, 521, None] >>> def err_handler(type, flag): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> new_errobj = [20000, 12, err_handler] >>> np.seterrobj(new_errobj) >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') '14' >>> np.geterr() {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} >>> np.geterrcall() is err_handler True """) ############################################################################## # # compiled_base functions # ############################################################################## add_newdoc('numpy.core.multiarray', 'add_docstring', """ add_docstring(obj, docstring) Add a docstring to a built-in obj if possible. If the obj already has a docstring raise a RuntimeError If this routine does not know how to add a docstring to the object raise a TypeError """) add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', """ add_ufunc_docstring(ufunc, new_docstring) Replace the docstring for a ufunc with new_docstring. This method will only work if the current docstring for the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) Parameters ---------- ufunc : numpy.ufunc A ufunc whose current doc is NULL. new_docstring : string The new docstring for the ufunc. Notes ----- This method allocates memory for new_docstring on the heap. 
    Technically this creates a memory leak, since this memory will not
    be reclaimed until the end of the program even if the ufunc itself
    is removed. However this will only be a problem if the user is
    repeatedly creating ufuncs with no documentation, adding documentation
    via add_newdoc_ufunc, and then throwing away the ufunc.
    """)

add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
    """
    format_float_OSprintf_g(val, precision)

    Print a floating point scalar using the system's printf function,
    equivalent to:

        printf("%.*g", precision, val);

    for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
    method is designed to help cross-validate the format_float_* methods.

    Parameters
    ----------
    val : python float or numpy floating scalar
        Value to format.

    precision : non-negative integer, optional
        Precision given to printf.

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_scientific
    format_float_positional
    """)


##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################

##############################################################################
#
# ufunc object
#
##############################################################################

add_newdoc('numpy.core', 'ufunc',
    """
    Functions that operate element by element on whole arrays.

    To see the documentation for a specific ufunc, use `info`.  For
    example, ``np.info(np.sin)``.  Because ufuncs are written in C
    (for speed) and linked into Python with NumPy's ufunc facility,
    Python's help() function finds this page whenever help() is called
    on a ufunc.

    A detailed explanation of ufuncs can be found in the docs for
    :ref:`ufuncs`.

    Calling ufuncs:
    ===============

    op(*x[, out], where=True, **kwargs)
    Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.

    The broadcasting rules are:

    * Dimensions of length 1 may be prepended to either array.
    * Arrays may be repeated along dimensions of length 1.

    Parameters
    ----------
    *x : array_like
        Input arrays.
    out : ndarray, None, or tuple of ndarray and None, optional
        Alternate array object(s) in which to put the result; if provided, it
        must have a shape that the inputs broadcast to. A tuple of arrays
        (possible only as a keyword argument) must have length equal to the
        number of outputs; use `None` for uninitialized outputs to be
        allocated by the ufunc.
    where : array_like, optional
        This condition is broadcast over the input. At locations where the
        condition is True, the `out` array will be set to the ufunc result.
        Elsewhere, the `out` array will retain its original value.
        Note that if an uninitialized `out` array is created via the default
        ``out=None``, locations within it where the condition is False will
        remain uninitialized.
    **kwargs
        For other keyword-only arguments, see the
        :ref:`ufunc docs <ufuncs.kwargs>`.

    Returns
    -------
    r : ndarray or tuple of ndarray
        `r` will have the shape that the arrays in `x` broadcast to; if `out`
        is provided, it will be returned. If not, `r` will be allocated and
        may contain uninitialized values. If the function has more than one
        output, then the result will be a tuple of arrays.

    """)


##############################################################################
#
# ufunc attributes
#
##############################################################################

add_newdoc('numpy.core', 'ufunc', ('identity',
    """
    The identity value.
Data attribute containing the identity element for the ufunc, if it has one. If it does not, the attribute value is None. Examples -------- >>> np.add.identity 0 >>> np.multiply.identity 1 >>> np.power.identity 1 >>> print(np.exp.identity) None """)) add_newdoc('numpy.core', 'ufunc', ('nargs', """ The number of arguments. Data attribute containing the number of arguments the ufunc takes, including optional ones. Notes ----- Typically this value will be one more than what you might expect because all ufuncs take the optional "out" argument. Examples -------- >>> np.add.nargs 3 >>> np.multiply.nargs 3 >>> np.power.nargs 3 >>> np.exp.nargs 2 """)) add_newdoc('numpy.core', 'ufunc', ('nin', """ The number of inputs. Data attribute containing the number of arguments the ufunc treats as input. Examples -------- >>> np.add.nin 2 >>> np.multiply.nin 2 >>> np.power.nin 2 >>> np.exp.nin 1 """)) add_newdoc('numpy.core', 'ufunc', ('nout', """ The number of outputs. Data attribute containing the number of arguments the ufunc treats as output. Notes ----- Since all ufuncs can take output arguments, this will always be (at least) 1. Examples -------- >>> np.add.nout 1 >>> np.multiply.nout 1 >>> np.power.nout 1 >>> np.exp.nout 1 """)) add_newdoc('numpy.core', 'ufunc', ('ntypes', """ The number of types. The number of numerical NumPy types - of which there are 18 total - on which the ufunc can operate. See Also -------- numpy.ufunc.types Examples -------- >>> np.add.ntypes 18 >>> np.multiply.ntypes 18 >>> np.power.ntypes 17 >>> np.exp.ntypes 7 >>> np.remainder.ntypes 14 """)) add_newdoc('numpy.core', 'ufunc', ('types', """ Returns a list with types grouped input->output. Data attribute listing the data-type "Domain-Range" groupings the ufunc can deliver. The data-types are given using the character codes. See Also -------- numpy.ufunc.ntypes Examples -------- >>> np.add.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.multiply.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.power.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.exp.types ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] >>> np.remainder.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] """)) add_newdoc('numpy.core', 'ufunc', ('signature', """ Definition of the core elements a generalized ufunc operates on. The signature determines how the dimensions of each input/output array are split into core and loop dimensions: 1. Each dimension in the signature is matched to a dimension of the corresponding passed-in array, starting from the end of the shape tuple. 2. Core dimensions assigned to the same label in the signature must have exactly matching sizes; no broadcasting is performed. 3. The core dimensions are removed from all inputs and the remaining dimensions are broadcast together, defining the loop dimensions. Notes ----- Generalized ufuncs are used internally in many linalg functions, and in the testing suite; the examples below are taken from these. For ufuncs that operate on scalars, the signature is `None`, which is equivalent to '()' for every argument.
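For example, with the matrix multiplication signature ``(m,n),(n,p)->(m,p)`` (shown in the examples below), input shapes ``(3, 2, 4)`` and ``(4, 5)`` bind the core dimensions to ``m=2``, ``n=4`` and ``p=5``; the leading ``3`` is a loop dimension, so the output has shape ``(3, 2, 5)``.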
Examples -------- >>> np.core.umath_tests.matrix_multiply.signature '(m,n),(n,p)->(m,p)' >>> np.linalg._umath_linalg.det.signature '(m,m)->()' >>> np.add.signature is None True # equivalent to '(),()->()' """)) ############################################################################## # # ufunc methods # ############################################################################## add_newdoc('numpy.core', 'ufunc', ('reduce', """ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True) Reduces `a`'s dimension by one, by applying ufunc along one axis. Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = the result of iterating `j` over :math:`range(N_i)`, cumulatively applying ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. For a one-dimensional array, reduce produces results equivalent to: :: r = op.identity # op = ufunc for i in range(len(A)): r = op(r, A[i]) return r For example, add.reduce() is equivalent to sum(). Parameters ---------- a : array_like The array to act on. axis : None or int or tuple of ints, optional Axis or axes along which a reduction is performed. The default (`axis` = 0) is to perform a reduction over the first dimension of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is `None`, a reduction is performed over all the axes. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. For operations which are either not commutative or not associative, doing a reduction over multiple axes is not well-defined. The ufuncs do not currently raise an exception in this case, but will likely do so in the future. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data-type of the output array if this is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for the keyword argument. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.7.0 initial : scalar, optional The value with which to start the reduction. If the ufunc has no identity or the dtype is object, this defaults to None - otherwise it defaults to ufunc.identity. If ``None`` is given, the first element of the reduction is used, and an error is thrown if the reduction is empty. .. versionadded:: 1.15.0 where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of `a`, and selects elements to include in the reduction. Note that for ufuncs like ``minimum`` that do not have an identity defined, one has to pass in also ``initial``. .. versionadded:: 1.17.0 Returns ------- r : ndarray The reduced array. If `out` was supplied, `r` is a reference to it.
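Notes ----- For example, reducing an array of shape ``(2, 3, 4)`` with ``axis=(0, 2)`` removes the first and last axes and gives a result of shape ``(3,)``; with ``keepdims=True`` those axes are instead kept with size one, giving shape ``(1, 3, 1)``.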
Examples -------- >>> np.multiply.reduce([2,3,5]) 30 A multi-dimensional array example: >>> X = np.arange(8).reshape((2,2,2)) >>> X array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.add.reduce(X, 0) array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X) # confirm: default axis value is 0 array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X, 1) array([[ 2, 4], [10, 12]]) >>> np.add.reduce(X, 2) array([[ 1, 5], [ 9, 13]]) You can use the ``initial`` keyword argument to initialize the reduction with a different value, and ``where`` to select specific elements to include: >>> np.add.reduce([10], initial=5) 15 >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) array([14., 14.]) >>> a = np.array([10., np.nan, 10]) >>> np.add.reduce(a, where=~np.isnan(a)) 20.0 Allows reductions of empty arrays where they would normally fail, i.e. for ufuncs without an identity. >>> np.minimum.reduce([], initial=np.inf) inf >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) array([ 1., 10.]) >>> np.minimum.reduce([]) Traceback (most recent call last): ... ValueError: zero-size array to reduction operation minimum which has no identity """)) add_newdoc('numpy.core', 'ufunc', ('accumulate', """ accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. For a one-dimensional array, accumulate produces results equivalent to:: r = np.empty(len(A)) t = op.identity # op = the ufunc being applied to A's elements for i in range(len(A)): t = op(t, A[i]) r[i] = t return r For example, add.accumulate() is equivalent to np.cumsum(). For a multi-dimensional array, accumulate is applied along only one axis (axis zero by default; see Examples below) so repeated use is necessary if one wants to accumulate over multiple axes. Parameters ---------- array : array_like The array to act on. axis : int, optional The axis along which to apply the accumulation; default is zero. dtype : data-type code, optional The data-type used to represent the intermediate results. Defaults to the data-type of the output array if such is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for the keyword argument. Returns ------- r : ndarray The accumulated values. If `out` was supplied, `r` is a reference to `out`. Examples -------- 1-D array examples: >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) array([ 2, 6, 30]) 2-D array examples: >>> I = np.eye(2) >>> I array([[1., 0.], [0., 1.]]) Accumulate along axis 0 (rows), down columns: >>> np.add.accumulate(I, 0) array([[1., 0.], [1., 1.]]) >>> np.add.accumulate(I) # no axis specified = axis zero array([[1., 0.], [1., 1.]]) Accumulate along axis 1 (columns), through rows: >>> np.add.accumulate(I, 1) array([[1., 1.], [0., 1.]]) """)) add_newdoc('numpy.core', 'ufunc', ('reduceat', """ reduceat(a, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th generalized "row" parallel to `axis` in the final result (i.e., in a 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if `axis = 1`, it becomes the i-th column). There are three exceptions to this: * when ``i = len(indices) - 1`` (so for the last index), ``indices[i+1] = a.shape[axis]``. * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is simply ``a[indices[i]]``. * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. The shape of the output depends on the size of `indices`, and may be larger than `a` (this happens if ``len(indices) > a.shape[axis]``). Parameters ---------- a : array_like The array to act on. indices : array_like Paired indices, comma separated (not colon), specifying slices to reduce. axis : int, optional The axis along which to apply the reduceat. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data type of the output array if this is provided, or the data type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for the keyword argument. Returns ------- r : ndarray The reduced values. If `out` was supplied, `r` is a reference to `out`. Notes ----- A descriptive example: If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as ``ufunc.reduceat(a, indices)[::2]`` where `indices` is ``range(len(array) - 1)`` with a zero placed in every other element: ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. Don't be fooled by this method's name: `reduceat(a)` is not necessarily smaller than `a`. Examples -------- To take the running sum of four successive values: >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) A 2-D example: >>> x = np.linspace(0, 15, 16).reshape(4,4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) :: # reduce such that the result has the following five rows: # [row1 + row2 + row3] # [row4] # [row2] # [row3] # [row1 + row2 + row3 + row4] >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) array([[12., 15., 18., 21.], [12., 13., 14., 15.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [24., 28., 32., 36.]]) :: # reduce such that result has the following two columns: # [col1 * col2 * col3, col4] >>> np.multiply.reduceat(x, [0, 3], 1) array([[ 0., 3.], [ 120., 7.], [ 720., 11.], [2184., 15.]]) """)) add_newdoc('numpy.core', 'ufunc', ('outer', """ outer(A, B, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of ``op.outer(A, B)`` is an array of dimension M + N such that: .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) For `A` and `B` one-dimensional, this is equivalent to:: r = empty((len(A), len(B))) for i in range(len(A)): for j in range(len(B)): r[i,j] = op(A[i], B[j]) # op = ufunc in question Parameters ---------- A : array_like First array B : array_like Second array kwargs : any Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns ------- r : ndarray Output array See Also -------- numpy.outer Examples -------- >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) array([[ 4, 5, 6], [ 8, 10, 12], [12, 15, 18]]) A multi-dimensional example: >>> A = np.array([[1, 2, 3], [4, 5, 6]]) >>> A.shape (2, 3) >>> B = np.array([[1, 2, 3, 4]]) >>> B.shape (1, 4) >>> C = np.multiply.outer(A, B) >>> C.shape; C (2, 3, 1, 4) array([[[[ 1, 2, 3, 4]], [[ 2, 4, 6, 8]], [[ 3, 6, 9, 12]]], [[[ 4, 8, 12, 16]], [[ 5, 10, 15, 20]], [[ 6, 12, 18, 24]]]]) """)) add_newdoc('numpy.core', 'ufunc', ('at', """ at(a, indices, b=None) Performs an unbuffered in-place operation on operand 'a' for elements specified by 'indices'. For the addition ufunc, this method is equivalent to ``a[indices] += b``, except that results are accumulated for elements that are indexed more than once. For example, ``a[[0,0]] += 1`` will only increment the first element once because of buffering, whereas ``add.at(a, [0,0], 1)`` will increment the first element twice. .. versionadded:: 1.8.0 Parameters ---------- a : array_like The array to perform the in-place operation on. indices : array_like or tuple Array-like index object or slice object for indexing into the first operand. If the first operand has multiple dimensions, indices can be a tuple of array-like index objects or slice objects. b : array_like Second operand for ufuncs requiring two operands. Operand must be broadcastable over the first operand after indexing or slicing. Examples -------- Set items 0 and 1 to their negative values: >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> a array([-1, -2, 3, 4]) Increment items 0 and 1, and increment item 2 twice: >>> a = np.array([1, 2, 3, 4]) >>> np.add.at(a, [0, 1, 2, 2], 1) >>> a array([2, 3, 5, 4]) Add items 0 and 1 in first array to second array, and store results in first array: >>> a = np.array([1, 2, 3, 4]) >>> b = np.array([1, 2]) >>> np.add.at(a, [0, 1], b) >>> a array([2, 4, 3, 4]) """)) ############################################################################## # # Documentation for dtype attributes and methods # ############################################################################## ############################################################################## # # dtype object # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', """ dtype(obj, align=False, copy=False) Create a data type object. A numpy array is homogeneous, and contains elements described by a dtype object. A dtype object can be constructed from different combinations of fundamental numeric types. Parameters ---------- obj Object to be converted to a data type object. align : bool, optional Add padding to the fields to match what a C compiler would output for a similar C-struct. Can be ``True`` only if `obj` is a dictionary or a comma-separated string. If a struct dtype is being created, this also sets a sticky alignment flag ``isalignedstruct``. copy : bool, optional Make a new copy of the data-type object. If ``False``, the result may just be a reference to a built-in data-type object.
See also -------- result_type Examples -------- Using array-scalar type: >>> np.dtype(np.int16) dtype('int16') Structured type, one field named 'f1', containing int16: >>> np.dtype([('f1', np.int16)]) dtype([('f1', '<i2')]) Structured type, one field named 'f1', in itself containing a structured type with one field: >>> np.dtype([('f1', [('f1', np.int16)])]) dtype([('f1', [('f1', '<i2')])]) Structured type, two fields: the first field contains an unsigned int, the second an int32: >>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) dtype([('f1', '<u8'), ('f2', '<i4')]) Using array-protocol type strings: >>> np.dtype([('a','f8'),('b','S10')]) dtype([('a', '<f8'), ('b', 'S10')]) Using comma-separated field formats. The shape is (2,3): >>> np.dtype("i4, (2,3)f8") dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))]) Using tuples. ``np.int64`` is a fixed type, 3 the field's shape. ``np.void`` is a flexible type, here of size 10: >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) dtype([('hello', '<i8', (3,)), ('world', 'V10')]) Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are the offsets in bytes: >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) Using dictionaries. Two fields named 'gender' and 'age': >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) dtype([('gender', 'S1'), ('age', 'u1')]) Offsets in bytes, here 0 and 25: >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) dtype([('surname', 'S25'), ('age', 'u1')]) """) ############################################################################## # # dtype attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', """ The required alignment (bytes) of this data-type according to the compiler. More information is available in the C-API section of the manual. Examples -------- >>> x = np.dtype('i4') >>> x.alignment 4 >>> x = np.dtype(float) >>> x.alignment 8 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', """ A character indicating the byte-order of this data-type object. One of: === ============== '=' native '<' little-endian '>' big-endian '|' not applicable === ============== All built-in data-type objects have byteorder either '=' or '|'. Examples -------- >>> dt = np.dtype('i2') >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers >>> np.dtype('i1').byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder '|' >>> # Even if specific code is given, and it is native >>> # '=' is the byteorder >>> import sys >>> sys_is_le = sys.byteorder == 'little' >>> native_code = sys_is_le and '<' or '>' >>> swapped_code = sys_is_le and '>' or '<' >>> dt = np.dtype(native_code + 'i2') >>> dt.byteorder '=' >>> # Swapped code shows up as itself >>> dt = np.dtype(swapped_code + 'i2') >>> dt.byteorder == swapped_code True """)) add_newdoc('numpy.core.multiarray', 'dtype', ('char', """A unique character code for each of the 21 different built-in types. Examples -------- >>> x = np.dtype(float) >>> x.char 'd' """)) add_newdoc('numpy.core.multiarray', 'dtype', ('descr', """ `__array_interface__` description of the data-type. The format is that required by the 'descr' key in the `__array_interface__` attribute. Warning: This attribute exists specifically for `__array_interface__`, and is not a datatype description compatible with `np.dtype`.
Examples -------- >>> x = np.dtype(float) >>> x.descr [('', '<f8')] >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.descr [('name', '<U16'), ('grades', '<f8', (2,))] """)) add_newdoc('numpy.core.multiarray', 'dtype', ('fields', """ Dictionary of named fields defined for this data type, or ``None``. The dictionary is indexed by keys that are the names of the fields. Each entry in the dictionary is a tuple fully describing the field:: (dtype, offset[, title]) Offset is limited to C int, which is signed and usually 32 bits. If present, the optional title can be any object (if it is a string or unicode then it will also be a key in the fields dictionary, otherwise it's meta-data). Notice also that the first two elements of the tuple can be passed directly as arguments to the ``ndarray.getfield`` and ``ndarray.setfield`` methods. See Also -------- ndarray.getfield, ndarray.setfield Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} """)) add_newdoc('numpy.core.multiarray', 'dtype', ('flags', """ Bit-flags describing how this data type is to be interpreted. Bit-masks are in `numpy.core.multiarray` as the constants `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation of these flags is in the C-API documentation; they are largely useful for user-defined data-types. The following example demonstrates that operations on this particular dtype require the Python C-API. Examples -------- >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.flags 16 >>> np.core.multiarray.NEEDS_PYAPI 16 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', """ Boolean indicating whether this dtype contains any reference-counted objects in any fields or sub-dtypes. Recall that what is actually in the ndarray memory representing the Python object is the memory address of that object (a pointer). Special handling may be required, and this attribute is useful for distinguishing data types that may contain arbitrary Python objects and data-types that won't. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', """ Integer indicating how this dtype relates to the built-in dtypes. Read-only. = ======================================================================== 0 if this is a structured array type, with fields 1 if this is a dtype compiled into numpy (such as ints, floats etc) 2 if the dtype is for a user-defined numpy type A user-defined type uses the numpy C-API machinery to extend numpy to handle a new array type. See :ref:`user.user-defined-data-types` in the NumPy manual. = ======================================================================== Examples -------- >>> dt = np.dtype('i2') >>> dt.isbuiltin 1 >>> dt = np.dtype('f8') >>> dt.isbuiltin 1 >>> dt = np.dtype([('field1', 'f8')]) >>> dt.isbuiltin 0 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', """ Boolean indicating whether the byte order of this dtype is native to the platform. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', """ Boolean indicating whether the dtype is a struct which maintains field alignment. This flag is sticky, so when combining multiple structs together, it is preserved and produces new dtypes which are also aligned. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', """ The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type. For the flexible data-types, this number can be anything. Examples -------- >>> arr = np.array([[1, 2], [3, 4]]) >>> arr.dtype dtype('int64') >>> arr.itemsize 8 >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.itemsize 80 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('kind', """ A character code (one of 'biufcmMOSUV') identifying the general kind of data. = ====================== b boolean i signed integer u unsigned integer f floating-point c complex floating-point m timedelta M datetime O object S (byte-)string U Unicode V void = ====================== Examples -------- >>> dt = np.dtype('i4') >>> dt.kind 'i' >>> dt = np.dtype('f8') >>> dt.kind 'f' >>> dt = np.dtype([('field1', 'f8')]) >>> dt.kind 'V' """)) add_newdoc('numpy.core.multiarray', 'dtype', ('name', """ A bit-width name for this data-type. Un-sized flexible data-type objects do not have this attribute. Examples -------- >>> x = np.dtype(float) >>> x.name 'float64' >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.name 'void640' """)) add_newdoc('numpy.core.multiarray', 'dtype', ('names', """ Ordered list of field names, or ``None`` if there are no fields. The names are ordered according to increasing byte offset. This can be used, for example, to walk through all of the named fields in offset order. Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.names ('name', 'grades') """)) add_newdoc('numpy.core.multiarray', 'dtype', ('num', """ A unique number for each of the 21 different built-in types. These are roughly ordered from least-to-most precision. Examples -------- >>> dt = np.dtype(str) >>> dt.num 19 >>> dt = np.dtype(float) >>> dt.num 12 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('shape', """ Shape tuple of the sub-array if this data type describes a sub-array, and ``()`` otherwise. Examples -------- >>> dt = np.dtype(('i4', 4)) >>> dt.shape (4,) >>> dt = np.dtype(('i4', (2, 3))) >>> dt.shape (2, 3) """)) add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', """ Number of dimensions of the sub-array if this data type describes a sub-array, and ``0`` otherwise. .. versionadded:: 1.13.0 Examples -------- >>> x = np.dtype(float) >>> x.ndim 0 >>> x = np.dtype((float, 8)) >>> x.ndim 1 >>> x = np.dtype(('i4', (3, 4))) >>> x.ndim 2 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('str', """The array-protocol typestring of this data-type object.""")) add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', """ Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and None otherwise. The *shape* is the fixed shape of the sub-array described by this data type, and *item_dtype* the data type of the array. If a field whose dtype object has this attribute is retrieved, then the extra dimensions implied by *shape* are tacked on to the end of the retrieved array. See Also -------- dtype.base Examples -------- >>> x = np.dtype('8f') >>> x.subdtype (dtype('float32'), (8,)) >>> x = np.dtype('i2') >>> x.subdtype >>> """)) add_newdoc('numpy.core.multiarray', 'dtype', ('base', """ Returns dtype for the base element of the subarrays, regardless of their dimension or shape.
See Also -------- dtype.subdtype Examples -------- >>> x = np.dtype('8f') >>> x.base dtype('float32') >>> x = np.dtype('i2') >>> x.base dtype('int16') """)) add_newdoc('numpy.core.multiarray', 'dtype', ('type', """The type object used to instantiate a scalar of this data-type.""")) ############################################################################## # # dtype methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', """ newbyteorder(new_order='S') Return a new dtype with a different byte order. Changes are also made in all fields and sub-arrays of the data type. Parameters ---------- new_order : string, optional Byte order to force; a value from the byte order specifications below. The default value ('S') results in swapping the current byte order. `new_order` codes can be any of: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) The code does a case-insensitive check on the first letter of `new_order` for these alternatives. For example, any of '>' or 'B' or 'b' or 'brian' are valid to specify big-endian. Returns ------- new_dtype : dtype New dtype object with the given change to the byte order. Notes ----- Changes are also made in all fields and sub-arrays of the data type. Examples -------- >>> import sys >>> sys_is_le = sys.byteorder == 'little' >>> native_code = sys_is_le and '<' or '>' >>> swapped_code = sys_is_le and '>' or '<' >>> native_dt = np.dtype(native_code+'i2') >>> swapped_dt = np.dtype(swapped_code+'i2') >>> native_dt.newbyteorder('S') == swapped_dt True >>> native_dt.newbyteorder() == swapped_dt True >>> native_dt == swapped_dt.newbyteorder('S') True >>> native_dt == swapped_dt.newbyteorder('=') True >>> native_dt == swapped_dt.newbyteorder('N') True >>> native_dt == native_dt.newbyteorder('|') True >>> np.dtype('<i2') == native_dt.newbyteorder('<') True >>> np.dtype('<i2') == native_dt.newbyteorder('L') True >>> np.dtype('>i2') == native_dt.newbyteorder('>') True >>> np.dtype('>i2') == native_dt.newbyteorder('B') True """)) ############################################################################## # # Datetime-related Methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'busdaycalendar', """ busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information defining valid days for the busday family of functions. The default valid days are Monday through Friday ("business days"). A busdaycalendar object can be specified with any set of weekly valid days, plus an optional set of "holiday" dates that will always be invalid. Once a busdaycalendar object is created, the weekmask and holidays cannot be modified. .. versionadded:: 1.7.0 Parameters ---------- weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates, no matter which weekday they fall upon.
Holiday dates may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. Returns ------- out : busdaycalendar A business day calendar object containing the specified weekmask and holidays values. See Also -------- is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. busday_count : Counts how many valid days are in a half-open date range. Attributes ---------- Note: once a busdaycalendar object is created, you cannot modify the weekmask or holidays. The attributes return copies of internal data. weekmask : (copy) seven-element array of bool holidays : (copy) sorted array of datetime64[D] Examples -------- >>> # Some important days in July ... bdd = np.busdaycalendar( ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) >>> # Default is Monday to Friday weekdays ... bdd.weekmask array([ True, True, True, True, True, False, False]) >>> # Any holidays already on the weekend are removed ... bdd.holidays array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') """) add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', """A copy of the seven-element boolean mask indicating valid days.""")) add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', """A copy of the holiday array indicating additional invalid days.""")) add_newdoc('numpy.core.multiarray', 'normalize_axis_index', """ normalize_axis_index(axis, ndim, msg_prefix=None) Normalizes an axis index, `axis`, such that it is a valid positive index into the shape of an array with `ndim` dimensions. Raises an AxisError with an appropriate message if this is not possible. Used internally by all axis-checking logic. .. versionadded:: 1.13.0 Parameters ---------- axis : int The un-normalized index of the axis. Can be negative. ndim : int The number of dimensions of the array that `axis` should be normalized against. msg_prefix : str A prefix to put before the message, typically the name of the argument. Returns ------- normalized_axis : int The normalized axis index, such that `0 <= normalized_axis < ndim` Raises ------ AxisError If the axis index is invalid, i.e., when `-ndim <= axis < ndim` is false. Examples -------- >>> normalize_axis_index(0, ndim=3) 0 >>> normalize_axis_index(1, ndim=3) 1 >>> normalize_axis_index(-1, ndim=3) 2 >>> normalize_axis_index(3, ndim=3) Traceback (most recent call last): ... AxisError: axis 3 is out of bounds for array of dimension 3 >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') Traceback (most recent call last): ... AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 """) add_newdoc('numpy.core.multiarray', 'datetime_data', """ datetime_data(dtype, /) Get information about the step size of a date or time type. The returned tuple can be passed as the second argument of `numpy.datetime64` and `numpy.timedelta64`. Parameters ---------- dtype : dtype The dtype object, which must be a `datetime64` or `timedelta64` type. Returns ------- unit : str The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype is based. count : int The number of base units in a step.
Examples -------- >>> dt_25s = np.dtype('timedelta64[25s]') >>> np.datetime_data(dt_25s) ('s', 25) >>> np.array(10, dt_25s).astype('timedelta64[s]') array(250, dtype='timedelta64[s]') The result can be used to construct a datetime that uses the same units as a timedelta >>> np.datetime64('2010', np.datetime_data(dt_25s)) numpy.datetime64('2010-01-01T00:00:00','25s') """) ############################################################################## # # Documentation for `generic` attributes and methods # ############################################################################## add_newdoc('numpy.core.numerictypes', 'generic', """ Base class for numpy scalar types. Class from which most (all?) numpy scalar types are derived. For consistency, exposes the same API as `ndarray`, despite many consequent attributes being either "get-only," or completely irrelevant. This is the class from which it is strongly suggested users should derive custom scalar types. """) # Attributes add_newdoc('numpy.core.numerictypes', 'generic', ('T', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('base', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', """Get array data-descriptor.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('flags', """The integer value of flags.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('flat', """A 1-D view of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('imag', """The imaginary part of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', """The length of one element in bytes.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', """The length of the scalar in bytes.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', """The number of array dimensions.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('real', """The real part of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('shape', """Tuple of array dimensions.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('size', """The number of elements in the gentype.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('strides', """Tuple of bytes steps in each dimension.""")) # Methods add_newdoc('numpy.core.numerictypes', 'generic', ('all', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('any', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest.
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('astype', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('choose', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('clip', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('compress', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('copy', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('dump', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('fill', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('item', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('max', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('mean', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('min', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', """ newbyteorder(new_order='S') Return a new `dtype` with a different byte order. Changes are also made in all fields and sub-arrays of the data type. The `new_order` code can be any from the following: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) Parameters ---------- new_order : str, optional Byte order to force; a value from the byte order specifications above. The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first letter of `new_order` for the alternatives above. For example, any of 'B' or 'b' or 'biggish' are valid to specify big-endian. Returns ------- new_dtype : dtype New `dtype` object with the given change to the byte order. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('prod', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('put', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('resize', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('round', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('sort', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('std', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('sum', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('take', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('trace', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('var', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('view', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See also the corresponding attribute of the derived class of interest. """)) ############################################################################## # # Documentation for scalar type abstract base classes in type hierarchy # ############################################################################## add_newdoc('numpy.core.numerictypes', 'number', """ Abstract base class of all numeric scalar types. """) add_newdoc('numpy.core.numerictypes', 'integer', """ Abstract base class of all integer scalar types. """) add_newdoc('numpy.core.numerictypes', 'signedinteger', """ Abstract base class of all signed integer scalar types. """) add_newdoc('numpy.core.numerictypes', 'unsignedinteger', """ Abstract base class of all unsigned integer scalar types. """) add_newdoc('numpy.core.numerictypes', 'inexact', """ Abstract base class of all numeric scalar types with a (potentially) inexact representation of the values in its range, such as floating-point numbers. """) add_newdoc('numpy.core.numerictypes', 'floating', """ Abstract base class of all floating-point scalar types. """) add_newdoc('numpy.core.numerictypes', 'complexfloating', """ Abstract base class of all complex number scalar types that are made up of floating-point numbers. """) add_newdoc('numpy.core.numerictypes', 'flexible', """ Abstract base class of all scalar types without predefined length. The actual size of these types depends on the specific `np.dtype` instantiation. """) add_newdoc('numpy.core.numerictypes', 'character', """ Abstract base class of all character string scalar types. 
""") ############################################################################## # # Documentation for concrete scalar classes # ############################################################################## def numeric_type_aliases(aliases): def type_aliases_gen(): for alias, doc in aliases: try: alias_type = getattr(_numerictypes, alias) except AttributeError: # The set of aliases that actually exist varies between platforms pass else: yield (alias_type, alias, doc) return list(type_aliases_gen()) possible_aliases = numeric_type_aliases([ ('int8', '8-bit signed integer (-128 to 127)'), ('int16', '16-bit signed integer (-32768 to 32767)'), ('int32', '32-bit signed integer (-2147483648 to 2147483647)'), ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'), ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), ('uint8', '8-bit unsigned integer (0 to 255)'), ('uint16', '16-bit unsigned integer (0 to 65535)'), ('uint32', '32-bit unsigned integer (0 to 4294967295)'), ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'), ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), ('float96', '96-bit extended-precision floating-point number type'), ('float128', '128-bit extended-precision floating-point number type'), ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), ]) def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): o = getattr(_numerictypes, obj) character_code = dtype(o).char canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj) alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases) alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc) for (alias_type, alias, doc) in possible_aliases if alias_type is o) docstring = """ {doc} Character code: ``'{character_code}'``. {canonical_name_doc}{alias_doc} """.format(doc=doc.strip(), character_code=character_code, canonical_name_doc=canonical_name_doc, alias_doc=alias_doc) add_newdoc('numpy.core.numerictypes', obj, docstring) add_newdoc_for_scalar_type('bool_', ['bool8'], """ Boolean type (True or False), stored as a byte. """) add_newdoc_for_scalar_type('byte', [], """ Signed integer type, compatible with C ``char``. """) add_newdoc_for_scalar_type('short', [], """ Signed integer type, compatible with C ``short``. """) add_newdoc_for_scalar_type('intc', [], """ Signed integer type, compatible with C ``int``. """) add_newdoc_for_scalar_type('int_', [], """ Signed integer type, compatible with Python `int` anc C ``long``. """) add_newdoc_for_scalar_type('longlong', [], """ Signed integer type, compatible with C ``long long``. """) add_newdoc_for_scalar_type('ubyte', [], """ Unsigned integer type, compatible with C ``unsigned char``. 
""") add_newdoc_for_scalar_type('ushort', [], """ Unsigned integer type, compatible with C ``unsigned short``. """) add_newdoc_for_scalar_type('uintc', [], """ Unsigned integer type, compatible with C ``unsigned int``. """) add_newdoc_for_scalar_type('uint', [], """ Unsigned integer type, compatible with C ``unsigned long``. """) add_newdoc_for_scalar_type('ulonglong', [], """ Signed integer type, compatible with C ``unsigned long long``. """) add_newdoc_for_scalar_type('half', [], """ Half-precision floating-point number type. """) add_newdoc_for_scalar_type('single', [], """ Single-precision floating-point number type, compatible with C ``float``. """) add_newdoc_for_scalar_type('double', ['float_'], """ Double-precision floating-point number type, compatible with Python `float` and C ``double``. """) add_newdoc_for_scalar_type('longdouble', ['longfloat'], """ Extended-precision floating-point number type, compatible with C ``long double`` but not necessarily with IEEE 754 quadruple-precision. """) add_newdoc_for_scalar_type('csingle', ['singlecomplex'], """ Complex number type composed of two single-precision floating-point numbers. """) add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'], """ Complex number type composed of two double-precision floating-point numbers, compatible with Python `complex`. """) add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'], """ Complex number type composed of two extended-precision floating-point numbers. """) add_newdoc_for_scalar_type('object_', [], """ Any Python object. """) # TODO: work out how to put this on the base class, np.floating for float_name in ('half', 'single', 'double', 'longdouble'): add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio', """ {ftype}.as_integer_ratio() -> (int, int) Return a pair of integers, whose ratio is exactly equal to the original floating point number, and with a positive denominator. Raise OverflowError on infinities and a ValueError on NaNs. >>> np.{ftype}(10.0).as_integer_ratio() (10, 1) >>> np.{ftype}(0.0).as_integer_ratio() (0, 1) >>> np.{ftype}(-.25).as_integer_ratio() (-1, 4) """.format(ftype=float_name)))
from __future__ import division, absolute_import, print_function import sys import os import shutil import mmap import pytest from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp from numpy import ( memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) from numpy.compat import Path from numpy import arange, allclose, asarray from numpy.testing import ( assert_, assert_equal, assert_array_equal, suppress_warnings ) class TestMemmap(object): def setup(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.tempdir = mkdtemp() self.shape = (3, 4) self.dtype = 'float32' self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) def teardown(self): self.tmpfp.close() shutil.rmtree(self.tempdir) def test_roundtrip(self): # Write data to file fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp # Test __del__ machinery, which handles cleanup # Read data back from file newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', shape=self.shape) assert_(allclose(self.data, newfp)) assert_array_equal(self.data, newfp) assert_equal(newfp.flags.writeable, False) def test_open_with_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp def test_unnamed_file(self): with TemporaryFile() as f: fp = memmap(f, dtype=self.dtype, shape=self.shape) del fp def test_attributes(self): offset = 1 mode = "w+" fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, shape=self.shape, offset=offset) assert_equal(offset, fp.offset) assert_equal(mode, fp.mode) del fp def test_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) abspath = os.path.abspath(tmpname) fp[:] = self.data[:] assert_equal(abspath, fp.filename) b = fp[:1] assert_equal(abspath, b.filename) del b del fp @pytest.mark.skipif(Path is None, reason="No pathlib.Path") def test_path(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', shape=self.shape) # os.path.realpath does not resolve symlinks on Windows # see: https://bugs.python.org/issue9949 # use Path.resolve, just as memmap class does internally abspath = str(Path(tmpname).resolve()) fp[:] = self.data[:] assert_equal(abspath, str(fp.filename.resolve())) b = fp[:1] assert_equal(abspath, str(b.filename.resolve())) del b del fp def test_filename_fileobj(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", shape=self.shape) assert_equal(fp.filename, self.tmpfp.name) @pytest.mark.skipif(sys.platform == 'gnu0', reason="Known to fail on hurd") def test_flush(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] assert_equal(fp[0], self.data[0]) fp.flush() def test_del(self): # Make sure a view does not delete the underlying mmap fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp_base[0] = 5 fp_view = fp_base[0:1] assert_equal(fp_view[0], 5) del fp_view # Should still be able to access and assign values after # deleting the view assert_equal(fp_base[0], 5) fp_base[0] = 6 assert_equal(fp_base[0], 6) def test_arithmetic_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) tmp = (fp + 10) if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_indexing_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) 
tmp = fp[(1, 2), (2, 3)] if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_slicing_keeps_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) assert_(fp[:2, :2]._mmap is fp._mmap) def test_view(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) new1 = fp.view() new2 = new1.view() assert_(new1.base is fp) assert_(new2.base is fp) new_array = asarray(fp) assert_(new_array.base is fp) def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data with suppress_warnings() as sup: sup.filter(FutureWarning, "np.average currently does not preserve") for unary_op in [sum, average, product]: result = unary_op(fp) assert_(isscalar(result)) assert_(result.__class__ is self.data[0, 0].__class__) assert_(unary_op(fp, axis=0).__class__ is ndarray) assert_(unary_op(fp, axis=1).__class__ is ndarray) for binary_op in [add, subtract, multiply]: assert_(binary_op(fp, self.data).__class__ is ndarray) assert_(binary_op(self.data, fp).__class__ is ndarray) assert_(binary_op(fp, fp).__class__ is ndarray) fp += 1 assert(fp.__class__ is memmap) add(fp, 1, out=fp) assert(fp.__class__ is memmap) def test_getitem(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data assert_(fp[1:, :-1].__class__ is memmap) # Fancy indexing returns a copy that is not memmapped assert_(fp[[0, 1]].__class__ is ndarray) def test_memmap_subclass(self): class MemmapSubClass(memmap): pass fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. the # ufunc and __getitem__ output is never turned into a ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) assert(fp[[0, 1]].__class__ is MemmapSubClass) def test_mmap_offset_greater_than_allocation_granularity(self): size = 5 * mmap.ALLOCATIONGRANULARITY offset = mmap.ALLOCATIONGRANULARITY + 1 fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_(fp.offset == offset) def test_no_shape(self): self.tmpfp.write(b'a'*16) mm = memmap(self.tmpfp, dtype='float64') assert_equal(mm.shape, (2,)) def test_empty_array(self): # gh-12653 with pytest.raises(ValueError, match='empty file'): memmap(self.tmpfp, shape=(0,4), mode='w+') self.tmpfp.write(b'\0') # ok now the file is not empty memmap(self.tmpfp, shape=(0,4), mode='w+')
pizzathief/numpy
numpy/core/tests/test_memmap.py
numpy/core/_add_newdocs.py
"""============================= Subclassing ndarray in python ============================= Introduction ------------ Subclassing ndarray is relatively simple, but it has some complications compared to other Python objects. On this page we explain the machinery that allows you to subclass ndarray, and the implications for implementing a subclass. ndarrays and object creation ============================ Subclassing ndarray is complicated by the fact that new instances of ndarray classes can come about in three different ways. These are: #. Explicit constructor call - as in ``MySubClass(params)``. This is the usual route to Python instance creation. #. View casting - casting an existing ndarray as a given subclass #. New from template - creating a new instance from a template instance. Examples include returning slices from a subclassed array, creating return types from ufuncs, and copying arrays. See :ref:`new-from-template` for more details The last two are characteristics of ndarrays - in order to support things like array slicing. The complications of subclassing ndarray are due to the mechanisms numpy has to support these latter two routes of instance creation. .. _view-casting: View casting ------------ *View casting* is the standard ndarray mechanism by which you take an ndarray of any subclass, and return a view of the array as another (specified) subclass: >>> import numpy as np >>> # create a completely useless ndarray subclass >>> class C(np.ndarray): pass >>> # create a standard ndarray >>> arr = np.zeros((3,)) >>> # take a view of it, as our useless subclass >>> c_arr = arr.view(C) >>> type(c_arr) <class 'C'> .. _new-from-template: Creating new from template -------------------------- New instances of an ndarray subclass can also come about by a very similar mechanism to :ref:`view-casting`, when numpy finds it needs to create a new instance from a template instance. The most obvious place this has to happen is when you are taking slices of subclassed arrays. For example: >>> v = c_arr[1:] >>> type(v) # the view is of type 'C' <class 'C'> >>> v is c_arr # but it's a new instance False The slice is a *view* onto the original ``c_arr`` data. So, when we take a view from the ndarray, we return a new ndarray, of the same class, that points to the data in the original. There are other points in the use of ndarrays where we need such views, such as copying arrays (``c_arr.copy()``), creating ufunc output arrays (see also :ref:`array-wrap`), and reducing methods (like ``c_arr.mean()``. Relationship of view casting and new-from-template -------------------------------------------------- These paths both use the same machinery. We make the distinction here, because they result in different input to your methods. Specifically, :ref:`view-casting` means you have created a new instance of your array type from any potential subclass of ndarray. :ref:`new-from-template` means you have created a new instance of your class from a pre-existing instance, allowing you - for example - to copy across attributes that are particular to your subclass. Implications for subclassing ---------------------------- If we subclass ndarray, we need to deal not only with explicit construction of our array type, but also :ref:`view-casting` or :ref:`new-from-template`. NumPy has the machinery to do this, and this machinery that makes subclassing slightly non-standard. There are two aspects to the machinery that ndarray uses to support views and new-from-template in subclasses. 
The first is the use of the ``ndarray.__new__`` method for the main work
of object initialization, rather than the more usual ``__init__``
method.  The second is the use of the ``__array_finalize__`` method to
allow subclasses to clean up after the creation of views and new
instances from templates.

A brief Python primer on ``__new__`` and ``__init__``
=====================================================

``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
<https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.

For example, consider the following Python code:

.. testcode::

  class C(object):
      def __new__(cls, *args):
          print('Cls in __new__:', cls)
          print('Args in __new__:', args)
          # The `object` type __new__ method takes a single argument.
          return object.__new__(cls)

      def __init__(self, *args):
          print('type(self) in __init__:', type(self))
          print('Args in __init__:', args)

meaning that we get:

>>> c = C('hello')
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
type(self) in __init__: <class 'C'>
Args in __init__: ('hello',)

When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
``'hello'``.  After python calls ``__new__``, it usually (see below)
calls our ``__init__`` method, with the output of ``__new__`` as the
first argument (now a class instance), and the passed arguments
following.

As you can see, the object can be initialized in the ``__new__``
method or the ``__init__`` method, or both, and in fact ndarray does
not have an ``__init__`` method, because all the initialization is
done in the ``__new__`` method.

Why use ``__new__`` rather than just the usual ``__init__``?  Because
in some cases, as for ndarray, we want to be able to return an object
of some other class.  Consider the following:

.. testcode::

  class D(C):
      def __new__(cls, *args):
          print('D cls is:', cls)
          print('D args in __new__:', args)
          return C.__new__(C, *args)

      def __init__(self, *args):
          # we never get here
          print('In D __init__')

meaning that:

>>> obj = D('hello')
D cls is: <class 'D'>
D args in __new__: ('hello',)
Cls in __new__: <class 'C'>
Args in __new__: ('hello',)
>>> type(obj)
<class 'C'>

The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
``D``.  Note that the ``__init__`` method of ``D`` does not get
called.  In general, when the ``__new__`` method returns an object of
class other than the class in which it is defined, the ``__init__``
method of that class is not called.

This is how subclasses of the ndarray class are able to return views
that preserve the class type.  When taking a view, the standard
ndarray machinery creates the new ndarray object with something
like::

  obj = ndarray.__new__(subtype, shape, ...

where ``subtype`` is the subclass.  Thus the returned view is of the
same class as the subclass, rather than being of class ``ndarray``.

That solves the problem of returning views of the same type, but now
we have a new problem.  The machinery of ndarray can set the class
this way, in its standard methods for taking views, but the ndarray
``__new__`` method knows nothing of what we have done in our own
``__new__`` method in order to set attributes, and so on.  (Aside -
why not call ``obj = subtype.__new__(...`` then?  Because we may not
have a ``__new__`` method with the same call signature).
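To see this class-preserving behaviour of ``ndarray.__new__`` in
isolation, here is a minimal sketch (reusing the empty subclass ``C``
from above); calling ``ndarray.__new__`` directly like this is for
illustration only - the view machinery normally does it for you:

>>> import numpy as np
>>> class C(np.ndarray): pass
>>> obj = np.ndarray.__new__(C, (3,))  # contents are uninitialized
>>> type(obj)
<class 'C'>
>>> isinstance(obj, np.ndarray)
True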
The role of ``__array_finalize__``
==================================

``__array_finalize__`` is the mechanism that numpy provides to allow
subclasses to handle the various ways that new instances get created.

Remember that subclass instances can come about in these three ways:

#. explicit constructor call (``obj = MySubClass(params)``).  This will
   call the usual sequence of ``MySubClass.__new__`` then (if it
   exists) ``MySubClass.__init__``.
#. :ref:`view-casting`
#. :ref:`new-from-template`

Our ``MySubClass.__new__`` method only gets called in the case of the
explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
``MySubClass.__init__`` to deal with the view casting and
new-from-template.  It turns out that ``MySubClass.__array_finalize__``
*does* get called for all three methods of object creation, so this is
where our object creation housekeeping usually goes.

* For the explicit constructor call, our subclass will need to create a
  new ndarray instance of its own class.  In practice this means that
  we, the authors of the code, will need to make a call to
  ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call
  to ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting
  of an existing array (see below)
* For view casting and new-from-template, the equivalent of
  ``ndarray.__new__(MySubClass,...`` is called, at the C level.

The arguments that ``__array_finalize__`` receives differ for the three
methods of instance creation above.

The following code allows us to look at the call sequences and arguments:

.. testcode::

   import numpy as np

   class C(np.ndarray):
       def __new__(cls, *args, **kwargs):
           print('In __new__ with class %s' % cls)
           return super(C, cls).__new__(cls, *args, **kwargs)

       def __init__(self, *args, **kwargs):
           # in practice you probably will not need or want an __init__
           # method for your subclass
           print('In __init__ with class %s' % self.__class__)

       def __array_finalize__(self, obj):
           print('In array_finalize:')
           print('   self type is %s' % type(self))
           print('   obj type is %s' % type(obj))

Now:

>>> # Explicit constructor
>>> c = C((10,))
In __new__ with class <class 'C'>
In array_finalize:
   self type is <class 'C'>
   obj type is <class 'NoneType'>
In __init__ with class <class 'C'>
>>> # View casting
>>> a = np.arange(10)
>>> cast_a = a.view(C)
In array_finalize:
   self type is <class 'C'>
   obj type is <class 'numpy.ndarray'>
>>> # Slicing (example of new-from-template)
>>> cv = c[:1]
In array_finalize:
   self type is <class 'C'>
   obj type is <class 'C'>

The signature of ``__array_finalize__`` is::

    def __array_finalize__(self, obj):

One sees that the ``super`` call, which goes to ``ndarray.__new__``,
passes ``__array_finalize__`` the new object, of our own class
(``self``) as well as the object from which the view has been taken
(``obj``).  As you can see from the output above, the ``self`` is
always a newly created instance of our subclass, and the type of
``obj`` differs for the three instance creation methods:

* When called from the explicit constructor, ``obj`` is ``None``
* When called from view casting, ``obj`` can be an instance of any
  subclass of ndarray, including our own.
* When called in new-from-template, ``obj`` is another instance of our
  own subclass, that we might use to update the new ``self`` instance.

Because ``__array_finalize__`` is the only method that always sees new
instances being created, it is the sensible place to fill in instance
defaults for new object attributes, among other tasks.

This may be clearer with an example.
Simple example - adding an extra attribute to ndarray ----------------------------------------------------- .. testcode:: import numpy as np class InfoArray(np.ndarray): def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard # ndarray constructor, but return an object of our type. # It also triggers a call to InfoArray.__array_finalize__ obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, buffer, offset, strides, order) # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # ``self`` is a new object resulting from # ndarray.__new__(InfoArray, ...), therefore it only has # attributes that the ndarray.__new__ constructor gave it - # i.e. those of a standard ndarray. # # We could have got to the ndarray.__new__ call in 3 ways: # From an explicit constructor - e.g. InfoArray(): # obj is None # (we're in the middle of the InfoArray.__new__ # constructor, and self.info will be set when we return to # InfoArray.__new__) if obj is None: return # From view casting - e.g arr.view(InfoArray): # obj is arr # (type(obj) can be InfoArray) # From new-from-template - e.g infoarr[:3] # type(obj) is InfoArray # # Note that it is here, rather than in the __new__ method, # that we set the default value for 'info', because this # method sees all creation of default objects - with the # InfoArray.__new__ constructor, but also with # arr.view(InfoArray). self.info = getattr(obj, 'info', None) # We do not need to return anything Using the object looks like this: >>> obj = InfoArray(shape=(3,)) # explicit constructor >>> type(obj) <class 'InfoArray'> >>> obj.info is None True >>> obj = InfoArray(shape=(3,), info='information') >>> obj.info 'information' >>> v = obj[1:] # new-from-template - here - slicing >>> type(v) <class 'InfoArray'> >>> v.info 'information' >>> arr = np.arange(10) >>> cast_arr = arr.view(InfoArray) # view casting >>> type(cast_arr) <class 'InfoArray'> >>> cast_arr.info is None True This class isn't very useful, because it has the same constructor as the bare ndarray object, including passing in buffers and shapes and so on. We would probably prefer the constructor to be able to take an already formed ndarray from the usual numpy calls to ``np.array`` and return an object. Slightly more realistic example - attribute added to existing array ------------------------------------------------------------------- Here is a class that takes a standard ndarray that already exists, casts as our type, and adds an extra attribute. .. testcode:: import numpy as np class RealisticInfoArray(np.ndarray): def __new__(cls, input_array, info=None): # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array).view(cls) # add the new attribute to the created instance obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # see InfoArray.__array_finalize__ for comments if obj is None: return self.info = getattr(obj, 'info', None) So: >>> arr = np.arange(5) >>> obj = RealisticInfoArray(arr, info='information') >>> type(obj) <class 'RealisticInfoArray'> >>> obj.info 'information' >>> v = obj[1:] >>> type(v) <class 'RealisticInfoArray'> >>> v.info 'information' .. 
_array-ufunc:

``__array_ufunc__`` for ufuncs
------------------------------

.. versionadded:: 1.13

A subclass can override what happens when executing numpy ufuncs on it by
overriding the default ``ndarray.__array_ufunc__`` method. This method is
executed *instead* of the ufunc and should return either the result of the
operation, or :obj:`NotImplemented` if the operation requested is not
implemented.

The signature of ``__array_ufunc__`` is::

    def __array_ufunc__(ufunc, method, *inputs, **kwargs):

    - *ufunc* is the ufunc object that was called.
    - *method* is a string indicating how the Ufunc was called, either
      ``"__call__"`` to indicate it was called directly, or one of its
      :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
      ``"reduceat"``, ``"outer"``, or ``"at"``.
    - *inputs* is a tuple of the input arguments to the ``ufunc``
    - *kwargs* contains any optional or keyword arguments passed to the
      function. This includes any ``out`` arguments, which are always
      contained in a tuple.

A typical implementation would convert any inputs or outputs that are
instances of one's own class, pass everything on to a superclass using
``super()``, and finally return the results after possible
back-conversion. An example, taken from the test case
``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
following.

.. testcode::

    import numpy as np

    class A(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            args = []
            in_no = []
            for i, input_ in enumerate(inputs):
                if isinstance(input_, A):
                    in_no.append(i)
                    args.append(input_.view(np.ndarray))
                else:
                    args.append(input_)

            outputs = kwargs.pop('out', None)
            out_no = []
            if outputs:
                out_args = []
                for j, output in enumerate(outputs):
                    if isinstance(output, A):
                        out_no.append(j)
                        out_args.append(output.view(np.ndarray))
                    else:
                        out_args.append(output)
                kwargs['out'] = tuple(out_args)
            else:
                outputs = (None,) * ufunc.nout

            info = {}
            if in_no:
                info['inputs'] = in_no
            if out_no:
                info['outputs'] = out_no

            results = super(A, self).__array_ufunc__(ufunc, method,
                                                     *args, **kwargs)
            if results is NotImplemented:
                return NotImplemented

            if method == 'at':
                if isinstance(inputs[0], A):
                    inputs[0].info = info
                return

            if ufunc.nout == 1:
                results = (results,)

            results = tuple((np.asarray(result).view(A)
                             if output is None else output)
                            for result, output in zip(results, outputs))
            if results and isinstance(results[0], A):
                results[0].info = info

            return results[0] if len(results) == 1 else results

So, this class does not actually do anything interesting: it just converts
any instances of its own to regular ndarray (otherwise, we'd get infinite
recursion!), and adds an ``info`` dictionary that tells which inputs and
outputs it converted. Hence, e.g.,

>>> a = np.arange(5.).view(A)
>>> b = np.sin(a)
>>> b.info
{'inputs': [0]}
>>> b = np.sin(np.arange(5.), out=(a,))
>>> b.info
{'outputs': [0]}
>>> a = np.arange(5.).view(A)
>>> b = np.ones(1).view(A)
>>> c = a + b
>>> c.info
{'inputs': [0, 1]}
>>> a += b
>>> a.info
{'inputs': [0, 1], 'outputs': [0]}

Note that another approach would be to use
``getattr(ufunc, method)(*inputs, **kwargs)`` instead of the ``super``
call. For this example, the result would be identical, but there is a
difference if another operand also defines ``__array_ufunc__``. E.g., let's
assume that we evaluate ``np.add(a, b)``, where ``b`` is an instance of
another class ``B`` that has an override.
If you use ``super`` as in the example, ``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which means it cannot evaluate the result itself. Thus, it will return `NotImplemented` and so will our class ``A``. Then, control will be passed over to ``b``, which either knows how to deal with us and produces a result, or does not and returns `NotImplemented`, raising a ``TypeError``. If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__`` will be called, but now it sees an ``ndarray`` as the other argument. Likely, it will know how to handle this, and return a new instance of the ``B`` class to us. Our example class is not set up to handle this, but it might well be the best approach if, e.g., one were to re-implement ``MaskedArray`` using ``__array_ufunc__``. As a final note: if the ``super`` route is suited to a given class, an advantage of using it is that it helps in constructing class hierarchies. E.g., suppose that our other class ``B`` also used the ``super`` in its ``__array_ufunc__`` implementation, and we created a class ``C`` that depended on both, i.e., ``class C(A, B)`` (with, for simplicity, not another ``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to ``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to ``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate. .. _array-wrap: ``__array_wrap__`` for ufuncs and other functions ------------------------------------------------- Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using ``__array_wrap__`` and ``__array_prepare__``. These two allowed one to change the output type of a ufunc, but, in contrast to ``__array_ufunc__``, did not allow one to make any changes to the inputs. It is hoped to eventually deprecate these, but ``__array_wrap__`` is also used by other numpy functions and methods, such as ``squeeze``, so at the present time is still needed for full functionality. Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of allowing a subclass to set the type of the return value and update attributes and metadata. Let's show how this works with an example. First we return to the simpler example subclass, but with a different name and some print statements: .. 
testcode::

  import numpy as np

  class MySubClass(np.ndarray):

      def __new__(cls, input_array, info=None):
          obj = np.asarray(input_array).view(cls)
          obj.info = info
          return obj

      def __array_finalize__(self, obj):
          print('In __array_finalize__:')
          print('   self is %s' % repr(self))
          print('   obj is %s' % repr(obj))
          if obj is None: return
          self.info = getattr(obj, 'info', None)

      def __array_wrap__(self, out_arr, context=None):
          print('In __array_wrap__:')
          print('   self is %s' % repr(self))
          print('   arr is %s' % repr(out_arr))
          # then just call the parent; the bound super call already
          # supplies self, so only out_arr and context are passed
          return super(MySubClass, self).__array_wrap__(out_arr, context)

We run a ufunc on an instance of our new array:

>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
   self is MySubClass([0, 1, 2, 3, 4])
   obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
   self is MySubClass([0, 1, 2, 3, 4])
   arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
   self is MySubClass([1, 3, 5, 7, 9])
   obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'

Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
of the addition.  In turn, the default ``__array_wrap__``
(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
and called ``__array_finalize__`` - hence the copying of the ``info``
attribute.  This has all happened at the C level.

But, we could do anything we wanted:

.. testcode::

  class SillySubClass(np.ndarray):

      def __array_wrap__(self, arr, context=None):
          return 'I lost your data'

>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'

So, by defining a specific ``__array_wrap__`` method for our subclass, we
can tweak the output from ufuncs. The ``__array_wrap__`` method requires
``self``, then an argument - which is the result of the ufunc - and an
optional parameter *context*. This parameter is returned by ufuncs as a
3-element tuple: (name of the ufunc, arguments of the ufunc, domain of the
ufunc), but is not set by other numpy functions. Though, as seen above, it
is possible to do otherwise, ``__array_wrap__`` should return an instance
of its containing class.  See the masked array subclass for an
implementation.

In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on the
way into the ufunc, after the output arrays are created but before any
computation has been performed.  The default implementation does nothing
but pass through the array.  ``__array_prepare__`` should not attempt to
access the array data or resize the array, it is intended for setting the
output array type, updating attributes and metadata, and performing any
checks based on the input that may be desired before computation begins.
Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.

Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------

One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views.  Consider the case where we have
created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory.
NumPy keeps track of where the data came from for a particular array or view, with the ``base`` attribute: >>> # A normal ndarray, that owns its own data >>> arr = np.zeros((4,)) >>> # In this case, base is None >>> arr.base is None True >>> # We take a view >>> v1 = arr[1:] >>> # base now points to the array that it derived from >>> v1.base is arr True >>> # Take a view of a view >>> v2 = v1[1:] >>> # base points to the view it derived from >>> v2.base is v1 True In general, if the array owns its own memory, as for ``arr`` in this case, then ``arr.base`` will be None - there are some exceptions to this - see the numpy book for more details. The ``base`` attribute is useful in being able to tell whether we have a view or the original array. This in turn can be useful if we need to know whether or not to do some specific cleanup when the subclassed array is deleted. For example, we may only want to do the cleanup if the original array is deleted, but not the views. For an example of how this can work, have a look at the ``memmap`` class in ``numpy.core``. Subclassing and Downstream Compatibility ---------------------------------------- When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray`` interface, it is your responsibility to decide how aligned your APIs will be with those of numpy. For convenience, many numpy functions that have a corresponding ``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking if the first argument to a function has a method of the same name. If it exists, the method is called instead of coercing the arguments to a numpy array. For example, if you want your sub-class or duck-type to be compatible with numpy's ``sum`` function, the method signature for this object's ``sum`` method should be the following: .. testcode:: def sum(self, axis=None, dtype=None, out=None, keepdims=False): ... This is the exact same method signature for ``np.sum``, so now if a user calls ``np.sum`` on this object, numpy will call the object's own ``sum`` method and pass in these arguments enumerated above in the signature, and no errors will be raised because the signatures are completely compatible with each other. If, however, you decide to deviate from this signature and do something like this: .. testcode:: def sum(self, axis=None, dtype=None): ... This object is no longer compatible with ``np.sum`` because if you call ``np.sum``, it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError to be raised. If you wish to maintain compatibility with numpy and its subsequent versions (which might add new keyword arguments) but do not want to surface all of numpy's arguments, your function's signature should accept ``**kwargs``. For example: .. testcode:: def sum(self, axis=None, dtype=None, **unused_kwargs): ... This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. """ from __future__ import division, absolute_import, print_function
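As a postscript to the ``base`` discussion in the subclassing document
above: a minimal sketch of the cleanup pattern it alludes to. The
``DiskBackedArray`` name and its ``_release_resource`` helper are
hypothetical, invented for illustration; the real-world reference is the
``memmap`` class, and the ``test_del`` case in the memmap tests below
exercises exactly this owner-versus-view distinction:

    import numpy as np

    class DiskBackedArray(np.ndarray):
        def __del__(self):
            # Only the array that owns its memory releases the resource;
            # views (which have a non-None .base) must not.
            if self.base is None:
                self._release_resource()

        def _release_resource(self):
            pass  # hypothetical: close files, unmap memory, etc.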
from __future__ import division, absolute_import, print_function import sys import os import shutil import mmap import pytest from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp from numpy import ( memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) from numpy.compat import Path from numpy import arange, allclose, asarray from numpy.testing import ( assert_, assert_equal, assert_array_equal, suppress_warnings ) class TestMemmap(object): def setup(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.tempdir = mkdtemp() self.shape = (3, 4) self.dtype = 'float32' self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) def teardown(self): self.tmpfp.close() shutil.rmtree(self.tempdir) def test_roundtrip(self): # Write data to file fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp # Test __del__ machinery, which handles cleanup # Read data back from file newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', shape=self.shape) assert_(allclose(self.data, newfp)) assert_array_equal(self.data, newfp) assert_equal(newfp.flags.writeable, False) def test_open_with_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp def test_unnamed_file(self): with TemporaryFile() as f: fp = memmap(f, dtype=self.dtype, shape=self.shape) del fp def test_attributes(self): offset = 1 mode = "w+" fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, shape=self.shape, offset=offset) assert_equal(offset, fp.offset) assert_equal(mode, fp.mode) del fp def test_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) abspath = os.path.abspath(tmpname) fp[:] = self.data[:] assert_equal(abspath, fp.filename) b = fp[:1] assert_equal(abspath, b.filename) del b del fp @pytest.mark.skipif(Path is None, reason="No pathlib.Path") def test_path(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', shape=self.shape) # os.path.realpath does not resolve symlinks on Windows # see: https://bugs.python.org/issue9949 # use Path.resolve, just as memmap class does internally abspath = str(Path(tmpname).resolve()) fp[:] = self.data[:] assert_equal(abspath, str(fp.filename.resolve())) b = fp[:1] assert_equal(abspath, str(b.filename.resolve())) del b del fp def test_filename_fileobj(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", shape=self.shape) assert_equal(fp.filename, self.tmpfp.name) @pytest.mark.skipif(sys.platform == 'gnu0', reason="Known to fail on hurd") def test_flush(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] assert_equal(fp[0], self.data[0]) fp.flush() def test_del(self): # Make sure a view does not delete the underlying mmap fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp_base[0] = 5 fp_view = fp_base[0:1] assert_equal(fp_view[0], 5) del fp_view # Should still be able to access and assign values after # deleting the view assert_equal(fp_base[0], 5) fp_base[0] = 6 assert_equal(fp_base[0], 6) def test_arithmetic_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) tmp = (fp + 10) if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_indexing_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) 
tmp = fp[(1, 2), (2, 3)] if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_slicing_keeps_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) assert_(fp[:2, :2]._mmap is fp._mmap) def test_view(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) new1 = fp.view() new2 = new1.view() assert_(new1.base is fp) assert_(new2.base is fp) new_array = asarray(fp) assert_(new_array.base is fp) def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data with suppress_warnings() as sup: sup.filter(FutureWarning, "np.average currently does not preserve") for unary_op in [sum, average, product]: result = unary_op(fp) assert_(isscalar(result)) assert_(result.__class__ is self.data[0, 0].__class__) assert_(unary_op(fp, axis=0).__class__ is ndarray) assert_(unary_op(fp, axis=1).__class__ is ndarray) for binary_op in [add, subtract, multiply]: assert_(binary_op(fp, self.data).__class__ is ndarray) assert_(binary_op(self.data, fp).__class__ is ndarray) assert_(binary_op(fp, fp).__class__ is ndarray) fp += 1 assert(fp.__class__ is memmap) add(fp, 1, out=fp) assert(fp.__class__ is memmap) def test_getitem(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data assert_(fp[1:, :-1].__class__ is memmap) # Fancy indexing returns a copy that is not memmapped assert_(fp[[0, 1]].__class__ is ndarray) def test_memmap_subclass(self): class MemmapSubClass(memmap): pass fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. the # ufunc and __getitem__ output is never turned into a ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) assert(fp[[0, 1]].__class__ is MemmapSubClass) def test_mmap_offset_greater_than_allocation_granularity(self): size = 5 * mmap.ALLOCATIONGRANULARITY offset = mmap.ALLOCATIONGRANULARITY + 1 fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_(fp.offset == offset) def test_no_shape(self): self.tmpfp.write(b'a'*16) mm = memmap(self.tmpfp, dtype='float64') assert_equal(mm.shape, (2,)) def test_empty_array(self): # gh-12653 with pytest.raises(ValueError, match='empty file'): memmap(self.tmpfp, shape=(0,4), mode='w+') self.tmpfp.write(b'\0') # ok now the file is not empty memmap(self.tmpfp, shape=(0,4), mode='w+')
pizzathief/numpy
numpy/core/tests/test_memmap.py
numpy/doc/subclassing.py
""" Basic functions used by several sub-packages and useful to have in the main name-space. Type Handling ------------- ================ =================== iscomplexobj Test for complex object, scalar result isrealobj Test for real object, scalar result iscomplex Test for complex elements, array result isreal Test for real elements, array result imag Imaginary part real Real part real_if_close Turns complex number with tiny imaginary part to real isneginf Tests for negative infinity, array result isposinf Tests for positive infinity, array result isnan Tests for nans, array result isinf Tests for infinity, array result isfinite Tests for finite numbers, array result isscalar True if argument is a scalar nan_to_num Replaces NaN's with 0 and infinities with large numbers cast Dictionary of functions to force cast to each type common_type Determine the minimum common type code for a group of arrays mintypecode Return minimal allowed common typecode. ================ =================== Index Tricks ------------ ================ =================== mgrid Method which allows easy construction of N-d 'mesh-grids' ``r_`` Append and construct arrays: turns slice objects into ranges and concatenates them, for 2d arrays appends rows. index_exp Konrad Hinsen's index_expression class instance which can be useful for building complicated slicing syntax. ================ =================== Useful Functions ---------------- ================ =================== select Extension of where to multiple conditions and choices extract Extract 1d array from flattened array according to mask insert Insert 1d array of values into Nd array according to mask linspace Evenly spaced samples in linear space logspace Evenly spaced samples in logarithmic space fix Round x to nearest integer towards zero mod Modulo mod(x,y) = x % y except keeps sign of y amax Array maximum along axis amin Array minimum along axis ptp Array max-min along axis cumsum Cumulative sum along axis prod Product of elements along axis cumprod Cumluative product along axis diff Discrete differences along axis angle Returns angle of complex argument unwrap Unwrap phase along given axis (1-d algorithm) sort_complex Sort a complex-array (based on real, then imaginary) trim_zeros Trim the leading and trailing zeros from 1D array. vectorize A class that wraps a Python function taking scalar arguments into a generalized function which can handle arrays of arguments using the broadcast rules of numerix Python. ================ =================== Shape Manipulation ------------------ ================ =================== squeeze Return a with length-one dimensions removed. atleast_1d Force arrays to be >= 1D atleast_2d Force arrays to be >= 2D atleast_3d Force arrays to be >= 3D vstack Stack arrays vertically (row on row) hstack Stack arrays horizontally (column on column) column_stack Stack 1D arrays as columns into 2D array dstack Stack arrays depthwise (along third dimension) stack Stack arrays along a new axis split Divide array into a list of sub-arrays hsplit Split into columns vsplit Split into rows dsplit Split along third dimension ================ =================== Matrix (2D Array) Manipulations ------------------------------- ================ =================== fliplr 2D array with columns flipped flipud 2D array with rows flipped rot90 Rotate a 2D array a multiple of 90 degrees eye Return a 2D array with ones down a given diagonal diag Construct a 2D array from a vector, or return a given diagonal from a 2D array. 
mat Construct a Matrix bmat Build a Matrix from blocks ================ =================== Polynomials ----------- ================ =================== poly1d A one-dimensional polynomial class poly Return polynomial coefficients from roots roots Find roots of polynomial given coefficients polyint Integrate polynomial polyder Differentiate polynomial polyadd Add polynomials polysub Subtract polynomials polymul Multiply polynomials polydiv Divide polynomials polyval Evaluate polynomial at given argument ================ =================== Iterators --------- ================ =================== Arrayterator A buffered iterator for big arrays. ================ =================== Import Tricks ------------- ================ =================== ppimport Postpone module import until trying to use it ppimport_attr Postpone module import until trying to use its attribute ppresolve Import postponed module and return it. ================ =================== Machine Arithmetics ------------------- ================ =================== machar_single Single precision floating point arithmetic parameters machar_double Double precision floating point arithmetic parameters ================ =================== Threading Tricks ---------------- ================ =================== ParallelExec Execute commands in parallel thread. ================ =================== Array Set Operations ----------------------- Set operations for numeric arrays based on sort() function. ================ =================== unique Unique elements of an array. isin Test whether each element of an ND array is present anywhere within a second array. ediff1d Array difference (auxiliary function). intersect1d Intersection of 1D arrays with unique elements. setxor1d Set exclusive-or of 1D arrays with unique elements. in1d Test whether elements in a 1D array are also present in another array. union1d Union of 1D arrays with unique elements. setdiff1d Set difference of 1D arrays with unique elements. ================ =================== """ from __future__ import division, absolute_import, print_function depends = ['core', 'testing'] global_symbols = ['*']
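For concreteness, a short usage sketch of a few of the set operations
catalogued above; these are the public numpy functions the table refers
to, shown here with made-up sample data:

    >>> import numpy as np
    >>> a = np.array([1, 2, 3, 4])
    >>> b = np.array([3, 4, 5])
    >>> np.intersect1d(a, b)
    array([3, 4])
    >>> np.union1d(a, b)
    array([1, 2, 3, 4, 5])
    >>> np.setdiff1d(a, b)
    array([1, 2])
    >>> np.in1d(a, b)
    array([False, False,  True,  True])

As the table notes, these are sort-based set operations: inputs with
repeated values are treated as sets, so duplicates do not appear in the
results.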
from __future__ import division, absolute_import, print_function import sys import os import shutil import mmap import pytest from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp from numpy import ( memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) from numpy.compat import Path from numpy import arange, allclose, asarray from numpy.testing import ( assert_, assert_equal, assert_array_equal, suppress_warnings ) class TestMemmap(object): def setup(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.tempdir = mkdtemp() self.shape = (3, 4) self.dtype = 'float32' self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) def teardown(self): self.tmpfp.close() shutil.rmtree(self.tempdir) def test_roundtrip(self): # Write data to file fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp # Test __del__ machinery, which handles cleanup # Read data back from file newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', shape=self.shape) assert_(allclose(self.data, newfp)) assert_array_equal(self.data, newfp) assert_equal(newfp.flags.writeable, False) def test_open_with_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp def test_unnamed_file(self): with TemporaryFile() as f: fp = memmap(f, dtype=self.dtype, shape=self.shape) del fp def test_attributes(self): offset = 1 mode = "w+" fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, shape=self.shape, offset=offset) assert_equal(offset, fp.offset) assert_equal(mode, fp.mode) del fp def test_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) abspath = os.path.abspath(tmpname) fp[:] = self.data[:] assert_equal(abspath, fp.filename) b = fp[:1] assert_equal(abspath, b.filename) del b del fp @pytest.mark.skipif(Path is None, reason="No pathlib.Path") def test_path(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', shape=self.shape) # os.path.realpath does not resolve symlinks on Windows # see: https://bugs.python.org/issue9949 # use Path.resolve, just as memmap class does internally abspath = str(Path(tmpname).resolve()) fp[:] = self.data[:] assert_equal(abspath, str(fp.filename.resolve())) b = fp[:1] assert_equal(abspath, str(b.filename.resolve())) del b del fp def test_filename_fileobj(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", shape=self.shape) assert_equal(fp.filename, self.tmpfp.name) @pytest.mark.skipif(sys.platform == 'gnu0', reason="Known to fail on hurd") def test_flush(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] assert_equal(fp[0], self.data[0]) fp.flush() def test_del(self): # Make sure a view does not delete the underlying mmap fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp_base[0] = 5 fp_view = fp_base[0:1] assert_equal(fp_view[0], 5) del fp_view # Should still be able to access and assign values after # deleting the view assert_equal(fp_base[0], 5) fp_base[0] = 6 assert_equal(fp_base[0], 6) def test_arithmetic_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) tmp = (fp + 10) if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_indexing_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) 
tmp = fp[(1, 2), (2, 3)] if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_slicing_keeps_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) assert_(fp[:2, :2]._mmap is fp._mmap) def test_view(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) new1 = fp.view() new2 = new1.view() assert_(new1.base is fp) assert_(new2.base is fp) new_array = asarray(fp) assert_(new_array.base is fp) def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data with suppress_warnings() as sup: sup.filter(FutureWarning, "np.average currently does not preserve") for unary_op in [sum, average, product]: result = unary_op(fp) assert_(isscalar(result)) assert_(result.__class__ is self.data[0, 0].__class__) assert_(unary_op(fp, axis=0).__class__ is ndarray) assert_(unary_op(fp, axis=1).__class__ is ndarray) for binary_op in [add, subtract, multiply]: assert_(binary_op(fp, self.data).__class__ is ndarray) assert_(binary_op(self.data, fp).__class__ is ndarray) assert_(binary_op(fp, fp).__class__ is ndarray) fp += 1 assert(fp.__class__ is memmap) add(fp, 1, out=fp) assert(fp.__class__ is memmap) def test_getitem(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data assert_(fp[1:, :-1].__class__ is memmap) # Fancy indexing returns a copy that is not memmapped assert_(fp[[0, 1]].__class__ is ndarray) def test_memmap_subclass(self): class MemmapSubClass(memmap): pass fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. the # ufunc and __getitem__ output is never turned into a ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) assert(fp[[0, 1]].__class__ is MemmapSubClass) def test_mmap_offset_greater_than_allocation_granularity(self): size = 5 * mmap.ALLOCATIONGRANULARITY offset = mmap.ALLOCATIONGRANULARITY + 1 fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_(fp.offset == offset) def test_no_shape(self): self.tmpfp.write(b'a'*16) mm = memmap(self.tmpfp, dtype='float64') assert_equal(mm.shape, (2,)) def test_empty_array(self): # gh-12653 with pytest.raises(ValueError, match='empty file'): memmap(self.tmpfp, shape=(0,4), mode='w+') self.tmpfp.write(b'\0') # ok now the file is not empty memmap(self.tmpfp, shape=(0,4), mode='w+')
pizzathief/numpy
numpy/core/tests/test_memmap.py
numpy/lib/info.py
#!/usr/bin/env python """ C declarations, CPP macros, and C functions for f2py2e. Only required declarations/macros/functions will be used. Copyright 1999,2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/05/06 11:42:34 $ Pearu Peterson """ from __future__ import division, absolute_import, print_function import sys import copy from . import __version__ f2py_version = __version__.version errmess = sys.stderr.write ##################### Definitions ################## outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], 'userincludes': [], 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], 'commonhooks': []} needs = {} includes0 = {'includes0': '/*need_includes0*/'} includes = {'includes': '/*need_includes*/'} userincludes = {'userincludes': '/*need_userincludes*/'} typedefs = {'typedefs': '/*need_typedefs*/'} typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} cppmacros = {'cppmacros': '/*need_cppmacros*/'} cfuncs = {'cfuncs': '/*need_cfuncs*/'} callbacks = {'callbacks': '/*need_callbacks*/'} f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', 'initf90modhooksstatic': '/*initf90modhooksstatic*/', 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', } commonhooks = {'commonhooks': '/*need_commonhooks*/', 'initcommonhooks': '/*need_initcommonhooks*/', } ############ Includes ################### includes0['math.h'] = '#include <math.h>' includes0['string.h'] = '#include <string.h>' includes0['setjmp.h'] = '#include <setjmp.h>' includes['Python.h'] = '#include "Python.h"' needs['arrayobject.h'] = ['Python.h'] includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API #include "arrayobject.h"''' includes['arrayobject.h'] = '#include "fortranobject.h"' includes['stdarg.h'] = '#include <stdarg.h>' ############# Type definitions ############### typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' typedefs['signed_char'] = 'typedef signed char signed_char;' typedefs['long_long'] = """\ #ifdef _WIN32 typedef __int64 long_long; #else typedef long long long_long; typedef unsigned long long unsigned_long_long; #endif """ typedefs['unsigned_long_long'] = """\ #ifdef _WIN32 typedef __uint64 long_long; #else typedef unsigned long long unsigned_long_long; #endif """ typedefs['long_double'] = """\ #ifndef _LONG_DOUBLE typedef long double long_double; #endif """ typedefs[ 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' typedefs['string'] = """typedef char * string;""" ############### CPP macros #################### cppmacros['CFUNCSMESS'] = """\ #ifdef DEBUGCFUNCS #define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); #define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ fprintf(stderr,\"\\n\"); #else #define CFUNCSMESS(mess) #define CFUNCSMESSPY(mess,obj) #endif """ cppmacros['F_FUNC'] = """\ #if defined(PREPEND_FORTRAN) #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) _##F #else #define F_FUNC(f,F) _##f 
#endif #else #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) _##F##_ #else #define F_FUNC(f,F) _##f##_ #endif #endif #else #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) F #else #define F_FUNC(f,F) f #endif #else #if defined(UPPERCASE_FORTRAN) #define F_FUNC(f,F) F##_ #else #define F_FUNC(f,F) f##_ #endif #endif #endif #if defined(UNDERSCORE_G77) #define F_FUNC_US(f,F) F_FUNC(f##_,F##_) #else #define F_FUNC_US(f,F) F_FUNC(f,F) #endif """ cppmacros['F_WRAPPEDFUNC'] = """\ #if defined(PREPEND_FORTRAN) #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F #else #define F_WRAPPEDFUNC(f,F) _f2pywrap##f #endif #else #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ #else #define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ #endif #endif #else #if defined(NO_APPEND_FORTRAN) #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) F2PYWRAP##F #else #define F_WRAPPEDFUNC(f,F) f2pywrap##f #endif #else #if defined(UPPERCASE_FORTRAN) #define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ #else #define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ #endif #endif #endif #if defined(UNDERSCORE_G77) #define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) #else #define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) #endif """ cppmacros['F_MODFUNC'] = """\ #if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ #if defined(NO_APPEND_FORTRAN) #define F_MODFUNCNAME(m,f) $ ## m ## $ ## f #else #define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ #endif #endif #if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ #if defined(NO_APPEND_FORTRAN) #define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f #else #define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ #endif #endif #if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ #if defined(NO_APPEND_FORTRAN) #define F_MODFUNCNAME(m,f) f ## .in. ## m #else #define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ #endif #endif /* #if defined(UPPERCASE_FORTRAN) #define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) #else #define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) #endif */ #define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) """ cppmacros['SWAPUNSAFE'] = """\ #define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) """ cppmacros['SWAP'] = """\ #define SWAP(a,b,t) {\\ t *c;\\ c = a;\\ a = b;\\ b = c;} """ # cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & # NPY_ARRAY_C_CONTIGUOUS)' cppmacros['PRINTPYOBJERR'] = """\ #define PRINTPYOBJERR(obj)\\ fprintf(stderr,\"#modulename#.error is related to \");\\ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ fprintf(stderr,\"\\n\"); """ cppmacros['MINMAX'] = """\ #ifndef max #define max(a,b) ((a > b) ? (a) : (b)) #endif #ifndef min #define min(a,b) ((a < b) ? (a) : (b)) #endif #ifndef MAX #define MAX(a,b) ((a > b) ? (a) : (b)) #endif #ifndef MIN #define MIN(a,b) ((a < b) ? 
(a) : (b)) #endif """ needs['len..'] = ['f2py_size'] cppmacros['len..'] = """\ #define rank(var) var ## _Rank #define shape(var,dim) var ## _Dims[dim] #define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp))) #define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim) #define fshape(var,dim) shape(var,rank(var)-dim-1) #define len(var) shape(var,0) #define flen(var) fshape(var,0) #define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp)) /* #define index(i) capi_i ## i */ #define slen(var) capi_ ## var ## _len #define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1) """ needs['f2py_size'] = ['stdarg.h'] cfuncs['f2py_size'] = """\ static int f2py_size(PyArrayObject* var, ...) { npy_int sz = 0; npy_int dim; npy_int rank; va_list argp; va_start(argp, var); dim = va_arg(argp, npy_int); if (dim==-1) { sz = PyArray_SIZE(var); } else { rank = PyArray_NDIM(var); if (dim>=1 && dim<=rank) sz = PyArray_DIM(var, dim-1); else fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank); } va_end(argp); return sz; } """ cppmacros[ 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyInt_FromLong(v))' cppmacros[ 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyInt_FromLong(v))' needs['pyobj_from_int1'] = ['signed_char'] cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyInt_FromLong(v))' cppmacros[ 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))' needs['pyobj_from_long_long1'] = ['long_long'] cppmacros['pyobj_from_long_long1'] = """\ #ifdef HAVE_LONG_LONG #define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) #else #warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. 
#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) #endif """ needs['pyobj_from_long_double1'] = ['long_double'] cppmacros[ 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))' cppmacros[ 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))' cppmacros[ 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))' needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] cppmacros[ 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))' needs['pyobj_from_complex_double1'] = ['complex_double'] cppmacros[ 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))' needs['pyobj_from_complex_float1'] = ['complex_float'] cppmacros[ 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))' needs['pyobj_from_string1'] = ['string'] cppmacros[ 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyString_FromString((char *)v))' needs['pyobj_from_string1size'] = ['string'] cppmacros[ 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUString_FromStringAndSize((char *)v, len))' needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] cppmacros['TRYPYARRAYTEMPLATE'] = """\ /* New SciPy */ #define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; #define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; #define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; #define TRYPYARRAYTEMPLATE(ctype,typecode) \\ PyArrayObject *arr = NULL;\\ if (!obj) return -2;\\ if (!PyArray_Check(obj)) return -1;\\ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ switch (PyArray_TYPE(arr)) {\\ case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\ case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\ case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\ case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ default: return -2;\\ };\\ return 1 """ needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ #define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; #define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ PyArrayObject *arr = 
NULL;\\ if (!obj) return -2;\\ if (!PyArray_Check(obj)) return -1;\\ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ if (PyArray_DESCR(arr)->type==typecode) {\\ *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ return 1;\\ }\\ switch (PyArray_TYPE(arr)) {\\ case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\ case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\ case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ default: return -2;\\ };\\ return -1; """ # cppmacros['NUMFROMARROBJ']="""\ # define NUMFROMARROBJ(typenum,ctype) \\ # if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ # else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ # if (arr) {\\ # if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ # if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ # goto capi_fail;\\ # } else {\\ # (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ # }\\ # if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ # return 1;\\ # } # """ # XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ # cppmacros['CNUMFROMARROBJ']="""\ # define CNUMFROMARROBJ(typenum,ctype) \\ # if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ # else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ # if (arr) {\\ # if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ # if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ # goto capi_fail;\\ # } else {\\ # (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ # }\\ # if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ # return 1;\\ # } # """ needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] cppmacros['GETSTRFROMPYTUPLE'] = """\ #define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ if (rv_cb_str == NULL)\\ goto capi_fail;\\ if (PyString_Check(rv_cb_str)) {\\ str[len-1]='\\0';\\ STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ } else {\\ PRINTPYOBJERR(rv_cb_str);\\ PyErr_SetString(#modulename#_error,\"string object expected\");\\ goto capi_fail;\\ }\\ } """ 
cppmacros['GETSCALARFROMPYTUPLE'] = """\ #define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ goto capi_fail;\\ } """ cppmacros['FAILNULL'] = """\\ #define FAILNULL(p) do { \\ if ((p) == NULL) { \\ PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ goto capi_fail; \\ } \\ } while (0) """ needs['MEMCOPY'] = ['string.h', 'FAILNULL'] cppmacros['MEMCOPY'] = """\ #define MEMCOPY(to,from,n)\\ do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) """ cppmacros['STRINGMALLOC'] = """\ #define STRINGMALLOC(str,len)\\ if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ goto capi_fail;\\ } else {\\ (str)[len] = '\\0';\\ } """ cppmacros['STRINGFREE'] = """\ #define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) """ needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] cppmacros['STRINGCOPYN'] = """\ #define STRINGCOPYN(to,from,buf_size) \\ do { \\ int _m = (buf_size); \\ char *_to = (to); \\ char *_from = (from); \\ FAILNULL(_to); FAILNULL(_from); \\ (void)strncpy(_to, _from, sizeof(char)*_m); \\ _to[_m-1] = '\\0'; \\ /* Padding with spaces instead of nulls */ \\ for (_m -= 2; _m >= 0 && _to[_m] == '\\0'; _m--) { \\ _to[_m] = ' '; \\ } \\ } while (0) """ needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] cppmacros['STRINGCOPY'] = """\ #define STRINGCOPY(to,from)\\ do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) """ cppmacros['CHECKGENERIC'] = """\ #define CHECKGENERIC(check,tcheck,name) \\ if (!(check)) {\\ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ /*goto capi_fail;*/\\ } else """ cppmacros['CHECKARRAY'] = """\ #define CHECKARRAY(check,tcheck,name) \\ if (!(check)) {\\ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ /*goto capi_fail;*/\\ } else """ cppmacros['CHECKSTRING'] = """\ #define CHECKSTRING(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ PyErr_SetString(#modulename#_error, errstring);\\ /*goto capi_fail;*/\\ } else """ cppmacros['CHECKSCALAR'] = """\ #define CHECKSCALAR(check,tcheck,name,show,var)\\ if (!(check)) {\\ char errstring[256];\\ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ PyErr_SetString(#modulename#_error,errstring);\\ /*goto capi_fail;*/\\ } else """ # cppmacros['CHECKDIMS']="""\ # define CHECKDIMS(dims,rank) \\ # for (int i=0;i<(rank);i++)\\ # if (dims[i]<0) {\\ # fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ # goto capi_fail;\\ # } # """ cppmacros[ 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' cppmacros['OLDPYNUM'] = """\ #ifdef OLDPYNUM #error You need to install NumPy version 13 or higher. 
See https://scipy.org/install.html #endif """ ################# C functions ############### cfuncs['calcarrindex'] = """\ static int calcarrindex(int *i,PyArrayObject *arr) { int k,ii = i[0]; for (k=1; k < PyArray_NDIM(arr); k++) ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ return ii; }""" cfuncs['calcarrindextr'] = """\ static int calcarrindextr(int *i,PyArrayObject *arr) { int k,ii = i[PyArray_NDIM(arr)-1]; for (k=1; k < PyArray_NDIM(arr); k++) ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ return ii; }""" cfuncs['forcomb'] = """\ static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; static int initforcomb(npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; forcombcache.nd = nd; forcombcache.d = dims; forcombcache.tr = tr; if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; for (k=1;k<nd;k++) { forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0; } forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; return 1; } static int *nextforcomb(void) { int j,*i,*i_tr,k; int nd=forcombcache.nd; if ((i=forcombcache.i) == NULL) return NULL; if ((i_tr=forcombcache.i_tr) == NULL) return NULL; if (forcombcache.d == NULL) return NULL; i[0]++; if (i[0]==forcombcache.d[0]) { j=1; while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++; if (j==nd) { free(i); free(i_tr); return NULL; } for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0; i[j]++; i_tr[nd-j-1]++; } else i_tr[nd-1]++; if (forcombcache.tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] cfuncs['try_pyarr_from_string'] = """\ static int try_pyarr_from_string(PyObject *obj,const string str) { PyArrayObject *arr = NULL; if (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL))) { STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); } return 1; capi_fail: PRINTPYOBJERR(obj); PyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); return 0; } """ needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] cfuncs['string_from_pyobj'] = """\ static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { PyArrayObject *arr = NULL; PyObject *tmp = NULL; #ifdef DEBUGCFUNCS fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj); #endif if (obj == Py_None) { if (*len == -1) *len = strlen(inistr); /* Will this cause problems? 
*/ STRINGMALLOC(*str,*len); STRINGCOPYN(*str,inistr,*len+1); return 1; } if (PyArray_Check(obj)) { if ((arr = (PyArrayObject *)obj) == NULL) goto capi_fail; if (!ISCONTIGUOUS(arr)) { PyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); goto capi_fail; } if (*len == -1) *len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr); STRINGMALLOC(*str,*len); STRINGCOPYN(*str,PyArray_DATA(arr),*len+1); return 1; } if (PyString_Check(obj)) { tmp = obj; Py_INCREF(tmp); } #if PY_VERSION_HEX >= 0x03000000 else if (PyUnicode_Check(obj)) { tmp = PyUnicode_AsASCIIString(obj); } else { PyObject *tmp2; tmp2 = PyObject_Str(obj); if (tmp2) { tmp = PyUnicode_AsASCIIString(tmp2); Py_DECREF(tmp2); } else { tmp = NULL; } } #else else { tmp = PyObject_Str(obj); } #endif if (tmp == NULL) goto capi_fail; if (*len == -1) *len = PyString_GET_SIZE(tmp); STRINGMALLOC(*str,*len); STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); Py_DECREF(tmp); return 1; capi_fail: Py_XDECREF(tmp); { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['char_from_pyobj'] = ['int_from_pyobj'] cfuncs['char_from_pyobj'] = """\ static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { int i=0; if (int_from_pyobj(&i,obj,errmess)) { *v = (char)i; return 1; } return 0; } """ needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] cfuncs['signed_char_from_pyobj'] = """\ static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { int i=0; if (int_from_pyobj(&i,obj,errmess)) { *v = (signed_char)i; return 1; } return 0; } """ needs['short_from_pyobj'] = ['int_from_pyobj'] cfuncs['short_from_pyobj'] = """\ static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { int i=0; if (int_from_pyobj(&i,obj,errmess)) { *v = (short)i; return 1; } return 0; } """ cfuncs['int_from_pyobj'] = """\ static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyInt_Check(obj)) { *v = (int)PyInt_AS_LONG(obj); return 1; } tmp = PyNumber_Int(obj); if (tmp) { *v = PyInt_AS_LONG(tmp); Py_DECREF(tmp); return 1; } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ cfuncs['long_from_pyobj'] = """\ static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyInt_Check(obj)) { *v = PyInt_AS_LONG(obj); return 1; } tmp = PyNumber_Int(obj); if (tmp) { *v = PyInt_AS_LONG(tmp); Py_DECREF(tmp); return 1; } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['long_long_from_pyobj'] = ['long_long'] cfuncs['long_long_from_pyobj'] = """\ static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyLong_Check(obj)) { *v = PyLong_AsLongLong(obj); return 
(!PyErr_Occurred()); } if (PyInt_Check(obj)) { *v = (long_long)PyInt_AS_LONG(obj); return 1; } tmp = PyNumber_Long(obj); if (tmp) { *v = PyLong_AsLongLong(tmp); Py_DECREF(tmp); return (!PyErr_Occurred()); } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] cfuncs['long_double_from_pyobj'] = """\ static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { double d=0; if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, LongDouble) { PyArray_ScalarAsCtype(obj, v); return 1; } else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { (*v) = *((npy_longdouble *)PyArray_DATA(obj)); return 1; } } if (double_from_pyobj(&d,obj,errmess)) { *v = (long_double)d; return 1; } return 0; } """ cfuncs['double_from_pyobj'] = """\ static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { PyObject* tmp = NULL; if (PyFloat_Check(obj)) { #ifdef __sgi *v = PyFloat_AsDouble(obj); #else *v = PyFloat_AS_DOUBLE(obj); #endif return 1; } tmp = PyNumber_Float(obj); if (tmp) { #ifdef __sgi *v = PyFloat_AsDouble(tmp); #else *v = PyFloat_AS_DOUBLE(tmp); #endif Py_DECREF(tmp); return 1; } if (PyComplex_Check(obj)) tmp = PyObject_GetAttrString(obj,\"real\"); else if (PyString_Check(obj) || PyUnicode_Check(obj)) /*pass*/; else if (PySequence_Check(obj)) tmp = PySequence_GetItem(obj,0); if (tmp) { PyErr_Clear(); if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} Py_DECREF(tmp); } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = #modulename#_error; PyErr_SetString(err,errmess); } return 0; } """ needs['float_from_pyobj'] = ['double_from_pyobj'] cfuncs['float_from_pyobj'] = """\ static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { double d=0.0; if (double_from_pyobj(&d,obj,errmess)) { *v = (float)d; return 1; } return 0; } """ needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', 'complex_double_from_pyobj'] cfuncs['complex_long_double_from_pyobj'] = """\ static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { complex_double cd={0.0,0.0}; if (PyArray_CheckScalar(obj)){ if PyArray_IsScalar(obj, CLongDouble) { PyArray_ScalarAsCtype(obj, v); return 1; } else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; return 1; } } if (complex_double_from_pyobj(&cd,obj,errmess)) { (*v).r = (long_double)cd.r; (*v).i = (long_double)cd.i; return 1; } return 0; } """ needs['complex_double_from_pyobj'] = ['complex_double'] cfuncs['complex_double_from_pyobj'] = """\ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { Py_complex c; if (PyComplex_Check(obj)) { c=PyComplex_AsCComplex(obj); (*v).r=c.real, (*v).i=c.imag; return 1; } if (PyArray_IsScalar(obj, ComplexFloating)) { if (PyArray_IsScalar(obj, CFloat)) { npy_cfloat new; PyArray_ScalarAsCtype(obj, &new); (*v).r = (double)new.real; (*v).i = (double)new.imag; } else if (PyArray_IsScalar(obj, CLongDouble)) { 
npy_clongdouble new; PyArray_ScalarAsCtype(obj, &new); (*v).r = (double)new.real; (*v).i = (double)new.imag; } else { /* if (PyArray_IsScalar(obj, CDouble)) */ PyArray_ScalarAsCtype(obj, v); } return 1; } if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ PyObject *arr; if (PyArray_Check(obj)) { arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); } else { arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); } if (arr==NULL) return 0; (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; return 1; } /* Python does not provide PyNumber_Complex function :-( */ (*v).i=0.0; if (PyFloat_Check(obj)) { #ifdef __sgi (*v).r = PyFloat_AsDouble(obj); #else (*v).r = PyFloat_AS_DOUBLE(obj); #endif return 1; } if (PyInt_Check(obj)) { (*v).r = (double)PyInt_AS_LONG(obj); return 1; } if (PyLong_Check(obj)) { (*v).r = PyLong_AsDouble(obj); return (!PyErr_Occurred()); } if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { PyObject *tmp = PySequence_GetItem(obj,0); if (tmp) { if (complex_double_from_pyobj(v,tmp,errmess)) { Py_DECREF(tmp); return 1; } Py_DECREF(tmp); } } { PyObject* err = PyErr_Occurred(); if (err==NULL) err = PyExc_TypeError; PyErr_SetString(err,errmess); } return 0; } """ needs['complex_float_from_pyobj'] = [ 'complex_float', 'complex_double_from_pyobj'] cfuncs['complex_float_from_pyobj'] = """\ static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { complex_double cd={0.0,0.0}; if (complex_double_from_pyobj(&cd,obj,errmess)) { (*v).r = (float)cd.r; (*v).i = (float)cd.i; return 1; } return 0; } """ needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] cfuncs[ 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] cfuncs[ 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' needs['try_pyarr_from_long_long'] = [ 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] cfuncs[ 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] cfuncs[ 'try_pyarr_from_double'] = 'static int 
try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' needs['try_pyarr_from_complex_float'] = [ 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] cfuncs[ 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' needs['try_pyarr_from_complex_double'] = [ 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] cfuncs[ 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] cfuncs['create_cb_arglist'] = """\ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { PyObject *tmp = NULL; PyObject *tmp_fun = NULL; int tot,opt,ext,siz,i,di=0; CFUNCSMESS(\"create_cb_arglist\\n\"); tot=opt=ext=siz=0; /* Get the total number of arguments */ if (PyFunction_Check(fun)) tmp_fun = fun; else { di = 1; if (PyObject_HasAttrString(fun,\"im_func\")) { tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); } else if (PyObject_HasAttrString(fun,\"__call__\")) { tmp = PyObject_GetAttrString(fun,\"__call__\"); if (PyObject_HasAttrString(tmp,\"im_func\")) tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); else { tmp_fun = fun; /* built-in function */ tot = maxnofargs; if (xa != NULL) tot += PyTuple_Size((PyObject *)xa); } Py_XDECREF(tmp); } else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { tot = maxnofargs; if (xa != NULL) tot += PyTuple_Size((PyObject *)xa); tmp_fun = fun; } else if (F2PyCapsule_Check(fun)) { tot = maxnofargs; if (xa != NULL) ext = PyTuple_Size((PyObject *)xa); if(ext>0) { fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); goto capi_fail; } tmp_fun = fun; } } if (tmp_fun==NULL) { fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); goto capi_fail; } #if PY_VERSION_HEX >= 0x03000000 if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) #else if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) #endif tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; Py_XDECREF(tmp); } /* Get the number of optional arguments */ #if PY_VERSION_HEX >= 0x03000000 if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) #else if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) #endif opt = PyTuple_Size(tmp); Py_XDECREF(tmp); } /* Get the number of extra arguments */ if (xa != NULL) ext = PyTuple_Size((PyObject *)xa); /* Calculate the size of call-backs argument list */ siz = MIN(maxnofargs+ext,tot); *nofargs = MAX(0,siz-ext); #ifdef DEBUGCFUNCS fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); #endif if (siz<tot-opt) { fprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied 
function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt); goto capi_fail; } /* Initialize argument list */ *args = (PyTupleObject *)PyTuple_New(siz); for (i=0;i<*nofargs;i++) { Py_INCREF(Py_None); PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None); } if (xa != NULL) for (i=(*nofargs);i<siz;i++) { tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs)); Py_INCREF(tmp); PyTuple_SET_ITEM(*args,i,tmp); } CFUNCSMESS(\"create_cb_arglist-end\\n\"); return 1; capi_fail: if ((PyErr_Occurred())==NULL) PyErr_SetString(#modulename#_error,errmess); return 0; } """ def buildcfuncs(): from .capi_maps import c2capi_map for k in c2capi_map.keys(): m = 'pyarr_from_p_%s1' % k cppmacros[ m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k]) k = 'string' m = 'pyarr_from_p_%s1' % k # NPY_CHAR compatibility, NPY_STRING with itemsize 1 cppmacros[ m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m) ############ Auxiliary functions for sorting needs ################### def append_needs(need, flag=1): global outneeds, needs if isinstance(need, list): for n in need: append_needs(n, flag) elif isinstance(need, str): if not need: return if need in includes0: n = 'includes0' elif need in includes: n = 'includes' elif need in typedefs: n = 'typedefs' elif need in typedefs_generated: n = 'typedefs_generated' elif need in cppmacros: n = 'cppmacros' elif need in cfuncs: n = 'cfuncs' elif need in callbacks: n = 'callbacks' elif need in f90modhooks: n = 'f90modhooks' elif need in commonhooks: n = 'commonhooks' else: errmess('append_needs: unknown need %s\n' % (repr(need))) return if need in outneeds[n]: return if flag: tmp = {} if need in needs: for nn in needs[need]: t = append_needs(nn, 0) if isinstance(t, dict): for nnn in t.keys(): if nnn in tmp: tmp[nnn] = tmp[nnn] + t[nnn] else: tmp[nnn] = t[nnn] for nn in tmp.keys(): for nnn in tmp[nn]: if nnn not in outneeds[nn]: outneeds[nn] = [nnn] + outneeds[nn] outneeds[n].append(need) else: tmp = {} if need in needs: for nn in needs[need]: t = append_needs(nn, flag) if isinstance(t, dict): for nnn in t.keys(): if nnn in tmp: tmp[nnn] = t[nnn] + tmp[nnn] else: tmp[nnn] = t[nnn] if n not in tmp: tmp[n] = [] tmp[n].append(need) return tmp else: errmess('append_needs: expected list or string but got :%s\n' % (repr(need))) def get_needs(): global outneeds, needs res = {} for n in outneeds.keys(): out = [] saveout = copy.copy(outneeds[n]) while len(outneeds[n]) > 0: if outneeds[n][0] not in needs: out.append(outneeds[n][0]) del outneeds[n][0] else: flag = 0 for k in outneeds[n][1:]: if k in needs[outneeds[n][0]]: flag = 1 break if flag: outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] else: out.append(outneeds[n][0]) del outneeds[n][0] if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ and outneeds[n] != []: print(n, saveout) errmess( 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') out = out + saveout break saveout = copy.copy(outneeds[n]) if out == []: out = [n] res[n] = out return res
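A minimal sketch of how the dependency-resolution helpers at the end of cfuncs.py above fit together. The names are real module members; the printed ordering assumes a fresh interpreter where nothing else has been appended yet.

from numpy.f2py import cfuncs

# 'float_from_pyobj' is declared above to need 'double_from_pyobj',
# so appending it pulls the dependency in ahead of it.
cfuncs.append_needs('float_from_pyobj')
resolved = cfuncs.get_needs()
print(resolved['cfuncs'])  # ['double_from_pyobj', 'float_from_pyobj']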
from __future__ import division, absolute_import, print_function import sys import os import shutil import mmap import pytest from tempfile import NamedTemporaryFile, TemporaryFile, mktemp, mkdtemp from numpy import ( memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply) from numpy.compat import Path from numpy import arange, allclose, asarray from numpy.testing import ( assert_, assert_equal, assert_array_equal, suppress_warnings ) class TestMemmap(object): def setup(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.tempdir = mkdtemp() self.shape = (3, 4) self.dtype = 'float32' self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) def teardown(self): self.tmpfp.close() shutil.rmtree(self.tempdir) def test_roundtrip(self): # Write data to file fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp # Test __del__ machinery, which handles cleanup # Read data back from file newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', shape=self.shape) assert_(allclose(self.data, newfp)) assert_array_equal(self.data, newfp) assert_equal(newfp.flags.writeable, False) def test_open_with_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] del fp def test_unnamed_file(self): with TemporaryFile() as f: fp = memmap(f, dtype=self.dtype, shape=self.shape) del fp def test_attributes(self): offset = 1 mode = "w+" fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, shape=self.shape, offset=offset) assert_equal(offset, fp.offset) assert_equal(mode, fp.mode) del fp def test_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) abspath = os.path.abspath(tmpname) fp[:] = self.data[:] assert_equal(abspath, fp.filename) b = fp[:1] assert_equal(abspath, b.filename) del b del fp @pytest.mark.skipif(Path is None, reason="No pathlib.Path") def test_path(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', shape=self.shape) # os.path.realpath does not resolve symlinks on Windows # see: https://bugs.python.org/issue9949 # use Path.resolve, just as memmap class does internally abspath = str(Path(tmpname).resolve()) fp[:] = self.data[:] assert_equal(abspath, str(fp.filename.resolve())) b = fp[:1] assert_equal(abspath, str(b.filename.resolve())) del b del fp def test_filename_fileobj(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", shape=self.shape) assert_equal(fp.filename, self.tmpfp.name) @pytest.mark.skipif(sys.platform == 'gnu0', reason="Known to fail on hurd") def test_flush(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] assert_equal(fp[0], self.data[0]) fp.flush() def test_del(self): # Make sure a view does not delete the underlying mmap fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) fp_base[0] = 5 fp_view = fp_base[0:1] assert_equal(fp_view[0], 5) del fp_view # Should still be able to access and assign values after # deleting the view assert_equal(fp_base[0], 5) fp_base[0] = 6 assert_equal(fp_base[0], 6) def test_arithmetic_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) tmp = (fp + 10) if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_indexing_drops_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) 
tmp = fp[(1, 2), (2, 3)] if isinstance(tmp, memmap): assert_(tmp._mmap is not fp._mmap) def test_slicing_keeps_references(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', shape=self.shape) assert_(fp[:2, :2]._mmap is fp._mmap) def test_view(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) new1 = fp.view() new2 = new1.view() assert_(new1.base is fp) assert_(new2.base is fp) new_array = asarray(fp) assert_(new_array.base is fp) def test_ufunc_return_ndarray(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data with suppress_warnings() as sup: sup.filter(FutureWarning, "np.average currently does not preserve") for unary_op in [sum, average, product]: result = unary_op(fp) assert_(isscalar(result)) assert_(result.__class__ is self.data[0, 0].__class__) assert_(unary_op(fp, axis=0).__class__ is ndarray) assert_(unary_op(fp, axis=1).__class__ is ndarray) for binary_op in [add, subtract, multiply]: assert_(binary_op(fp, self.data).__class__ is ndarray) assert_(binary_op(self.data, fp).__class__ is ndarray) assert_(binary_op(fp, fp).__class__ is ndarray) fp += 1 assert(fp.__class__ is memmap) add(fp, 1, out=fp) assert(fp.__class__ is memmap) def test_getitem(self): fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data assert_(fp[1:, :-1].__class__ is memmap) # Fancy indexing returns a copy that is not memmapped assert_(fp[[0, 1]].__class__ is ndarray) def test_memmap_subclass(self): class MemmapSubClass(memmap): pass fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) fp[:] = self.data # We keep previous behavior for subclasses of memmap, i.e. the # ufunc and __getitem__ output is never turned into a ndarray assert_(sum(fp, axis=0).__class__ is MemmapSubClass) assert_(sum(fp).__class__ is MemmapSubClass) assert_(fp[1:, :-1].__class__ is MemmapSubClass) assert(fp[[0, 1]].__class__ is MemmapSubClass) def test_mmap_offset_greater_than_allocation_granularity(self): size = 5 * mmap.ALLOCATIONGRANULARITY offset = mmap.ALLOCATIONGRANULARITY + 1 fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_(fp.offset == offset) def test_no_shape(self): self.tmpfp.write(b'a'*16) mm = memmap(self.tmpfp, dtype='float64') assert_equal(mm.shape, (2,)) def test_empty_array(self): # gh-12653 with pytest.raises(ValueError, match='empty file'): memmap(self.tmpfp, shape=(0,4), mode='w+') self.tmpfp.write(b'\0') # ok now the file is not empty memmap(self.tmpfp, shape=(0,4), mode='w+')
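For reference, a self-contained sketch of the write/read round trip that TestMemmap.test_roundtrip above exercises; the dtype and shape are arbitrary.

import numpy as np
from tempfile import NamedTemporaryFile

with NamedTemporaryFile(prefix='mmap') as f:
    # Write through a memory map, then reopen the same file read-only.
    fp = np.memmap(f, dtype='float32', mode='w+', shape=(3, 4))
    fp[:] = np.arange(12, dtype='float32').reshape(3, 4)
    fp.flush()
    ro = np.memmap(f, dtype='float32', mode='r', shape=(3, 4))
    assert not ro.flags.writeable
    assert (ro == fp).all()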
pizzathief/numpy
numpy/core/tests/test_memmap.py
numpy/f2py/cfuncs.py
from collections.abc import Iterable from numbers import Real, Integral import numpy as np import openmc.checkvalue as cv from openmc.stats import Tabular, Univariate from .angle_energy import AngleEnergy from .endf import get_tab2_record, get_tab1_record class LaboratoryAngleEnergy(AngleEnergy): """Laboratory angle-energy distribution Parameters ---------- breakpoints : Iterable of int Breakpoints defining interpolation regions interpolation : Iterable of int Interpolation codes energy : Iterable of float Incoming energies at which distributions exist mu : Iterable of openmc.stats.Univariate Distribution of scattering cosines for each incoming energy energy_out : Iterable of Iterable of openmc.stats.Univariate Distribution of outgoing energies for each incoming energy/scattering cosine Attributes ---------- breakpoints : Iterable of int Breakpoints defining interpolation regions interpolation : Iterable of int Interpolation codes energy : Iterable of float Incoming energies at which distributions exist mu : Iterable of openmc.stats.Univariate Distribution of scattering cosines for each incoming energy energy_out : Iterable of Iterable of openmc.stats.Univariate Distribution of outgoing energies for each incoming energy/scattering cosine """ def __init__(self, breakpoints, interpolation, energy, mu, energy_out): super().__init__() self.breakpoints = breakpoints self.interpolation = interpolation self.energy = energy self.mu = mu self.energy_out = energy_out @property def breakpoints(self): return self._breakpoints @property def interpolation(self): return self._interpolation @property def energy(self): return self._energy @property def mu(self): return self._mu @property def energy_out(self): return self._energy_out @breakpoints.setter def breakpoints(self, breakpoints): cv.check_type('laboratory angle-energy breakpoints', breakpoints, Iterable, Integral) self._breakpoints = breakpoints @interpolation.setter def interpolation(self, interpolation): cv.check_type('laboratory angle-energy interpolation', interpolation, Iterable, Integral) self._interpolation = interpolation @energy.setter def energy(self, energy): cv.check_type('laboratory angle-energy incoming energy', energy, Iterable, Real) self._energy = energy @mu.setter def mu(self, mu): cv.check_type('laboratory angle-energy outgoing cosine', mu, Iterable, Univariate) self._mu = mu @energy_out.setter def energy_out(self, energy_out): cv.check_iterable_type('laboratory angle-energy outgoing energy', energy_out, Univariate, 2, 2) self._energy_out = energy_out @classmethod def from_endf(cls, file_obj): """Generate laboratory angle-energy distribution from an ENDF evaluation Parameters ---------- file_obj : file-like object ENDF file positioned at the start of a section for a correlated angle-energy distribution Returns ------- openmc.data.LaboratoryAngleEnergy Laboratory angle-energy distribution """ params, tab2 = get_tab2_record(file_obj) ne = params[5] energy = np.zeros(ne) mu = [] energy_out = [] for i in range(ne): params, _ = get_tab2_record(file_obj) energy[i] = params[1] n_mu = params[5] mu_i = np.zeros(n_mu) p_mu_i = np.zeros(n_mu) energy_out_i = [] for j in range(n_mu): params, f = get_tab1_record(file_obj) mu_i[j] = params[1] p_mu_i[j] = sum(f.y) energy_out_i.append(Tabular(f.x, f.y)) mu.append(Tabular(mu_i, p_mu_i)) energy_out.append(energy_out_i) return cls(tab2.breakpoints, tab2.interpolation, energy, mu, energy_out) def to_hdf5(self, group): raise NotImplementedError
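A hypothetical construction of the class above, assuming openmc is installed. The lin-lin breakpoints/interpolation and the tabulated numbers are made up for illustration; what matters is the shape contract: one cosine distribution per incoming energy, and one outgoing-energy distribution per (incoming energy, cosine) point, as in from_endf.

from openmc.stats import Tabular
from openmc.data.laboratory import LaboratoryAngleEnergy

energy = [1.0e6, 2.0e6]  # incoming energies (eV)
mu = [
    Tabular([-1.0, 1.0], [0.5, 0.5]),  # isotropic scattering cosine
    Tabular([-1.0, 1.0], [0.5, 0.5]),
]
energy_out = [  # one outgoing-energy distribution per cosine point
    [Tabular([0.0, 1.0e6], [1.0e-6, 1.0e-6]),
     Tabular([0.0, 1.0e6], [1.0e-6, 1.0e-6])],
    [Tabular([0.0, 2.0e6], [5.0e-7, 5.0e-7]),
     Tabular([0.0, 2.0e6], [5.0e-7, 5.0e-7])],
]
dist = LaboratoryAngleEnergy([2], [2], energy, mu, energy_out)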
#!/usr/bin/env python from collections.abc import Mapping, Callable import os from pathlib import Path import numpy as np import pandas as pd import pytest import openmc.data @pytest.fixture(scope='module') def elements_endf(): """Dictionary of element ENDF data indexed by atomic symbol.""" endf_data = os.environ['OPENMC_ENDF_DATA'] elements = {'H': 1, 'O': 8, 'Al': 13, 'Cu': 29, 'Ag': 47, 'U': 92, 'Pu': 94} data = {} for symbol, Z in elements.items(): p_file = 'photoat-{:03}_{}_000.endf'.format(Z, symbol) p_path = os.path.join(endf_data, 'photoat', p_file) a_file = 'atom-{:03}_{}_000.endf'.format(Z, symbol) a_path = os.path.join(endf_data, 'atomic_relax', a_file) data[symbol] = openmc.data.IncidentPhoton.from_endf(p_path, a_path) return data @pytest.fixture() def element(request, elements_endf): """Element ENDF data""" return elements_endf[request.param] @pytest.mark.parametrize( 'element, atomic_number', [ ('Al', 13), ('Cu', 29), ('Pu', 94) ], indirect=['element'] ) def test_attributes(element, atomic_number): assert element.atomic_number == atomic_number @pytest.mark.parametrize( 'element, subshell, binding_energy, num_electrons', [ ('H', 'K', 13.61, 1.0), ('O', 'L3', 14.15, 2.67), ('U', 'P2', 34.09, 2.0) ], indirect=['element'] ) def test_atomic_relaxation(element, subshell, binding_energy, num_electrons): atom_relax = element.atomic_relaxation assert isinstance(atom_relax, openmc.data.photon.AtomicRelaxation) assert subshell in atom_relax.subshells assert atom_relax.binding_energy[subshell] == binding_energy assert atom_relax.num_electrons[subshell] == num_electrons @pytest.mark.parametrize('element', ['Al', 'Cu', 'Pu'], indirect=True) def test_transitions(element): transitions = element.atomic_relaxation.transitions assert transitions assert isinstance(transitions, Mapping) for matrix in transitions.values(): assert isinstance(matrix, pd.core.frame.DataFrame) assert len(matrix.columns) == 4 assert sum(matrix['probability']) == pytest.approx(1.0) @pytest.mark.parametrize( 'element, I, i_shell, ionization_energy, num_electrons', [ ('H', 19.2, 0, 13.6, 1), ('O', 95.0, 2, 13.62, 4), ('U', 890.0, 25, 6.033, -3) ], indirect=['element'] ) def test_bremsstrahlung(element, I, i_shell, ionization_energy, num_electrons): brems = element.bremsstrahlung assert isinstance(brems, Mapping) assert brems['I'] == I assert brems['num_electrons'][i_shell] == num_electrons assert brems['ionization_energy'][i_shell] == ionization_energy assert np.all(np.diff(brems['electron_energy']) > 0.0) assert np.all(np.diff(brems['photon_energy']) > 0.0) assert brems['photon_energy'][0] == 0.0 assert brems['photon_energy'][-1] == 1.0 assert brems['dcs'].shape == (200, 30) @pytest.mark.parametrize( 'element, n_shell', [ ('H', 1), ('O', 3), ('Al', 5) ], indirect=['element'] ) def test_compton_profiles(element, n_shell): profile = element.compton_profiles assert profile assert isinstance(profile, Mapping) assert all(isinstance(x, Callable) for x in profile['J']) assert all(len(x) == n_shell for x in profile.values()) @pytest.mark.parametrize( 'element, reaction', [ ('Cu', 541), ('Ag', 502), ('Pu', 504) ], indirect=['element'] ) def test_reactions(element, reaction): reactions = element.reactions assert all(isinstance(x, openmc.data.PhotonReaction) for x in reactions.values()) assert reaction in reactions with pytest.raises(KeyError): reactions[18] @pytest.mark.parametrize('element', ['Pu'], indirect=True) def test_export_to_hdf5(tmpdir, element): filename = str(tmpdir.join('tmp.h5')) element.export_to_hdf5(filename) 
assert os.path.exists(filename) # Read in data from hdf5 element2 = openmc.data.IncidentPhoton.from_hdf5(filename) # Check for some cross section and datasets of element and element2 energy = np.logspace(np.log10(1.0), np.log10(1.0e10), num=100) for mt in (502, 504, 515, 517, 522, 541, 570): xs = element[mt].xs(energy) xs2 = element2[mt].xs(energy) assert np.allclose(xs, xs2) assert element[502].scattering_factor == element2[502].scattering_factor assert element.atomic_relaxation.transitions['O3'].equals( element2.atomic_relaxation.transitions['O3']) assert (element.compton_profiles['binding_energy'] == element2.compton_profiles['binding_energy']).all() assert (element.bremsstrahlung['electron_energy'] == element2.bremsstrahlung['electron_energy']).all() # Export to hdf5 again element2.export_to_hdf5(filename, 'w') def test_photodat_only(run_in_tmpdir): endf_dir = Path(os.environ['OPENMC_ENDF_DATA']) photoatomic_file = endf_dir / 'photoat' / 'photoat-001_H_000.endf' data = openmc.data.IncidentPhoton.from_endf(photoatomic_file) data.export_to_hdf5('tmp.h5', 'w')
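The fixtures above boil down to the following usage pattern. OPENMC_ENDF_DATA and the hydrogen file names follow the same conventions as the tests, and MT=504 (incoherent scattering) is one of the reactions they check.

import os
import numpy as np
import openmc.data

endf = os.environ['OPENMC_ENDF_DATA']
hydrogen = openmc.data.IncidentPhoton.from_endf(
    os.path.join(endf, 'photoat', 'photoat-001_H_000.endf'),
    os.path.join(endf, 'atomic_relax', 'atom-001_H_000.endf'),
)
energy = np.logspace(3, 7, 50)         # 1 keV to 10 MeV, in eV
incoherent = hydrogen[504].xs(energy)  # cross section values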
smharper/openmc
tests/unit_tests/test_data_photon.py
openmc/data/laboratory.py
"""Organize MySensors transports.""" import asyncio import logging import threading import serial.threaded _LOGGER = logging.getLogger(__name__) class Transport: """Handle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type. """ # pylint: disable=unused-argument def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs): """Set up transport.""" self._connect = connect self.can_log = False self.connect_task = None self.gateway = gateway self.protocol = None self.reconnect_timeout = reconnect_timeout self.timeout = timeout def disconnect(self): """Disconnect from the transport.""" if not self.protocol or not self.protocol.transport: self.protocol = None # Make sure protocol is None return _LOGGER.info("Disconnecting from gateway") self.protocol.transport.close() self.protocol = None def send(self, message): """Write a message to the gateway.""" if not message or not self.protocol or not self.protocol.transport: return if not self.can_log: _LOGGER.debug("Sending %s", message.strip()) try: self.protocol.transport.write(message.encode()) except OSError as exc: _LOGGER.error( "Failed writing to transport %s: %s", self.protocol.transport, exc ) self.protocol.transport.close() self.protocol.conn_lost_callback() class SyncTransport(Transport): """Sync version of transport class.""" def __init__(self, *args, **kwargs): """Set up transport.""" super().__init__(*args, **kwargs) self._lock = threading.Lock() self.protocol = BaseMySensorsProtocol(self.gateway, self.connect) def connect(self): """Connect to the transport.""" connect_thread = threading.Thread(target=self._connect, args=(self,)) connect_thread.start() def send(self, message): """Write a message to the gateway.""" with self._lock: super().send(message) class AsyncTransport(Transport): """Async version of transport class.""" def __init__(self, *args, loop=None, protocol=None, **kwargs): """Set up transport.""" super().__init__(*args, **kwargs) self.loop = loop or asyncio.get_event_loop() def conn_lost(): """Handle connection_lost in protocol class.""" self.connect_task = self.loop.create_task(self.connect()) if not protocol: protocol = AsyncMySensorsProtocol self.protocol = protocol(self.gateway, conn_lost) async def connect(self): """Connect to the transport.""" await self._connect(self) class BaseMySensorsProtocol(serial.threaded.LineReader): """MySensors base protocol class.""" TERMINATOR = b"\n" def __init__(self, gateway, conn_lost_callback): """Set up base protocol.""" super().__init__() self.gateway = gateway self.conn_lost_callback = conn_lost_callback def __repr__(self): """Return the representation.""" return f"<{self.__class__.__name__}>" def connection_made(self, transport): """Handle created connection.""" super().connection_made(transport) if hasattr(self.transport, "serial"): _LOGGER.info("Connected to %s", self.transport.serial) else: _LOGGER.info("Connected to %s", self.transport) self._connection_made() def handle_line(self, line): """Handle incoming string data one line at a time.""" if not self.gateway.tasks.transport.can_log: _LOGGER.debug("Receiving %s", line) self.gateway.tasks.add_job(self.gateway.logic, line) def connection_lost(self, exc): """Handle lost connection.""" _LOGGER.debug("Connection lost with %s", self.transport.serial) if exc: self.transport.serial.close() self._connection_lost(exc) def _connection_made(self): """Call connection made callbacks.""" if self.gateway.on_conn_made is not None: 
self.gateway.on_conn_made(self.gateway) def _connection_lost(self, exc): """Call connection lost callbacks.""" if self.gateway.on_conn_lost is not None: self.gateway.on_conn_lost(self.gateway, exc) if exc: _LOGGER.error(exc) self.conn_lost_callback() self.transport = None class AsyncMySensorsProtocol(BaseMySensorsProtocol, asyncio.Protocol): """Async serial protocol class.""" def connection_lost(self, exc): """Handle lost connection.""" _LOGGER.debug("Connection lost with %s", self.transport) self._connection_lost(exc)
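A wiring sketch for the async variant above. pymysensors' gateway classes normally supply the _connect coroutine; here serial_asyncio (pyserial-asyncio), the port name, and the MagicMock gateway are assumptions for illustration only.

import asyncio
from unittest import mock

import serial_asyncio  # assumed third-party dependency (pyserial-asyncio)

from mysensors.transport import AsyncTransport

async def connect(transport):
    # Hand the transport's protocol instance to the serial connection.
    await serial_asyncio.create_serial_connection(
        transport.loop, lambda: transport.protocol, "/dev/ttyUSB0", baudrate=115200
    )

loop = asyncio.get_event_loop()
gateway = mock.MagicMock()  # stand-in for a real mysensors Gateway
transport = AsyncTransport(gateway, connect, loop=loop)
loop.run_until_complete(transport.connect())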
"""Test the gateway transport.""" from unittest import mock import pytest from mysensors import Gateway from mysensors.task import SyncTasks from mysensors.transport import BaseMySensorsProtocol, Transport # pylint: disable=redefined-outer-name @pytest.fixture def connection_transport(): """Return a mock connection transport.""" return mock.MagicMock() @pytest.fixture def reconnect_callback(): """Return a mock reconnect callback.""" return mock.MagicMock() @pytest.fixture def gateway(connection_transport, reconnect_callback): """Return gateway instance.""" _gateway = Gateway() protocol = BaseMySensorsProtocol(_gateway, reconnect_callback) def connect(): """Connect to device.""" protocol.connection_made(connection_transport) transport = Transport(gateway, connect) transport.connect = connect transport.protocol = protocol _gateway.tasks = SyncTasks(_gateway.const, False, None, _gateway.sensors, transport) return _gateway def test_connection_made(gateway, connection_transport): """Test connection is made.""" assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport def test_connection_made_callback(gateway, connection_transport): """Test that callbacks are called when connection is made.""" conn_made = mock.MagicMock() gateway.on_conn_made = conn_made assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport assert conn_made.call_count == 1 def test_handle_line(gateway): """Test handle line.""" line = "1;255;0;0;17;1.4.1\n" gateway.tasks.transport.protocol.handle_line(line) gateway.tasks.run_job() assert 1 in gateway.sensors def test_disconnect(gateway, connection_transport): """Test disconnect.""" assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport gateway.tasks.transport.disconnect() assert connection_transport.close.call_count == 1 assert gateway.tasks.transport.protocol is None def test_disconnect_no_connection(gateway, connection_transport): """Test disconnect without active connection.""" assert gateway.tasks.transport.protocol is not None assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.disconnect() assert connection_transport.close.call_count == 0 assert gateway.tasks.transport.protocol is None def test_connection_lost(gateway, connection_transport, reconnect_callback): """Test connection is lost.""" assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport gateway.tasks.transport.protocol.connection_lost("error") assert connection_transport.serial.close.call_count == 1 assert reconnect_callback.call_count == 1 assert gateway.tasks.transport.protocol.transport is None def test_connection_lost_callback(gateway, connection_transport, reconnect_callback): """Test connection is lost and that callbacks are called.""" conn_lost = mock.MagicMock() gateway.on_conn_lost = conn_lost assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport gateway.tasks.transport.protocol.connection_lost("error") assert connection_transport.serial.close.call_count == 1 assert conn_lost.call_count == 1 assert conn_lost.call_args == 
mock.call(gateway, "error") assert reconnect_callback.call_count == 1 assert gateway.tasks.transport.protocol.transport is None def test_send(gateway, connection_transport): """Test send.""" assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport msg_string = "1;255;3;0;1;123456789\n" gateway.tasks.transport.send(msg_string) assert connection_transport.write.call_count == 1 assert connection_transport.write.call_args == mock.call(msg_string.encode()) def test_send_no_message(gateway, connection_transport): """Test send with falsy message.""" assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport msg_string = "" gateway.tasks.transport.send(msg_string) assert connection_transport.write.call_count == 0 def test_send_no_protocol(gateway, connection_transport): """Test send with no protocol.""" gateway.tasks.transport.protocol = None msg_string = "1;255;3;0;1;123456789\n" gateway.tasks.transport.send(msg_string) assert connection_transport.write.call_count == 0 def test_send_no_transport(gateway, connection_transport): """Test send with no transport.""" assert gateway.tasks.transport.protocol.transport is None msg_string = "1;255;3;0;1;123456789\n" gateway.tasks.transport.send(msg_string) assert connection_transport.write.call_count == 0 def test_send_error(gateway, connection_transport, reconnect_callback): """Test send raises OSError.""" assert gateway.tasks.transport.protocol.transport is None gateway.tasks.transport.connect() assert gateway.tasks.transport.protocol.transport is connection_transport msg_string = "1;255;3;0;1;123456789\n" connection_transport.write = mock.MagicMock(side_effect=OSError()) gateway.tasks.transport.send(msg_string) assert connection_transport.write.call_count == 1 assert connection_transport.close.call_count == 1 assert reconnect_callback.call_count == 1
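The line fed to handle_line in test_handle_line above follows the MySensors serial API; splitting it makes the fields explicit.

# node-id ; child-sensor-id ; command ; ack ; type ; payload
line = "1;255;0;0;17;1.4.1\n"
node_id, child_id, command, ack, msg_type, payload = line.strip().split(";")
assert node_id == "1" and payload == "1.4.1"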
theolind/pymysensors
tests/test_transport.py
mysensors/transport.py
#!/usr/bin/env python3
"""
Copyright (C), 2013, The Schilduil Team. All rights reserved.
"""
import logging
import inspect
import time
from collections import OrderedDict
from operator import add
from functools import wraps

__all__ = [
    "logging",
    "get_timings",
    "init_timings",
    "disable_timings",
    "timings_report",
    "loguse",
]

timings = None


def get_timings():
    """Returns the gathered timings."""
    return timings


def init_timings():
    """Initiates/resets the timings."""
    global timings
    timings = {}


def disable_timings():
    """Disables the gathering of timings."""
    global timings
    timings = None


def add_timing(f, time):
    """Adds an execution time for a callable to the timings."""
    global timings
    if timings is None:
        return
    if f in timings:
        timings[f] = tuple(map(add, timings[f], (1, time)))
    else:
        timings[f] = (1, time)


def timings_report():
    """Generates a report of the timings of functions.

    The slowest on average will be first."""
    if timings is None:
        return None
    report = {}
    for f, (count, time) in timings.items():
        report[f] = time / count
    sorted_report = OrderedDict()
    for f in sorted(report, key=report.get, reverse=True):
        sorted_report[f] = timings[f] + (report[f],)
    return sorted_report


def loguse(param=None):
    """When in debug it will log entering and exiting a function or object method.

    WARNING: Some callables are broken when you use this (e.g. Thread.__init__)

    Upon entering the callable it will also log all arguments. You can
    specify arguments to this decorator: a string, an int or a list of
    strings and/or integers. A string causes the named argument not to be
    logged; an int causes the positional argument at that index not to be
    logged.

    Example:
        @loguse               # will log all arguments.
        @loguse('arg0')       # will log all but the named argument 'arg0'.
        @loguse(0)            # will log all but the first positional argument.
        @loguse(['arg0', 0])  # will log all but the first pos. and 'arg0'.
    """
    # Checking the param
    # If it is a callable then just return what real_loguse(f) would return.
    # If it is a parameter (even if None) return the real_loguse function.
    # Param:
    #    It could be None => empty list
    #    It could be a string => list with that string as element
    #    It could be an iterable => ok
    start_time = time.time()
    start_time_callable = 0.0
    end_time_callable = 0.0
    f = None
    ignore_parameters = []
    if param is None:
        ignore_parameters = []
    elif callable(param):
        f = param
    elif isinstance(param, str):
        ignore_parameters = [param]
    elif isinstance(param, int):
        ignore_parameters = [param]
    elif hasattr(param, "__iter__"):
        ignore_parameters = param
    # Looking for the classname.
    classname = "?"
    try:
        # We don't want this weird stuff messing in the log decorator
        # halting our code. And that is a real possibility as this
        # stuff works in CPython but does not have to be present in other
        # python implementations. More info on inspect:
        #     http://docs.python.org/3/library/inspect.html
        classname = inspect.getouterframes(inspect.currentframe())[1][3]
    except:
        pass

    def real_loguse(f):
        log = logging.getLogger(f.__module__)

        @wraps(f)
        def decorator(*args, **kwargs):
            start_time_logdecorator = time.time()
            l_args = list(args)
            l_kwargs = dict(kwargs)
            if log.isEnabledFor(logging.DEBUG):
                ignore_parameters.sort(key=str, reverse=True)
                if ignore_parameters:
                    # Deleting any parameters so they are not logged.
                    for param in ignore_parameters:
                        if isinstance(param, int):
                            try:
                                l_args.pop(param)
                            except:
                                pass
                        else:
                            try:
                                del l_kwargs[str(param)]
                            except:
                                pass
                if classname == "<module>":
                    log.debug("> %s(%r, %r)", f.__name__, tuple(l_args), l_kwargs)
                else:
                    log.debug(
                        "> %s.%s(%r, %r)",
                        classname,
                        f.__name__,
                        tuple(l_args),
                        l_kwargs,
                    )
            start_time_callable = time.time()
            result = f(*args, **kwargs)
            end_time_callable = time.time()
            add_timing(f, end_time_callable - start_time_callable)
            if log.isEnabledFor(logging.DEBUG):
                if "@" in ignore_parameters:
                    if classname == "<module>":
                        log.debug("< %s", f.__name__)
                    else:
                        log.debug("< %s.%s", classname, f.__name__)
                else:
                    if classname == "<module>":
                        log.debug("< %s: %r", f.__name__, result)
                    else:
                        log.debug("< %s.%s: %r", classname, f.__name__, result)
            end_time_logdecorator = time.time()
            add_timing(
                "loguse function call overhead",
                end_time_logdecorator
                - end_time_callable
                + start_time_callable
                - start_time_logdecorator,
            )
            return result

        return decorator

    end_time = time.time()
    add_timing("loguse function initialization overhead", end_time - start_time)
    if f:
        return real_loguse(f)
    else:
        return real_loguse
#!/usr/bin/env python3 # import http.client import os import random import sys import threading import time import urllib.error import urllib.request import pytest sys.path.append(os.getcwd()) import suapp """ - Fetch an object by its primary key: http://127.0.0.1:8385/service/fetch?table=Individual&key=5&module=modlib.base&pretty http://127.0.0.1:8385/service/fetch?table=UiIndividual&key=5&module=modlib.base&pretty http://127.0.0.1:8385/service/fetch?table=Kinship&key=5&key=5&module=modlib.kinship&pretty http://127.0.0.1:8385/service/fetch?table=UiKinship&key=5&key=5&module=modlib.kinship&pretty - Run a query defined in a modlib: http://127.0.0.1:8385/service/query/individual.adults?pagenum=1&pagesize=5&pretty - Fetch objects from a one-to-many or many-to-many link: http://127.0.0.1:8385/service/setfetch?table=Individual&key=5&module=modlib.base&link=first_kinships&pretty http://127.0.0.1:8385/service/setfetch?table=UiIndividual&key=5&module=modlib.base&link=first_kinships&pretty """ random_string_params = [ None, (0, 10), (1, 10), (2, 20), (3, 30), (4, 40), (5, 1), (6, 2), (7, 3), (8, 4), (9, 5), (10, 6), (11, 7), (12, 8), (13, 9), ] def random_string(seed, length): random.seed(seed) uri = [] for x in range(length): uri.append(chr(random.randint(64, 122))) return "".join(uri) @pytest.fixture(scope="module") def launch(tmpdir_factory): # Launch with the localweb target, without client. datapath = tmpdir_factory.mktemp("data") logpath = tmpdir_factory.mktemp("log") configpath = tmpdir_factory.mktemp("config") flow = configpath.join("tlwt.flow") port = 8385 hostname = "127.0.0.1" flow.write( "\n".join( [ "START: Application.START", "ABOUT: About.IN", "CONFIGURATION: Configuration.IN", ] ) ) config = { "datasource": {"type": "sqlite", "filename": "%s/suapp.sqlite" % (datapath)}, "log": { "filemode": "w", "filename": "%s/suapp.log" % (logpath), "format": "%(asctime)s %(levelname)s %(name)s %(message)s", "level": "DEBUG", "modules": { "httpd": { "filename": "%s/httpd.access_log" % (logpath), "level": "DEBUG", } }, }, "httpd": {"client": False, "ip": hostname, "port": port, "background": True}, "modules": {}, "target": "localweb", "name": "Test LocalWeb Target", "shortname": "TLWT", "self": "%s/tlwt.yml" % (configpath), } # Let's go. suapp.main(config) time.sleep(1) # ServerThread is now running, so do your tests... print("Before") yield (hostname, port) print("After") # Find the ServerThread and shut it down.
for t in threading.enumerate(): print("Thread: %s (%r/%s) alive: %s" % (t.name, t, t, t.is_alive())) if t.name == "ServerThread": t.shutdown() @pytest.fixture(params=random_string_params) def random_uri(request): if not request.param: return "" else: (seed, length) = request.param random.seed(seed) uri = [] for x in range(length): uri.append(chr(random.randint(64, 122))) return "".join(uri) @pytest.fixture def random_service_uri(random_uri): return "service/" + random_uri @pytest.fixture(params=[None, (46546, 11)]) def random_random_uri(request, random_uri): uri = ["/"] if request.param: (seed, length) = request.param for x in range(length): uri.append(chr(random.randint(64, 122))) print("URI: %s" % (random_uri)) print(" +: %s" % (uri)) return random_uri + "".join(uri) def test_random_unauthenticated(launch, random_uri): with pytest.raises(urllib.error.HTTPError) as exc_info: with urllib.request.urlopen("http://%s:%s/" % launch + random_uri) as r: assert True == False # Not logged in, so should return HTTP Error 403: Forbidden assert exc_info.value.getcode() == 403 def test_random_service_uri(launch, random_service_uri): with pytest.raises(urllib.error.HTTPError) as exc_info: with urllib.request.urlopen("http://%s:%s/" % launch + random_service_uri) as r: assert True == False # Not logged in, so should return HTTP Error 403: Forbidden assert exc_info.value.getcode() == 403 def test_random_random_uri(launch, random_random_uri): with pytest.raises(urllib.error.HTTPError) as exc_info: with urllib.request.urlopen("http://%s:%s/" % launch + random_random_uri) as r: assert True == False # Not logged in, so should return # HTTP Error 403: Forbidden # HTTP Error 400: Bad Request assert exc_info.value.getcode() in [400, 403]
schilduil/suapp
tests/test_localweb_services.py
suapp/logdecorator.py
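The decorator module above is self-contained, so a short usage sketch needs nothing beyond the module itself (the import path matches this row's code_path). The example function and argument names are illustrative only; note that putting "@" in the ignore list would additionally suppress logging of the return value.

import logging
from suapp.logdecorator import loguse, init_timings, timings_report

logging.basicConfig(level=logging.DEBUG)
init_timings()                       # start gathering call timings

@loguse("password")                  # log entry/exit, but never the 'password' argument
def login(user, password):
    return user == "admin"

login("admin", password="hunter2")
print(timings_report())              # slowest-on-average callables first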
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Handles the "VOUnit" unit format. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from ...extern import six from ...extern.six.moves import zip import copy import keyword import re import warnings from . import generic from . import utils class VOUnit(generic.Generic): """ The IVOA standard for units used by the VO. This is an implementation of `Units in the VO 1.0 <http://www.ivoa.net/Documents/VOUnits/>`_. """ _explicit_custom_unit_regex = re.compile( "^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$") _custom_unit_regex = re.compile("^((?!\d)\w)+$") _custom_units = {} def __init__(self): if '_parser' not in VOUnit.__dict__: VOUnit._parser, VOUnit._lexer = self._make_parser() if not '_units' in VOUnit.__dict__: unit_names = VOUnit._generate_unit_names() VOUnit._units, VOUnit._deprecated_units = unit_names @staticmethod def _generate_unit_names(): from ... import units as u names = {} deprecated_names = set() bases = [ 'A', 'C', 'D', 'F', 'G', 'H', 'Hz', 'J', 'Jy', 'K', 'N', 'Ohm', 'Pa', 'R', 'Ry', 'S', 'T', 'V', 'W', 'Wb', 'a', 'adu', 'arcmin', 'arcsec', 'barn', 'beam', 'bin', 'cd', 'chan', 'count', 'ct', 'd', 'deg', 'eV', 'erg', 'g', 'h', 'lm', 'lx', 'lyr', 'm', 'mag', 'min', 'mol', 'pc', 'ph', 'photon', 'pix', 'pixel', 'rad', 'rad', 's', 'solLum', 'solMass', 'solRad', 'sr', 'u', 'voxel', 'yr' ] binary_bases = [ 'bit', 'byte', 'B' ] simple_units = [ 'Angstrom', 'angstrom', 'AU', 'au', 'Ba', 'dB', 'mas' ] si_prefixes = [ 'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd', '', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y' ] binary_prefixes = [ 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei' ] deprecated_units = set([ 'a', 'angstrom', 'Angstrom', 'au', 'Ba', 'barn', 'ct', 'erg', 'G', 'ph', 'pix' ]) def do_defines(bases, prefixes, skips=[]): for base in bases: for prefix in prefixes: key = prefix + base if key in skips: continue if keyword.iskeyword(key): continue names[key] = getattr(u, key) if base in deprecated_units: deprecated_names.add(key) do_defines(bases, si_prefixes, ['pct', 'pcount', 'yd']) do_defines(binary_bases, si_prefixes + binary_prefixes, ['dB', 'dbyte']) do_defines(simple_units, ['']) return names, deprecated_names def parse(self, s, debug=False): from ... import units as u if s in ('unknown', 'UNKNOWN'): return None if s == '': return u.dimensionless_unscaled if s.count('/') > 1: from ..core import UnitsError raise UnitsError( "'{0}' contains multiple slashes, which is " "disallowed by the VOUnit standard".format(s)) result = self._do_parse(s, debug=debug) return result @classmethod def _parse_unit(cls, unit, detailed_exception=True): from ... import units as u if unit not in cls._units: if cls._explicit_custom_unit_regex.match(unit): return cls._def_custom_unit(unit) if not cls._custom_unit_regex.match(unit): raise ValueError() warnings.warn( "Unit {0!r} not supported by the VOUnit " "standard. {1}".format( unit, utils.did_you_mean_units( unit, cls._units, cls._deprecated_units, cls._to_decomposed_alternative)), u.UnitsWarning) return cls._def_custom_unit(unit) if unit in cls._deprecated_units: utils.unit_deprecation_warning( unit, cls._units[unit], 'VOUnit', cls._to_decomposed_alternative) return cls._units[unit] @classmethod def _get_unit_name(cls, unit): from ... import units as u # The da- and d- prefixes are discouraged. This has the # effect of adding a scale to value in the result. 
if isinstance(unit, u.PrefixUnit): if unit._represents.scale == 10.0: raise ValueError( "In '{0}': VOUnit can not represent units with the 'da' " "(deka) prefix".format(unit)) elif unit._represents.scale == 0.1: raise ValueError( "In '{0}': VOUnit can not represent units with the 'd' " "(deci) prefix".format(unit)) name = unit.get_format_name('vounit') if unit in six.itervalues(cls._custom_units): return name if name not in cls._units: raise ValueError( "Unit {0!r} is not part of the VOUnit standard".format(name)) if name in cls._deprecated_units: utils.unit_deprecation_warning( name, unit, 'VOUnit', cls._to_decomposed_alternative) return name @classmethod def _def_custom_unit(cls, unit): from ... import units as u def def_base(name): if name in cls._custom_units: return cls._custom_units[name] if name.startswith("'"): return u.def_unit( [name[1:-1], name], format={'vounit': name}, namespace=cls._custom_units) else: return u.def_unit( name, namespace=cls._custom_units) if unit in cls._custom_units: return cls._custom_units[unit] for short, full, factor in u.si_prefixes: for prefix in short: if unit.startswith(prefix): base_name = unit[len(prefix):] base_unit = def_base(base_name) return u.PrefixUnit( [prefix + x for x in base_unit.names], u.CompositeUnit(factor, [base_unit], [1], _error_check=False), format={'vounit': prefix + base_unit.names[-1]}, namespace=cls._custom_units) return def_base(unit) @classmethod def to_string(cls, unit): from .. import core # Remove units that aren't known to the format unit = utils.decompose_to_known_units(unit, cls._get_unit_name) if isinstance(unit, core.CompositeUnit): if unit.physical_type == 'dimensionless' and unit.scale != 1: raise core.UnitScaleError( "The VOUnit format is not able to " "represent scale for dimensionless units. " "Multiply your data by {0:e}." .format(unit.scale)) s = '' if unit.scale != 1: m, ex = utils.split_mantissa_exponent(unit.scale) parts = [] if m: parts.append(m) if ex: fex = '10' if not ex.startswith('-'): fex += '+' fex += ex parts.append(fex) s += ' '.join(parts) pairs = list(zip(unit.bases, unit.powers)) pairs.sort(key=lambda x: x[1], reverse=True) s += cls._format_unit_list(pairs) elif isinstance(unit, core.NamedUnit): s = cls._get_unit_name(unit) return s @classmethod def _to_decomposed_alternative(cls, unit): from .. import core try: s = cls.to_string(unit) except core.UnitScaleError: scale = unit.scale unit = copy.copy(unit) unit._scale = 1.0 return '{0} (with data multiplied by {1})'.format( cls.to_string(unit), scale) return s
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy import testing as npt from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose from ... import units as u from ...utils import minversion """ These are the tests for coordinate matching. Note that this requires scipy. """ try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function(): from .. import ICRS from ..matching import match_coordinates_3d #this only uses match_coordinates_3d because that's the actual implementation cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) assert d3d.value[0] == 0 idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) assert np.all(idx == 2) npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) npt.assert_array_less(d3d.value, 0.02) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function_3d_and_sky(): from .. import ICRS from ..matching import match_coordinates_3d, match_coordinates_sky cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [2, 3]) assert_allclose(d2d, [1, 1.9] * u.deg) assert np.abs(d3d[0].to(u.kpc).value - np.radians(1)) < 1e-6 assert np.abs(d3d[1].to(u.kpc).value - 5*np.radians(1.9)) < 1e-5 idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) assert_allclose(d2d, [0, 0.1] * u.deg) assert_allclose(d3d, [4, 4.0000019] * u.kpc) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_kdtree_storage(): from .. import ICRS from ..matching import match_coordinates_3d cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=False) assert not hasattr(ccatalog, '_kdtree') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=True) assert hasattr(ccatalog, '_kdtree') assert not hasattr(ccatalog, 'tislit_cheese') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree='tislit_cheese') assert hasattr(ccatalog, 'tislit_cheese') assert not hasattr(cmatch, 'tislit_cheese') @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_method(): from .. import ICRS, SkyCoord from ...utils import NumpyRNGContext from ..matching import match_coordinates_3d, match_coordinates_sky with NumpyRNGContext(987654321): cmatch = ICRS(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) ccatalog = ICRS(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. 
- 90.)*u.degree) idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog) idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) #should be the same as above because there's no distance, but just make sure this method works idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog) idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) assert len(idx1) == len(d2d1) == len(d3d1) == 20 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): from .. import ICRS from ..matching import search_around_sky, search_around_3d coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg) idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg) assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)] assert d2d_1deg[0] == 1.0*u.deg assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg) assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)] idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc) idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc) assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)] assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)] assert_allclose(d2d_sm, [2, 1]*u.deg) @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around_scalar(): from astropy.coordinates import SkyCoord, Angle cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg") target = SkyCoord('1.1 -30.1', unit="deg") with pytest.raises(ValueError) as excinfo: cat.search_around_sky(target, Angle('2d')) # make sure the error message is *specific* to search_around_sky rather than # generic as reported in #3359 assert 'search_around_sky' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: cat.search_around_3d(target, Angle('2d')) assert 'search_around_3d' in str(excinfo.value)
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/coordinates/tests/test_matching.py
scib/lib/python3.5/site-packages/astropy/units/format/vounit.py
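A sketch of how the VOUnit formatter above is reached through the public astropy.units API. The exact serialized string is an assumption here (recent astropy renders km/s as 'km.s**-1'); the single-slash rule is taken directly from parse() above.

from astropy import units as u

# Serialize a composite unit through the VOUnit format class defined above.
print((u.km / u.s).to_string(format='vounit'))   # e.g. 'km.s**-1' (assumed)

# parse() enforces the standard's single-slash rule:
u.Unit('km/s', format='vounit')        # accepted
# u.Unit('km/s/Mpc', format='vounit')  # would raise UnitsError (multiple slashes)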
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains functions for matching coordinate catalogs. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from ..extern import six from .representation import UnitSphericalRepresentation from .. import units as u __all__ = ['match_coordinates_3d', 'match_coordinates_sky', 'search_around_3d', 'search_around_sky'] def match_coordinates_3d(matchcoord, catalogcoord, nthneighbor=1, storekdtree='_kdtree_3d'): """ Finds the nearest 3-dimensional matches of a coordinate or coordinates in a set of catalog coordinates. This finds the 3-dimensional closest neighbor, which is only different from the on-sky distance if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord``, as an attribute in ``catalogcoord`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : integer array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. Notes ----- This function requires `SciPy <http://www.scipy.org>`_ to be installed or it will fail. """ kdt = _get_cartesian_kdtree(catalogcoord, storekdtree) #make sure coordinate systems match matchcoord = matchcoord.transform_to(catalogcoord) #make sure units match catunit = catalogcoord.cartesian.x.unit matchxyz = matchcoord.cartesian.xyz.to(catunit) matchflatxyz = matchxyz.reshape((3, np.prod(matchxyz.shape) // 3)) dist, idx = kdt.query(matchflatxyz.T, nthneighbor) if nthneighbor > 1: # query gives 1D arrays if k=1, 2D arrays otherwise dist = dist[:, -1] idx = idx[:, -1] sep2d = catalogcoord[idx].separation(matchcoord) return idx.reshape(matchxyz.shape[1:]), sep2d, dist.reshape(matchxyz.shape[1:]) * catunit def match_coordinates_sky(matchcoord, catalogcoord, nthneighbor=1, storekdtree='_kdtree_sky'): """ Finds the nearest on-sky matches of a coordinate or coordinates in a set of catalog coordinates. This finds the on-sky closest neighbor, which is only different from the 3-dimensional match if ``distance`` is set in either ``matchcoord`` or ``catalogcoord``. Parameters ---------- matchcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinate(s) to match to the catalog. 
catalogcoord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The base catalog in which to search for matches. Typically this will be a coordinate object that is an array (i.e., ``catalogcoord.isscalar == False``) nthneighbor : int, optional Which closest neighbor to search for. Typically ``1`` is desired here, as that is correct for matching one set of coordinates to another. The next likely use case is ``2``, for matching a coordinate catalog against *itself* (``1`` is inappropriate because each point will find itself as the closest match). storekdtree : bool or str, optional If a string, will store the KD-Tree used for the computation in the ``catalogcoord`` as an attribute in ``catalogcoord`` with the provided name. This dramatically speeds up subsequent calls with the same catalog. If False, the KD-Tree is discarded after use. Returns ------- idx : integer array Indices into ``catalogcoord`` to get the matched points for each ``matchcoord``. Shape matches ``matchcoord``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. dist3d : `~astropy.units.Quantity` The 3D distance between the closest match for each ``matchcoord`` and the ``matchcoord``. Shape matches ``matchcoord``. If either ``matchcoord`` or ``catalogcoord`` don't have a distance, this is the 3D distance on the unit sphere, rather than a true distance. Notes ----- This function requires `SciPy <http://www.scipy.org>`_ to be installed or it will fail. """ # send to catalog frame newmatch = matchcoord.transform_to(catalogcoord) #strip out distance info match_urepr = newmatch.data.represent_as(UnitSphericalRepresentation) newmatch_u = newmatch.realize_frame(match_urepr) cat_urepr = catalogcoord.data.represent_as(UnitSphericalRepresentation) newcat_u = catalogcoord.realize_frame(cat_urepr) if isinstance(storekdtree, six.string_types) and hasattr(catalogcoord, storekdtree): # Check for a stored KD-tree on the passed-in coordinate. Normally it # will have a distinct name from the "3D" one, so it's safe to use even # though it's based on UnitSphericalRepresentation. storekdtree = getattr(catalogcoord, storekdtree) idx, sep2d, sep3d = match_coordinates_3d(newmatch_u, newcat_u, nthneighbor, storekdtree) # sep3d is *wrong* above, because the distance information was removed, # unless one of the catalogs doesn't have a real distance if not (isinstance(catalogcoord.data, UnitSphericalRepresentation) or isinstance(newmatch.data, UnitSphericalRepresentation)): sep3d = catalogcoord[idx].separation_3d(newmatch) #update the kdtree on the actual passed-in coordinate if isinstance(storekdtree, six.string_types): setattr(catalogcoord, storekdtree, getattr(newcat_u, storekdtree)) return idx, sep2d, sep3d def search_around_3d(coords1, coords2, distlimit, storekdtree='_kdtree_3d'): """ Searches for pairs of points that are at least as close as a specified distance in 3D space. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation_3d`` methods. Parameters ---------- coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. 
coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. distlimit : `~astropy.units.Quantity` with distance units The physical radius to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search as attributes with the name ``storekdtree`` in ``coords2``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : integer array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : integer array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``. Notes ----- This function requires `SciPy <http://www.scipy.org>`_ (>=0.12.0) to be installed or it will fail. If you are using this function to search in a catalog for matches around specific points, the convention is for ``coords2`` to be the catalog, and ``coords1`` are the points to search around. While these operations are mathematically the same if ``coords1`` and ``coords2`` are flipped, some of the optimizations may work better if this convention is obeyed. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. """ if not distlimit.isscalar: raise ValueError('distlimit must be a scalar in search_around_3d') if coords1.isscalar or coords2.isscalar: raise ValueError('One of the inputs to search_around_3d is a scalar. ' 'search_around_3d is intended for use with array ' 'coordinates, not scalars. Instead, use ' '``coord1.separation_3d(coord2) < distlimit`` to find ' 'the coordinates near a scalar coordinate.') kdt2 = _get_cartesian_kdtree(coords2, storekdtree) cunit = coords2.cartesian.x.unit # we convert coord1 to match coord2's frame. We do it this way # so that if the conversion does happen, the KD tree of coord2 at least gets # saved. (by convention, coord2 is the "catalog" if that makes sense) coords1 = coords1.transform_to(coords2) kdt1 = _get_cartesian_kdtree(coords1, storekdtree, forceunit=cunit) # this is the *cartesian* 3D distance that corresponds to the given angle d = distlimit.to(cunit).value idxs1 = [] idxs2 = [] for i, matches in enumerate(kdt1.query_ball_tree(kdt2, d)): for match in matches: idxs1.append(i) idxs2.append(match) idxs1 = np.array(idxs1) idxs2 = np.array(idxs2) if idxs1.size == 0: d2ds = u.Quantity([], u.deg) d3ds = u.Quantity([], u.dimensionless_unscaled) else: d2ds = coords1[idxs1].separation(coords2[idxs2]) d3ds = coords1[idxs1].separation_3d(coords2[idxs2]) return idxs1, idxs2, d2ds, d3ds def search_around_sky(coords1, coords2, seplimit, storekdtree='_kdtree_sky'): """ Searches for pairs of points that have an angular separation at least as close as a specified angle. This is intended for use on coordinate objects with arrays of coordinates, not scalars. For scalar coordinates, it is better to use the ``separation`` methods. 
Parameters ---------- coords1 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The first set of coordinates, which will be searched for matches from ``coords2`` within ``seplimit``. Cannot be a scalar coordinate. coords2 : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The second set of coordinates, which will be searched for matches from ``coords1`` within ``seplimit``. Cannot be a scalar coordinate. seplimit : `~astropy.units.Quantity` with angle units The on-sky separation to search within. storekdtree : bool or str, optional If a string, will store the KD-Tree used in the search as attributes with the name ``storekdtree`` in ``coords2``. This speeds up subsequent calls to this function. If False, the KD-Trees are not saved. Returns ------- idx1 : integer array Indices into ``coords1`` that matches to the corresponding element of ``idx2``. Shape matches ``idx2``. idx2 : integer array Indices into ``coords2`` that matches to the corresponding element of ``idx1``. Shape matches ``idx1``. sep2d : `~astropy.coordinates.Angle` The on-sky separation between the coordinates. Shape matches ``idx1`` and ``idx2``. dist3d : `~astropy.units.Quantity` The 3D distance between the coordinates. Shape matches ``idx1`` and ``idx2``. If either ``coords1`` or ``coords2`` don't have a distance, this is the 3D distance on the unit sphere, rather than a physical distance. Notes ----- This function requires `SciPy <http://www.scipy.org>`_ (>=0.12.0) to be installed or it will fail. In the current implementation, the return values are always sorted in the same order as the ``coords1`` (so ``idx1`` is in ascending order). This is considered an implementation detail, though, so it could change in a future release. """ from . import Angle if not seplimit.isscalar: raise ValueError('seplimit must be a scalar in search_around_sky') if coords1.isscalar or coords2.isscalar: raise ValueError('One of the inputs to search_around_sky is a scalar. ' 'search_around_sky is intended for use with array ' 'coordinates, not scalars. Instead, use ' '``coord1.separation(coord2) < seplimit`` to find the ' 'coordinates near a scalar coordinate.') # we convert coord1 to match coord2's frame. We do it this way # so that if the conversion does happen, the KD tree of coord2 at least gets # saved. 
(by convention, coord2 is the "catalog" if that makes sense) coords1 = coords1.transform_to(coords2) #strip out distance info urepr1 = coords1.data.represent_as(UnitSphericalRepresentation) ucoords1 = coords1.realize_frame(urepr1) kdt1 = _get_cartesian_kdtree(ucoords1, storekdtree) if storekdtree and hasattr(coords2, storekdtree): #just use the stored KD-Tree kdt2 = getattr(coords2, storekdtree) else: #strip out distance info urepr2 = coords2.data.represent_as(UnitSphericalRepresentation) ucoords2 = coords2.realize_frame(urepr2) kdt2 = _get_cartesian_kdtree(ucoords2, storekdtree) if storekdtree: #save the KD-Tree in coords2, *not* ucoords2 setattr(coords2, storekdtree, kdt2) # this is the *cartesian* 3D distance that corresponds to the given angle r = (2 * np.sin(Angle(seplimit) / 2.0)).value idxs1 = [] idxs2 = [] for i, matches in enumerate(kdt1.query_ball_tree(kdt2, r)): for match in matches: idxs1.append(i) idxs2.append(match) idxs1 = np.array(idxs1) idxs2 = np.array(idxs2) if idxs1.size == 0: d2ds = u.Quantity([], u.deg) d3ds = u.Quantity([], u.dimensionless_unscaled) else: d2ds = coords1[idxs1].separation(coords2[idxs2]) try: d3ds = coords1[idxs1].separation_3d(coords2[idxs2]) except ValueError: # they don't have distances, so we just fall back on the cartesian # distance, computed from d2ds d3ds = 2 * np.sin(d2ds / 2.0) return idxs1, idxs2, d2ds, d3ds def _get_cartesian_kdtree(coord, attrname_or_kdt='_kdtree', forceunit=None): """ This is a utility function to retrieve (and build/cache, if necessary) a 3D cartesian KD-Tree from various sorts of astropy coordinate objects. Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The coordinates to build the KD-Tree for. attrname_or_kdt : bool or str or KDTree If a string, will store the KD-Tree used for the computation in the ``coord``, as an attribute in ``coord`` with the provided name. If given as a KD-Tree, it will just be used directly. forceunit: unit or None If a unit, the cartesian coordinates will convert to that unit before being put in the KD-Tree. If None, whatever unit it's already in will be used Returns ------- kdt : `~scipy.spatial.cKDTree` or `~scipy.spatial.KDTree` The KD-Tree representing the 3D cartesian representation of the input coordinates. """ from warnings import warn #without scipy this will immediately fail from scipy import spatial try: KDTree = spatial.cKDTree except: warn('C-based KD tree not found, falling back on (much slower) ' 'python implementation') KDTree = spatial.KDTree if attrname_or_kdt is True: # backwards compatibility for pre v0.4 attrname_or_kdt = '_kdtree' # figure out where any cached KDTree might be if isinstance(attrname_or_kdt, six.string_types): kdt = getattr(coord, attrname_or_kdt, None) if kdt is not None and not isinstance(kdt, KDTree): raise ValueError('The `attrname_or_kdt` "{0}" is not a scipy KD tree!'.format(attrname_or_kdt)) elif isinstance(attrname_or_kdt, KDTree): kdt = attrname_or_kdt attrname_or_kdt = None elif not attrname_or_kdt: kdt = None else: raise ValueError('Invalid `attrname_or_kdt` argument for KD-Tree:' + str(attrname_or_kdt)) if kdt is None: #need to build the cartesian KD-tree for the catalog if forceunit is None: cartxyz = coord.cartesian.xyz else: cartxyz = coord.cartesian.xyz.to(forceunit) flatxyz = cartxyz.reshape((3, np.prod(cartxyz.shape) // 3)) kdt = KDTree(flatxyz.value.T) if attrname_or_kdt: #cache the kdtree in `coord` setattr(coord, attrname_or_kdt, kdt) return kdt
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy import testing as npt from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose from ... import units as u from ...utils import minversion """ These are the tests for coordinate matching. Note that this requires scipy. """ try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function(): from .. import ICRS from ..matching import match_coordinates_3d #this only uses match_coordinates_3d because that's the actual implementation cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) assert d3d.value[0] == 0 idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) assert np.all(idx == 2) npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) npt.assert_array_less(d3d.value, 0.02) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function_3d_and_sky(): from .. import ICRS from ..matching import match_coordinates_3d, match_coordinates_sky cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [2, 3]) assert_allclose(d2d, [1, 1.9] * u.deg) assert np.abs(d3d[0].to(u.kpc).value - np.radians(1)) < 1e-6 assert np.abs(d3d[1].to(u.kpc).value - 5*np.radians(1.9)) < 1e-5 idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) assert_allclose(d2d, [0, 0.1] * u.deg) assert_allclose(d3d, [4, 4.0000019] * u.kpc) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_kdtree_storage(): from .. import ICRS from ..matching import match_coordinates_3d cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=False) assert not hasattr(ccatalog, '_kdtree') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=True) assert hasattr(ccatalog, '_kdtree') assert not hasattr(ccatalog, 'tislit_cheese') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree='tislit_cheese') assert hasattr(ccatalog, 'tislit_cheese') assert not hasattr(cmatch, 'tislit_cheese') @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_method(): from .. import ICRS, SkyCoord from ...utils import NumpyRNGContext from ..matching import match_coordinates_3d, match_coordinates_sky with NumpyRNGContext(987654321): cmatch = ICRS(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) ccatalog = ICRS(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. 
- 90.)*u.degree) idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog) idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) #should be the same as above because there's no distance, but just make sure this method works idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog) idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) assert len(idx1) == len(d2d1) == len(d3d1) == 20 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): from .. import ICRS from ..matching import search_around_sky, search_around_3d coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg) idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg) assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)] assert d2d_1deg[0] == 1.0*u.deg assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg) assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)] idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc) idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc) assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)] assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)] assert_allclose(d2d_sm, [2, 1]*u.deg) @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around_scalar(): from astropy.coordinates import SkyCoord, Angle cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg") target = SkyCoord('1.1 -30.1', unit="deg") with pytest.raises(ValueError) as excinfo: cat.search_around_sky(target, Angle('2d')) # make sure the error message is *specific* to search_around_sky rather than # generic as reported in #3359 assert 'search_around_sky' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: cat.search_around_3d(target, Angle('2d')) assert 'search_around_3d' in str(excinfo.value)
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/coordinates/tests/test_matching.py
scib/lib/python3.5/site-packages/astropy/coordinates/matching.py
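Grounded in the tests above, a quick end-to-end use of the on-sky matcher through its SkyCoord wrapper (the coordinate values are illustrative):

from astropy import units as u
from astropy.coordinates import SkyCoord

catalog = SkyCoord(ra=[10., 11., 12.] * u.deg, dec=[41., 42., 43.] * u.deg)
targets = SkyCoord(ra=[10.1, 11.9] * u.deg, dec=[41.2, 42.9] * u.deg)

# One nearest neighbour per target; sep2d is an Angle, dist3d a Quantity
# (a unit-sphere chord length here, since no distances were given).
idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)
print(idx, sep2d.arcsec)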
# Licensed under a 3-clause BSD style license - see LICENSE.rst # This file connects the readers/writers to the astropy.table.Table class from __future__ import absolute_import, division, print_function import re import functools from .. import registry as io_registry from ...table import Table from ...extern.six.moves import zip __all__ = [] # Generic # ======= def read_asciitable(filename, **kwargs): from .ui import read return read(filename, **kwargs) io_registry.register_reader('ascii', Table, read_asciitable) def write_asciitable(table, filename, **kwargs): from .ui import write return write(table, filename, **kwargs) io_registry.register_writer('ascii', Table, write_asciitable) def io_read(format, filename, **kwargs): from .ui import read format = re.sub(r'^ascii\.', '', format) return read(filename, format=format, **kwargs) def io_write(format, table, filename, **kwargs): from .ui import write format = re.sub(r'^ascii\.', '', format) return write(table, filename, format=format, **kwargs) def io_identify(suffix, origin, filepath, fileobj, *args, **kwargs): return filepath is not None and filepath.endswith(suffix) def _get_connectors_table(): from .core import FORMAT_CLASSES rows = [] rows.append(('ascii', '', 'Yes', 'ASCII table in any supported format (uses guessing)')) for format in sorted(FORMAT_CLASSES): cls = FORMAT_CLASSES[format] io_format = 'ascii.' + cls._format_name description = getattr(cls, '_description', '') class_link = ':class:`~{0}.{1}`'.format(cls.__module__, cls.__name__) suffix = getattr(cls, '_io_registry_suffix', '') can_write = 'Yes' if getattr(cls, '_io_registry_can_write', True) else '' rows.append((io_format, suffix, can_write, '{0}: {1}'.format(class_link, description))) out = Table(list(zip(*rows)), names=('Format', 'Suffix', 'Write', 'Description')) for colname in ('Format', 'Description'): width = max(len(x) for x in out[colname]) out[colname].format = '%-{0}s'.format(width) return out # Specific # ======== def read_csv(filename, **kwargs): from .ui import read kwargs['format'] = 'csv' return read(filename, **kwargs) def write_csv(table, filename, **kwargs): from .ui import write kwargs['format'] = 'csv' return write(table, filename, **kwargs) csv_identify = functools.partial(io_identify, '.csv') io_registry.register_reader('csv', Table, read_csv) io_registry.register_writer('csv', Table, write_csv) io_registry.register_identifier('csv', Table, csv_identify)
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy import testing as npt from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose from ... import units as u from ...utils import minversion """ These are the tests for coordinate matching. Note that this requires scipy. """ try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function(): from .. import ICRS from ..matching import match_coordinates_3d #this only uses match_coordinates_3d because that's the actual implementation cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) assert d3d.value[0] == 0 idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) assert np.all(idx == 2) npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) npt.assert_array_less(d3d.value, 0.02) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function_3d_and_sky(): from .. import ICRS from ..matching import match_coordinates_3d, match_coordinates_sky cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [2, 3]) assert_allclose(d2d, [1, 1.9] * u.deg) assert np.abs(d3d[0].to(u.kpc).value - np.radians(1)) < 1e-6 assert np.abs(d3d[1].to(u.kpc).value - 5*np.radians(1.9)) < 1e-5 idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) assert_allclose(d2d, [0, 0.1] * u.deg) assert_allclose(d3d, [4, 4.0000019] * u.kpc) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_kdtree_storage(): from .. import ICRS from ..matching import match_coordinates_3d cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=False) assert not hasattr(ccatalog, '_kdtree') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=True) assert hasattr(ccatalog, '_kdtree') assert not hasattr(ccatalog, 'tislit_cheese') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree='tislit_cheese') assert hasattr(ccatalog, 'tislit_cheese') assert not hasattr(cmatch, 'tislit_cheese') @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_method(): from .. import ICRS, SkyCoord from ...utils import NumpyRNGContext from ..matching import match_coordinates_3d, match_coordinates_sky with NumpyRNGContext(987654321): cmatch = ICRS(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) ccatalog = ICRS(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. 
- 90.)*u.degree) idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog) idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) #should be the same as above because there's no distance, but just make sure this method works idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog) idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) assert len(idx1) == len(d2d1) == len(d3d1) == 20 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): from .. import ICRS from ..matching import search_around_sky, search_around_3d coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg) idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg) assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)] assert d2d_1deg[0] == 1.0*u.deg assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg) assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)] idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc) idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc) assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)] assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)] assert_allclose(d2d_sm, [2, 1]*u.deg) @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around_scalar(): from astropy.coordinates import SkyCoord, Angle cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg") target = SkyCoord('1.1 -30.1', unit="deg") with pytest.raises(ValueError) as excinfo: cat.search_around_sky(target, Angle('2d')) # make sure the error message is *specific* to search_around_sky rather than # generic as reported in #3359 assert 'search_around_sky' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: cat.search_around_3d(target, Angle('2d')) assert 'search_around_3d' in str(excinfo.value)
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/coordinates/tests/test_matching.py
scib/lib/python3.5/site-packages/astropy/io/ascii/connect.py
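The io_registry calls above are what make the following Table round trip work; 'csv' dispatches to the read_csv/write_csv wrappers, and the '.csv' suffix also triggers csv_identify. The file name and column names are illustrative.

from astropy.table import Table

t = Table(rows=[(1, 2.5), (2, 3.5)], names=('id', 'flux'))
t.write('example.csv', format='csv')          # dispatched to write_csv registered above
t2 = Table.read('example.csv', format='csv')  # dispatched to read_csv
assert list(t2['id']) == [1, 2]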
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import absolute_import, division, print_function, unicode_literals
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy import testing as npt from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose from ... import units as u from ...utils import minversion """ These are the tests for coordinate matching. Note that this requires scipy. """ try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function(): from .. import ICRS from ..matching import match_coordinates_3d #this only uses match_coordinates_3d because that's the actual implementation cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) assert d3d.value[0] == 0 idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) assert np.all(idx == 2) npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) npt.assert_array_less(d3d.value, 0.02) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function_3d_and_sky(): from .. import ICRS from ..matching import match_coordinates_3d, match_coordinates_sky cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [2, 3]) assert_allclose(d2d, [1, 1.9] * u.deg) assert np.abs(d3d[0].to(u.kpc).value - np.radians(1)) < 1e-6 assert np.abs(d3d[1].to(u.kpc).value - 5*np.radians(1.9)) < 1e-5 idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) assert_allclose(d2d, [0, 0.1] * u.deg) assert_allclose(d3d, [4, 4.0000019] * u.kpc) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_kdtree_storage(): from .. import ICRS from ..matching import match_coordinates_3d cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=False) assert not hasattr(ccatalog, '_kdtree') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=True) assert hasattr(ccatalog, '_kdtree') assert not hasattr(ccatalog, 'tislit_cheese') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree='tislit_cheese') assert hasattr(ccatalog, 'tislit_cheese') assert not hasattr(cmatch, 'tislit_cheese') @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_method(): from .. import ICRS, SkyCoord from ...utils import NumpyRNGContext from ..matching import match_coordinates_3d, match_coordinates_sky with NumpyRNGContext(987654321): cmatch = ICRS(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) ccatalog = ICRS(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. 
- 90.)*u.degree) idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog) idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) #should be the same as above because there's no distance, but just make sure this method works idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog) idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) assert len(idx1) == len(d2d1) == len(d3d1) == 20 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): from .. import ICRS from ..matching import search_around_sky, search_around_3d coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg) idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg) assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)] assert d2d_1deg[0] == 1.0*u.deg assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg) assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)] idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc) idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc) assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)] assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)] assert_allclose(d2d_sm, [2, 1]*u.deg) @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around_scalar(): from astropy.coordinates import SkyCoord, Angle cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg") target = SkyCoord('1.1 -30.1', unit="deg") with pytest.raises(ValueError) as excinfo: cat.search_around_sky(target, Angle('2d')) # make sure the error message is *specific* to search_around_sky rather than # generic as reported in #3359 assert 'search_around_sky' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: cat.search_around_3d(target, Angle('2d')) assert 'search_around_3d' in str(excinfo.value)
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/coordinates/tests/test_matching.py
scib/lib/python3.5/site-packages/astropy/wcs/tests/__init__.py
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) """ This is the APE5 coordinates API document re-written to work as a series of test functions. Note that new tests for coordinates functionality should generally *not* be added to this file - instead, add them to other appropriate test modules in this package, like ``test_sky_coord.py``, ``test_frames.py``, or ``test_representation.py``. This file is instead meant mainly to keep track of deviations from the original APE5 plan. """ import numpy as np from numpy.random import randn from numpy import testing as npt from ...tests.helper import (pytest, quantity_allclose as allclose, assert_quantity_allclose as assert_allclose) raises = pytest.raises from ... import units as u from ... import time from ... import coordinates as coords from ..errors import * try: import scipy except ImportError: HAS_SCIPY = False else: HAS_SCIPY = True def test_representations_api(): from ..representation import SphericalRepresentation, \ UnitSphericalRepresentation, PhysicsSphericalRepresentation, \ CartesianRepresentation from ... coordinates import Angle, Longitude, Latitude, Distance #<-----------------Classes for representation of coordinate data---------------> # These classes inherit from a common base class and internally contain Quantity # objects, which are arrays (although they may act as scalars, like numpy's # length-0 "arrays") # They can be initialized with a variety of ways that make intuitive sense. # Distance is optional. UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg) UnitSphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg) SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc) # In the initial implementation, the lat/lon/distance arguments to the # initializer must be in order. A *possible* future change will be to allow # smarter guessing of the order. E.g. `Latitude` and `Longitude` objects can be # given in any order. UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg)) SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)) # Arrays of any of the inputs are fine UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg) # Default is to copy arrays, but optionally, it can be a reference UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, copy=False) # strings are parsed by `Latitude` and `Longitude` constructors, so no need to # implement parsing in the Representation classes UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad')) # Or, you can give `Quantity`s with keywords, and they will be internally # converted to Angle/Distance c1 = SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc) # Can also give another representation object with the `reprobj` keyword. c2 = SphericalRepresentation.from_representation(c1) # distance, lat, and lon typically will just match in shape SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=[10, 11]*u.kpc) # if the inputs are not the same, if possible they will be broadcast following # numpy's standard broadcasting rules. 
c2 = SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=10*u.kpc) assert len(c2.distance) == 2 #when they can't be broadcast, it is a ValueError (same as Numpy) with raises(ValueError): c2 = UnitSphericalRepresentation(lon=[8, 9, 10]*u.hourangle, lat=[5, 6]*u.deg) # It's also possible to pass in scalar quantity lists with mixed units. These # are converted to array quantities following the same rule as `Quantity`: all # elements are converted to match the first element's units. c2 = UnitSphericalRepresentation(lon=Angle([8*u.hourangle, 135*u.deg]), lat=Angle([5*u.deg, (6*np.pi/180)*u.rad])) assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle npt.assert_almost_equal(c2.lon[1].value, 9) # The Quantity initializer itself can also be used to force the unit even if the # first element doesn't have the right unit lon = u.Quantity([120*u.deg, 135*u.deg], u.hourangle) lat = u.Quantity([(5*np.pi/180)*u.rad, 0.4*u.hourangle], u.deg) c2 = UnitSphericalRepresentation(lon, lat) # regardless of how input, the `lat` and `lon` come out as angle/distance assert isinstance(c1.lat, Angle) assert isinstance(c1.lat, Latitude) # `Latitude` is an `Angle` subclass assert isinstance(c1.distance, Distance) # but they are read-only, as representations are immutable once created with raises(AttributeError): c1.lat = Latitude(5, u.deg) # Note that it is still possible to modify the array in-place, but this is not # sanctioned by the API, as this would prevent things like caching. c2.lat[:] = [0] * u.deg # possible, but NOT SUPPORTED # To address the fact that there are various other conventions for how spherical # coordinates are defined, other conventions can be included as new classes. # Later there may be other conventions that we implement - for now just the # physics convention, as it is one of the most common cases. c3 = PhysicsSphericalRepresentation(phi=120*u.deg, theta=85*u.deg, r=3*u.kpc) # first dimension must be length-3 if a lone `Quantity` is passed in. c1 = CartesianRepresentation(randn(3, 100) * u.kpc) assert c1.xyz.shape[0] == 3 assert c1.xyz.unit == u.kpc assert c1.x.shape[0] == 100 assert c1.y.shape[0] == 100 assert c1.z.shape[0] == 100 # can also give each as separate keywords CartesianRepresentation(x=randn(100)*u.kpc, y=randn(100)*u.kpc, z=randn(100)*u.kpc) # if the units don't match but are all distances, they will automatically be # converted to match `x` xarr, yarr, zarr = randn(3, 100) c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc) c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc) assert c1.xyz.unit == c2.xyz.unit == u.kpc assert_allclose((c1.z / 1000) - c2.z, 0*u.kpc, atol=1e-10*u.kpc) # representations convert into other representations via `represent_as` srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc) crep = srep.represent_as(CartesianRepresentation) assert_allclose(crep.x, 0*u.pc, atol=1e-10*u.pc) assert_allclose(crep.y, 1*u.pc, atol=1e-10*u.pc) assert_allclose(crep.z, 0*u.pc, atol=1e-10*u.pc) # The functions that actually do the conversion are defined via methods on the # representation classes. 
This may later be expanded into a full registerable # transform graph like the coordinate frames, but initially it will be a simpler # method system def test_frame_api(): from ..representation import SphericalRepresentation, \ UnitSphericalRepresentation from ..builtin_frames import ICRS, FK5 #<---------------------Reference Frame/"Low-level" classes---------------------> # The low-level classes have a dual role: they act as specifiers of coordinate # frames and they *may* also contain data as one of the representation objects, # in which case they are the actual coordinate objects themselves. # They can always accept a representation as a first argument icrs = ICRS(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)) # which is stored as the `data` attribute assert icrs.data.lat == 5*u.deg assert icrs.data.lon == 8*u.hourangle # Frames that require additional information like equinoxes or obstimes get them # as keyword parameters to the frame constructor. Where sensible, defaults are # used. E.g., FK5 is almost always J2000 equinox fk5 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)) J2000 = time.Time('J2000', scale='utc') fk5_2000 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg), equinox=J2000) assert fk5.equinox == fk5_2000.equinox # the information required to specify the frame is immutable J2001 = time.Time('J2001', scale='utc') with raises(AttributeError): fk5.equinox = J2001 # Similar for the representation data. with raises(AttributeError): fk5.data = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg) # There is also a class-level attribute that lists the attributes needed to # identify the frame. These include attributes like `equinox` shown above. assert all([nm in ('equinox', 'obstime') for nm in fk5.get_frame_attr_names()]) # the result of `get_frame_attr_names` is used in particular by the # high-level class (discussed below) to allow round-tripping between various # frames. It is also part of the public API for developers and # other advanced users. # The actual position information is accessed via the representation objects assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5*u.deg) # shorthand for the above assert_allclose(icrs.spherical.lat, 5*u.deg) assert icrs.cartesian.z.value > 0 # Many frames have a "default" representation, the one in which they are # conventionally described, often with a special name for some of the # coordinates. E.g., most equatorial coordinate systems are spherical with RA and # Dec.
This works simply as a shorthand for the longer form above assert_allclose(icrs.dec, 5*u.deg) assert_allclose(fk5.ra, 8*u.hourangle) assert icrs.representation == SphericalRepresentation # low-level classes can also be initialized with names valid for that representation # and frame: icrs_2 = ICRS(ra=8*u.hour, dec=5*u.deg, distance=1*u.kpc) assert_allclose(icrs.ra, icrs_2.ra) # and these are taken as the default if keywords are not given: #icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc) #assert icrs_nokwarg.ra == icrs_2.ra and icrs_nokwarg.dec == icrs_2.dec # they also are capable of computing on-sky or 3d separations from each other, # which will be a direct port of the existing methods: coo1 = ICRS(ra=0*u.hour, dec=0*u.deg) coo2 = ICRS(ra=0*u.hour, dec=1*u.deg) # `separation` is the on-sky separation assert coo1.separation(coo2).degree == 1.0 # while `separation_3d` includes the 3D distance information coo3 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=1*u.kpc) coo4 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=2*u.kpc) assert coo3.separation_3d(coo4).kpc == 1.0 # The next example fails because `coo1` and `coo2` don't have distances with raises(ValueError): assert coo1.separation_3d(coo2).kpc == 1.0 # repr/str also shows info, with frame and data #assert repr(fk5) == '' def test_transform_api(): from ..representation import UnitSphericalRepresentation from ..builtin_frames import ICRS, FK5 from ..baseframe import frame_transform_graph, BaseCoordinateFrame from ..transformations import DynamicMatrixTransform #<-------------------------Transformations-------------------------------------> # Transformation functionality is the key to the whole scheme: they transform # low-level classes from one frame to another. #(used below but defined above in the API) fk5 = FK5(ra=8*u.hour, dec=5*u.deg) # If no data (or `None`) is given, the class acts as a specifier of a frame, but # without any stored data. J2001 = time.Time('J2001', scale='utc') fk5_J2001_frame = FK5(equinox=J2001) # if they do not have data, the string instead is the frame specification assert repr(fk5_J2001_frame) == "<FK5 Frame (equinox=J2001.000)>" # Note that, although a frame object is immutable and can't have data added, it # can be used to create a new object that does have data by giving the # `realize_frame` method a representation: srep = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg) fk5_j2001_with_data = fk5_J2001_frame.realize_frame(srep) assert fk5_j2001_with_data.data is not None # Now `fk5_j2001_with_data` is in the same frame as `fk5_J2001_frame`, but it # is an actual low-level coordinate, rather than a frame without data. # These frames are primarily useful for specifying what a coordinate should be # transformed *into*, as they are used by the `transform_to` method # E.g., this snippet precesses the point to the new equinox newfk5 = fk5.transform_to(fk5_J2001_frame) assert newfk5.equinox == J2001 # classes can also be given to `transform_to`, which then uses the defaults for # the frame information: samefk5 = fk5.transform_to(FK5) # `fk5` was initialized using default `obstime` and `equinox`, so: assert_allclose(samefk5.ra, fk5.ra, atol=1e-10*u.deg) assert_allclose(samefk5.dec, fk5.dec, atol=1e-10*u.deg) # transforming to a new frame necessarily loses framespec information if that # information is not applicable to the new frame. 
This means transforms are not # always round-trippable: fk5_2 = FK5(ra=8*u.hour, dec=5*u.deg, equinox=J2001) ic_trans = fk5_2.transform_to(ICRS) # `ic_trans` does not have an `equinox`, so now when we transform back to FK5, # it's a *different* RA and Dec fk5_trans = ic_trans.transform_to(FK5) assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10*u.deg) # But if you explicitly give the right equinox, all is fine fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001)) assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10*u.deg) # Trying to transform a frame with no data is of course an error: with raises(ValueError): FK5(equinox=J2001).transform_to(ICRS) # To actually define a new transformation, the same scheme as in the # 0.2/0.3 coordinates framework can be re-used - a graph of transform functions # connecting various coordinate classes together. The main changes are: # 1) The transform functions now get the frame object they are transforming the # current data into. # 2) Frames with additional information need to have a way to transform between # objects of the same class, but with different framespecinfo values. # An example transform function: class SomeNewSystem(BaseCoordinateFrame): pass @frame_transform_graph.transform(DynamicMatrixTransform, SomeNewSystem, FK5) def new_to_fk5(newobj, fk5frame): ot = newobj.obstime eq = fk5frame.equinox # ... build a *cartesian* transform matrix using `eq` that transforms from # the `newobj` frame as observed at `ot` to FK5 at equinox `eq` matrix = np.eye(3) return matrix # Other options for transform functions include one that simply returns the new # coordinate object, and one that returns a cartesian matrix but does *not* # require `newobj` or `fk5frame` - this allows optimization of the transform. def test_highlevel_api(): J2001 = time.Time('J2001', scale='utc') #<---------------------------"High-level" class--------------------------------> # The "high-level" class is intended to wrap the lower-level classes in such a # way that they can be round-tripped, as well as providing a variety of # convenience functionality. This document is not intended to show *all* of the # possible high-level functionality, rather how the high-level classes are # initialized and interact with the low-level classes # this creates an object that contains an `ICRS` low-level class, initialized # identically to the first ICRS example further up. sc = coords.SkyCoord(coords.SphericalRepresentation(lon=8 * u.hour, lat=5 * u.deg, distance=1 * u.kpc), frame='icrs') # Other representations and `system` keywords delegate to the appropriate # low-level class. The already-existing registry for user-defined coordinates # will be used by `SkyCoordinate` to figure out what the various `system` # keyword values actually mean. sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs') sc = coords.SkyCoord(l=120 * u.deg, b=5 * u.deg, frame='galactic') # High-level classes can also be initialized directly from low-level objects sc = coords.SkyCoord(coords.ICRS(ra=8 * u.hour, dec=5 * u.deg)) # The next example raises an error because the high-level class must always # have position data.
with pytest.raises(ValueError): sc = coords.SkyCoord(coords.FK5(equinox=J2001)) # raises ValueError # similarly, the low-level object can always be accessed #this is how it's supposed to look, but sometimes the numbers get rounded in #funny ways #assert repr(sc.frame) == '<ICRS Coordinate: ra=120.0 deg, dec=5.0 deg>' rscf = repr(sc.frame) assert rscf.startswith('<ICRS Coordinate: (ra, dec) in deg') # and the string representation will be inherited from the low-level class. # same deal, it should look like this, but different architectures / Python # versions may round the numbers differently #assert repr(sc) == '<SkyCoord (ICRS): ra=120.0 deg, dec=5.0 deg>' rsc = repr(sc) assert rsc.startswith('<SkyCoord (ICRS): (ra, dec) in deg') # Supports a variety of possible complex string formats sc = coords.SkyCoord('8h00m00s +5d00m00.0s', frame='icrs') # In the next example, the unit is only needed b/c units are ambiguous. In # general, we *never* accept ambiguity sc = coords.SkyCoord('8:00:00 +5:00:00.0', unit=(u.hour, u.deg), frame='icrs') # The next one would yield length-2 array coordinates, because of the comma sc = coords.SkyCoord(['8h 5d', '2°2′3″ 0.3rad'], frame='icrs') # It should also interpret common designation styles as a coordinate # NOT YET # sc = coords.SkyCoord('SDSS J123456.89-012345.6', frame='icrs') # but it should also be possible to provide formats for outputting to strings, # similar to `Time`. This can be added right away or at a later date. # transformation is done the same as for low-level classes, which it delegates to sc_fk5_j2001 = sc.transform_to(coords.FK5(equinox=J2001)) assert sc_fk5_j2001.equinox == J2001 # The key difference is that the high-level class remembers frame information # necessary for round-tripping, unlike the low-level classes: sc1 = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, equinox=J2001, frame='fk5') sc2 = sc1.transform_to('icrs') # The next assertion succeeds, but it doesn't mean anything for ICRS, as ICRS # isn't defined in terms of an equinox assert sc2.equinox == J2001 # But it *is* necessary once we transform to FK5 sc3 = sc2.transform_to('fk5') assert sc3.equinox == J2001 assert_allclose(sc1.ra, sc3.ra) # `SkyCoord` will also include the attribute-style access that is in the # v0.2/0.3 coordinate objects. This will *not* be in the low-level classes sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs') scgal = sc.galactic assert str(scgal).startswith('<SkyCoord (Galactic): (l, b)') # the existing `from_name` and `match_to_catalog_*` methods will be moved to the # high-level class as convenience functionality. #in remote-data test below! #m31icrs = coords.SkyCoord.from_name('M31', frame='icrs') #assert str(m31icrs) == '<SkyCoord (ICRS) RA=10.68471 deg, Dec=41.26875 deg>' if HAS_SCIPY: cat1 = coords.SkyCoord(ra=[1, 2]*u.hr, dec=[3, 4.01]*u.deg, distance=[5, 6]*u.kpc, frame='icrs') cat2 = coords.SkyCoord(ra=[1, 2, 2.01]*u.hr, dec=[3, 4, 5]*u.deg, distance=[5, 200, 6]*u.kpc, frame='icrs') idx1, sep2d1, dist3d1 = cat1.match_to_catalog_sky(cat2) idx2, sep2d2, dist3d2 = cat1.match_to_catalog_3d(cat2) assert np.any(idx1 != idx2) # additional convenience functionality for the future should be added as methods # on `SkyCoord`, *not* the low-level classes.
@pytest.mark.remote_data def test_highlevel_api_remote(): m31icrs = coords.SkyCoord.from_name('M31', frame='icrs') assert str(m31icrs) == '<SkyCoord (ICRS): (ra, dec) in deg\n (10.6847083, 41.26875)>' m31fk4 = coords.SkyCoord.from_name('M31', frame='fk4') assert m31icrs.frame != m31fk4.frame assert np.abs(m31icrs.ra - m31fk4.ra) > .5*u.deg
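# Illustrative sketch (not astropy's implementation): the spherical -> Cartesian
# conversion exercised by `represent_as(CartesianRepresentation)` above reduces
# to a few lines of plain numpy, using the same convention as the representation
# classes (lon measured in the x-y plane from +x, lat measured from that plane).

import numpy as np


def spherical_to_cartesian(lon_deg, lat_deg, distance):
    """Convert (lon, lat, distance) to (x, y, z)."""
    lon = np.radians(lon_deg)
    lat = np.radians(lat_deg)
    x = distance * np.cos(lat) * np.cos(lon)
    y = distance * np.cos(lat) * np.sin(lon)
    z = distance * np.sin(lat)
    return x, y, z


# lon=90 deg, lat=0 deg, d=1 -> (0, 1, 0), matching the represent_as assertions above
x, y, z = spherical_to_cartesian(90.0, 0.0, 1.0)
assert abs(x) < 1e-12 and abs(y - 1.0) < 1e-12 and abs(z) < 1e-12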
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import numpy as np from numpy import testing as npt from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose from ... import units as u from ...utils import minversion """ These are the tests for coordinate matching. Note that this requires scipy. """ try: import scipy HAS_SCIPY = True except ImportError: HAS_SCIPY = False if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False): OLDER_SCIPY = False else: OLDER_SCIPY = True @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function(): from .. import ICRS from ..matching import match_coordinates_3d #this only uses match_coordinates_3d because that's the actual implementation cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) npt.assert_array_almost_equal(d2d.degree, [0, 0.1]) assert d3d.value[0] == 0 idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2) assert np.all(idx == 2) npt.assert_array_almost_equal(d2d.degree, [1, 0.9]) npt.assert_array_less(d3d.value, 0.02) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_function_3d_and_sky(): from .. import ICRS from ..matching import match_coordinates_3d, match_coordinates_sky cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx, [2, 3]) assert_allclose(d2d, [1, 1.9] * u.deg) assert np.abs(d3d[0].to(u.kpc).value - np.radians(1)) < 1e-6 assert np.abs(d3d[1].to(u.kpc).value - 5*np.radians(1.9)) < 1e-5 idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx, [3, 1]) assert_allclose(d2d, [0, 0.1] * u.deg) assert_allclose(d3d, [4, 4.0000019] * u.kpc) @pytest.mark.skipif(str('not HAS_SCIPY')) def test_kdtree_storage(): from .. import ICRS from ..matching import match_coordinates_3d cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree) ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree) idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=False) assert not hasattr(ccatalog, '_kdtree') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree=True) assert hasattr(ccatalog, '_kdtree') assert not hasattr(ccatalog, 'tislit_cheese') idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, storekdtree='tislit_cheese') assert hasattr(ccatalog, 'tislit_cheese') assert not hasattr(cmatch, 'tislit_cheese') @pytest.mark.skipif(str('not HAS_SCIPY')) def test_matching_method(): from .. import ICRS, SkyCoord from ...utils import NumpyRNGContext from ..matching import match_coordinates_3d, match_coordinates_sky with NumpyRNGContext(987654321): cmatch = ICRS(np.random.rand(20) * 360.*u.degree, (np.random.rand(20) * 180. - 90.)*u.degree) ccatalog = ICRS(np.random.rand(100) * 360. * u.degree, (np.random.rand(100) * 180. 
- 90.)*u.degree) idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog) idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) #should be the same as above because there's no distance, but just make sure this method works idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog) idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog) npt.assert_array_equal(idx1, idx2) assert_allclose(d2d1, d2d2) assert_allclose(d3d1, d3d2) assert len(idx1) == len(d2d1) == len(d3d1) == 20 @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around(): from .. import ICRS from ..matching import search_around_sky, search_around_3d coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc) coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc) idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg) idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg) assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)] assert d2d_1deg[0] == 1.0*u.deg assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg) assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)] idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc) idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc) assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)] assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)] assert_allclose(d2d_sm, [2, 1]*u.deg) @pytest.mark.skipif(str('not HAS_SCIPY')) @pytest.mark.skipif(str('OLDER_SCIPY')) def test_search_around_scalar(): from astropy.coordinates import SkyCoord, Angle cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg") target = SkyCoord('1.1 -30.1', unit="deg") with pytest.raises(ValueError) as excinfo: cat.search_around_sky(target, Angle('2d')) # make sure the error message is *specific* to search_around_sky rather than # generic as reported in #3359 assert 'search_around_sky' in str(excinfo.value) with pytest.raises(ValueError) as excinfo: cat.search_around_3d(target, Angle('2d')) assert 'search_around_3d' in str(excinfo.value)
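# Illustrative sketch of the core idea behind the matching functions tested
# above: a scipy cKDTree nearest-neighbour query over unit-sphere Cartesian
# points, with the returned chord length converted back to an on-sky angle.
# (The real match_coordinates_* functions add unit handling, 3d distances and
# KD-tree caching; only the central step is reproduced here.)

import numpy as np
from scipy.spatial import cKDTree


def radec_to_unit_xyz(ra_deg, dec_deg):
    ra, dec = np.radians(ra_deg), np.radians(dec_deg)
    return np.column_stack([np.cos(dec) * np.cos(ra),
                            np.cos(dec) * np.sin(ra),
                            np.sin(dec)])


catalog = radec_to_unit_xyz(np.array([1.0, 2.0, 3.0, 4.0]), np.zeros(4))
targets = radec_to_unit_xyz(np.array([4.0, 2.1]), np.zeros(2))

chord, idx = cKDTree(catalog).query(targets, k=1)
sep_deg = np.degrees(2.0 * np.arcsin(chord / 2.0))  # chord length -> angle

# same indices and separations as test_matching_function above
assert list(idx) == [3, 1]
assert np.allclose(sep_deg, [0.0, 0.1], atol=1e-8)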
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/coordinates/tests/test_matching.py
scib/lib/python3.5/site-packages/astropy/coordinates/tests/test_api_ape5.py
# This file is part of the Trezor project. # # Copyright (C) 2012-2018 SatoshiLabs and contributors # # This library is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License version 3 # as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the License along with this library. # If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>. import atexit import logging import sys import time from typing import Iterable, Optional from . import TREZORS, UDEV_RULES_STR, TransportException from .protocol import ProtocolBasedTransport, ProtocolV1 LOG = logging.getLogger(__name__) try: import usb1 except Exception as e: LOG.warning("WebUSB transport is disabled: {}".format(e)) usb1 = None if False: # mark Optional as used, otherwise it only exists in comments Optional INTERFACE = 0 ENDPOINT = 1 DEBUG_INTERFACE = 1 DEBUG_ENDPOINT = 2 class WebUsbHandle: def __init__(self, device: "usb1.USBDevice", debug: bool = False) -> None: self.device = device self.interface = DEBUG_INTERFACE if debug else INTERFACE self.endpoint = DEBUG_ENDPOINT if debug else ENDPOINT self.count = 0 self.handle = None # type: Optional[usb1.USBDeviceHandle] def open(self) -> None: self.handle = self.device.open() if self.handle is None: if sys.platform.startswith("linux"): args = (UDEV_RULES_STR,) else: args = () raise IOError("Cannot open device", *args) self.handle.claimInterface(self.interface) def close(self) -> None: if self.handle is not None: self.handle.releaseInterface(self.interface) self.handle.close() self.handle = None def write_chunk(self, chunk: bytes) -> None: assert self.handle is not None if len(chunk) != 64: raise TransportException("Unexpected chunk size: %d" % len(chunk)) self.handle.interruptWrite(self.endpoint, chunk) def read_chunk(self) -> bytes: assert self.handle is not None endpoint = 0x80 | self.endpoint while True: chunk = self.handle.interruptRead(endpoint, 64) if chunk: break else: time.sleep(0.001) if len(chunk) != 64: raise TransportException("Unexpected chunk size: %d" % len(chunk)) return chunk class WebUsbTransport(ProtocolBasedTransport): """ WebUsbTransport implements transport over WebUSB interface. """ PATH_PREFIX = "webusb" ENABLED = usb1 is not None context = None def __init__( self, device: str, handle: WebUsbHandle = None, debug: bool = False ) -> None: if handle is None: handle = WebUsbHandle(device, debug) self.device = device self.handle = handle self.debug = debug super().__init__(protocol=ProtocolV1(handle)) def get_path(self) -> str: return "%s:%s" % (self.PATH_PREFIX, dev_to_str(self.device)) @classmethod def enumerate(cls) -> Iterable["WebUsbTransport"]: if cls.context is None: cls.context = usb1.USBContext() cls.context.open() atexit.register(cls.context.close) devices = [] for dev in cls.context.getDeviceIterator(skip_on_error=True): usb_id = (dev.getVendorID(), dev.getProductID()) if usb_id not in TREZORS: continue if not is_vendor_class(dev): continue try: # workaround for issue #223: # on certain combinations of Windows USB drivers and libusb versions, # Trezor is returned twice (possibly because Windows know it as both # a HID and a WebUSB device), and one of the returned devices is # non-functional. 
dev.getProduct() devices.append(WebUsbTransport(dev)) except usb1.USBErrorNotSupported: pass return devices def find_debug(self) -> "WebUsbTransport": if self.protocol.VERSION >= 2: # TODO test this # XXX this is broken right now because sessions don't really work # For v2 protocol, use the same WebUSB interface with a different session return WebUsbTransport(self.device, self.handle) else: # For v1 protocol, find debug USB interface for the same serial number return WebUsbTransport(self.device, debug=True) def is_vendor_class(dev: "usb1.USBDevice") -> bool: configurationId = 0 altSettingId = 0 return ( dev[configurationId][INTERFACE][altSettingId].getClass() == usb1.libusb1.LIBUSB_CLASS_VENDOR_SPEC ) def dev_to_str(dev: "usb1.USBDevice") -> str: return ":".join( str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList() )
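# Illustrative sketch of the fixed-size framing invariant that write_chunk and
# read_chunk enforce above: every USB transfer is exactly 64 bytes, so outgoing
# payloads must be split and padded. The actual message framing (magic bytes,
# length header, continuation markers) lives in ProtocolV1 and is not shown here.

def chunkify(data: bytes, chunk_size: int = 64) -> list:
    """Split data into chunk_size pieces, zero-padding the last one."""
    return [data[i:i + chunk_size].ljust(chunk_size, b"\x00")
            for i in range(0, len(data), chunk_size)]


chunks = chunkify(b"example payload")
assert len(chunks) == 1 and len(chunks[0]) == 64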
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.

import pytest

from trezorlib import ethereum

from .common import TrezorTest


@pytest.mark.ethereum
class TestMsgEthereumVerifymessage(TrezorTest):
    ADDRESS = "cb3864960e8db1a751212c580af27ee8867d688f"
    VECTORS = [
        (
            "This is an example of a signed message.",
            "b7837058907192dbc9427bf57d93a0acca3816c92927a08be573b785f2d72dab65dad9c92fbe03a358acdb455eab2107b869945d11f4e353d9cc6ea957d08a871b",
        ),
        (
            "VeryLongMessage!" * 64,
            "da2b73b0170479c2bfba3dd4839bf0d67732a44df8c873f3f3a2aca8a57d7bdc0b5d534f54c649e2d44135717001998b176d3cd1212366464db51f5838430fb31c",
        ),
    ]

    def test_verify(self):
        self.setup_mnemonic_nopin_nopassphrase()
        for msg, sig in self.VECTORS:
            res = ethereum.verify_message(
                self.client, bytes.fromhex(self.ADDRESS), bytes.fromhex(sig), msg
            )
            assert res is True

    def test_verify_invalid(self):
        self.setup_mnemonic_nopin_nopassphrase()
        signature = bytes.fromhex(self.VECTORS[0][1])
        res = ethereum.verify_message(
            self.client, bytes.fromhex(self.ADDRESS), signature, "another message"
        )
        assert res is False
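# Background sketch for the test above: Ethereum "personal message" signatures
# (the scheme ethereum.verify_message checks) are made over the keccak-256 hash
# of the message prefixed per EIP-191. The prefix construction is standard and
# shown below; the keccak-256 step is omitted because it is not in Python's
# standard library.

def ethereum_message_preimage(message: bytes) -> bytes:
    """Build the EIP-191 personal-message preimage that is keccak256-hashed."""
    return (b"\x19Ethereum Signed Message:\n"
            + str(len(message)).encode()
            + message)


preimage = ethereum_message_preimage(b"This is an example of a signed message.")
assert preimage.startswith(b"\x19Ethereum Signed Message:\n")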
jhoenicke/python-trezor
trezorlib/tests/device_tests/test_msg_ethereum_verifymessage.py
trezorlib/transport/webusb.py
""" Platform for retrieving energy data from SRP. For more details about this platform, please refer to the documentation https://home-assistant.io/components/sensor.srp_energy/ """ from datetime import datetime, timedelta import logging from requests.exceptions import ( ConnectionError as ConnectError, HTTPError, Timeout) import voluptuous as vol from homeassistant.const import ( CONF_NAME, CONF_PASSWORD, CONF_USERNAME, CONF_ID) import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.helpers.entity import Entity REQUIREMENTS = ['srpenergy==1.0.5'] _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Powered by SRP Energy" DEFAULT_NAME = 'SRP Energy' MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1440) ENERGY_KWH = 'kWh' ATTR_READING_COST = "reading_cost" ATTR_READING_TIME = 'datetime' ATTR_READING_USAGE = 'reading_usage' ATTR_DAILY_USAGE = 'daily_usage' ATTR_USAGE_HISTORY = 'usage_history' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_ID): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the SRP energy.""" name = config[CONF_NAME] username = config[CONF_USERNAME] password = config[CONF_PASSWORD] account_id = config[CONF_ID] from srpenergy.client import SrpEnergyClient srp_client = SrpEnergyClient(account_id, username, password) if not srp_client.validate(): _LOGGER.error("Couldn't connect to %s. Check credentials", name) return add_entities([SrpEnergy(name, srp_client)], True) class SrpEnergy(Entity): """Representation of an srp usage.""" def __init__(self, name, client): """Initialize SRP Usage.""" self._state = None self._name = name self._client = client self._history = None self._usage = None @property def attribution(self): """Return the attribution.""" return ATTRIBUTION @property def state(self): """Return the current state.""" if self._state is None: return None return "{0:.2f}".format(self._state) @property def name(self): """Return the name of the sensor.""" return self._name @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return ENERGY_KWH @property def history(self): """Return the energy usage history of this entity, if any.""" if self._usage is None: return None history = [{ ATTR_READING_TIME: isodate, ATTR_READING_USAGE: kwh, ATTR_READING_COST: cost } for _, _, isodate, kwh, cost in self._usage] return history @property def device_state_attributes(self): """Return the state attributes.""" attributes = { ATTR_USAGE_HISTORY: self.history } return attributes @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Get the latest usage from SRP Energy.""" start_date = datetime.now() + timedelta(days=-1) end_date = datetime.now() try: usage = self._client.usage(start_date, end_date) daily_usage = 0.0 for _, _, _, kwh, _ in usage: daily_usage += float(kwh) if usage: self._state = daily_usage self._usage = usage else: _LOGGER.error("Unable to fetch data from SRP. No data") except (ConnectError, HTTPError, Timeout) as error: _LOGGER.error("Unable to connect to SRP. %s", error) except ValueError as error: _LOGGER.error("Value error connecting to SRP. %s", error) except TypeError as error: _LOGGER.error("Type error connecting to SRP. " "Check username and password. %s", error)
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
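# Pure-python model of the hold-window rule these trigger tests encode: fire
# only when the hold duration exceeds held_more_than (if set) and stays below
# held_less_than (if set). This describes the expected behavior the tests
# assert, not the litejet component's actual implementation.

from datetime import timedelta


def should_fire(hold, more_than=None, less_than=None):
    """Return True if a press held for `hold` should fire the trigger."""
    if more_than is not None and hold <= more_than:
        return False
    if less_than is not None and hold >= less_than:
        return False
    return True


window = dict(more_than=timedelta(milliseconds=100),
              less_than=timedelta(milliseconds=300))
assert should_fire(timedelta(milliseconds=200), **window)      # just right
assert not should_fire(timedelta(milliseconds=50), **window)   # too short
assert not should_fire(timedelta(milliseconds=400), **window)  # too long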
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/sensor/srp_energy.py
""" Helpers for Zigbee Home Automation. For more details about this component, please refer to the documentation at https://home-assistant.io/components/zha/ """ import asyncio import logging from .const import ( DEFAULT_BAUDRATE, REPORT_CONFIG_MAX_INT, REPORT_CONFIG_MIN_INT, REPORT_CONFIG_RPT_CHANGE, RadioType) _LOGGER = logging.getLogger(__name__) async def safe_read(cluster, attributes, allow_cache=True, only_cache=False, manufacturer=None): """Swallow all exceptions from network read. If we throw during initialization, setup fails. Rather have an entity that exists, but is in a maybe wrong state, than no entity. This method should probably only be used during initialization. """ try: result, _ = await cluster.read_attributes( attributes, allow_cache=allow_cache, only_cache=only_cache, manufacturer=manufacturer ) return result except Exception: # pylint: disable=broad-except return {} async def bind_cluster(entity_id, cluster): """Bind a zigbee cluster. This also swallows DeliveryError exceptions that are thrown when devices are unreachable. """ from zigpy.exceptions import DeliveryError cluster_name = cluster.ep_attribute try: res = await cluster.bind() _LOGGER.debug( "%s: bound '%s' cluster: %s", entity_id, cluster_name, res[0] ) except DeliveryError as ex: _LOGGER.debug( "%s: Failed to bind '%s' cluster: %s", entity_id, cluster_name, str(ex) ) async def configure_reporting(entity_id, cluster, attr, min_report=REPORT_CONFIG_MIN_INT, max_report=REPORT_CONFIG_MAX_INT, reportable_change=REPORT_CONFIG_RPT_CHANGE, manufacturer=None): """Configure attribute reporting for a cluster. This also swallows DeliveryError exceptions that are thrown when devices are unreachable. """ from zigpy.exceptions import DeliveryError attr_name = cluster.attributes.get(attr, [attr])[0] attr_id = get_attr_id_by_name(cluster, attr_name) cluster_name = cluster.ep_attribute kwargs = {} if manufacturer: kwargs['manufacturer'] = manufacturer try: res = await cluster.configure_reporting(attr_id, min_report, max_report, reportable_change, **kwargs) _LOGGER.debug( "%s: reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'", entity_id, attr_name, cluster_name, min_report, max_report, reportable_change, res ) except DeliveryError as ex: _LOGGER.debug( "%s: failed to set reporting for '%s' attr on '%s' cluster: %s", entity_id, attr_name, cluster_name, str(ex) ) async def bind_configure_reporting(entity_id, cluster, attr, skip_bind=False, min_report=REPORT_CONFIG_MIN_INT, max_report=REPORT_CONFIG_MAX_INT, reportable_change=REPORT_CONFIG_RPT_CHANGE, manufacturer=None): """Bind and configure zigbee attribute reporting for a cluster. This also swallows DeliveryError exceptions that are thrown when devices are unreachable. 
""" if not skip_bind: await bind_cluster(entity_id, cluster) await configure_reporting(entity_id, cluster, attr, min_report=min_report, max_report=max_report, reportable_change=reportable_change, manufacturer=manufacturer) async def check_zigpy_connection(usb_path, radio_type, database_path): """Test zigpy radio connection.""" if radio_type == RadioType.ezsp.name: import bellows.ezsp from bellows.zigbee.application import ControllerApplication radio = bellows.ezsp.EZSP() elif radio_type == RadioType.xbee.name: import zigpy_xbee.api from zigpy_xbee.zigbee.application import ControllerApplication radio = zigpy_xbee.api.XBee() elif radio_type == RadioType.deconz.name: import zigpy_deconz.api from zigpy_deconz.zigbee.application import ControllerApplication radio = zigpy_deconz.api.Deconz() try: await radio.connect(usb_path, DEFAULT_BAUDRATE) controller = ControllerApplication(radio, database_path) await asyncio.wait_for(controller.startup(auto_form=True), timeout=30) radio.close() except Exception: # pylint: disable=broad-except return False return True def convert_ieee(ieee_str): """Convert given ieee string to EUI64.""" from zigpy.types import EUI64, uint8_t return EUI64([uint8_t(p, base=16) for p in ieee_str.split(':')]) def construct_unique_id(cluster): """Construct a unique id from a cluster.""" return "0x{:04x}:{}:0x{:04x}".format( cluster.endpoint.device.nwk, cluster.endpoint.endpoint_id, cluster.cluster_id ) def get_attr_id_by_name(cluster, attr_name): """Get the attribute id for a cluster attribute by its name.""" return next((attrid for attrid, (attrname, datatype) in cluster.attributes.items() if attr_name == attrname), None)
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/zha/core/helpers.py
""" Support for the Fitbit API. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.fitbit/ """ import os import logging import datetime import time import voluptuous as vol from homeassistant.core import callback from homeassistant.components.http import HomeAssistantView from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION from homeassistant.const import CONF_UNIT_SYSTEM from homeassistant.helpers.entity import Entity from homeassistant.helpers.icon import icon_for_battery_level import homeassistant.helpers.config_validation as cv from homeassistant.util.json import load_json, save_json REQUIREMENTS = ['fitbit==0.3.0'] _CONFIGURING = {} _LOGGER = logging.getLogger(__name__) ATTR_ACCESS_TOKEN = 'access_token' ATTR_REFRESH_TOKEN = 'refresh_token' ATTR_CLIENT_ID = 'client_id' ATTR_CLIENT_SECRET = 'client_secret' ATTR_LAST_SAVED_AT = 'last_saved_at' CONF_MONITORED_RESOURCES = 'monitored_resources' CONF_CLOCK_FORMAT = 'clock_format' CONF_ATTRIBUTION = 'Data provided by Fitbit.com' DEPENDENCIES = ['http'] FITBIT_AUTH_CALLBACK_PATH = '/api/fitbit/callback' FITBIT_AUTH_START = '/api/fitbit' FITBIT_CONFIG_FILE = 'fitbit.conf' FITBIT_DEFAULT_RESOURCES = ['activities/steps'] SCAN_INTERVAL = datetime.timedelta(minutes=30) DEFAULT_CONFIG = { 'client_id': 'CLIENT_ID_HERE', 'client_secret': 'CLIENT_SECRET_HERE' } FITBIT_RESOURCES_LIST = { 'activities/activityCalories': ['Activity Calories', 'cal', 'fire'], 'activities/calories': ['Calories', 'cal', 'fire'], 'activities/caloriesBMR': ['Calories BMR', 'cal', 'fire'], 'activities/distance': ['Distance', '', 'map-marker'], 'activities/elevation': ['Elevation', '', 'walk'], 'activities/floors': ['Floors', 'floors', 'walk'], 'activities/heart': ['Resting Heart Rate', 'bpm', 'heart-pulse'], 'activities/minutesFairlyActive': ['Minutes Fairly Active', 'minutes', 'walk'], 'activities/minutesLightlyActive': ['Minutes Lightly Active', 'minutes', 'walk'], 'activities/minutesSedentary': ['Minutes Sedentary', 'minutes', 'seat-recline-normal'], 'activities/minutesVeryActive': ['Minutes Very Active', 'minutes', 'run'], 'activities/steps': ['Steps', 'steps', 'walk'], 'activities/tracker/activityCalories': ['Tracker Activity Calories', 'cal', 'fire'], 'activities/tracker/calories': ['Tracker Calories', 'cal', 'fire'], 'activities/tracker/distance': ['Tracker Distance', '', 'map-marker'], 'activities/tracker/elevation': ['Tracker Elevation', '', 'walk'], 'activities/tracker/floors': ['Tracker Floors', 'floors', 'walk'], 'activities/tracker/minutesFairlyActive': ['Tracker Minutes Fairly Active', 'minutes', 'walk'], 'activities/tracker/minutesLightlyActive': ['Tracker Minutes Lightly Active', 'minutes', 'walk'], 'activities/tracker/minutesSedentary': ['Tracker Minutes Sedentary', 'minutes', 'seat-recline-normal'], 'activities/tracker/minutesVeryActive': ['Tracker Minutes Very Active', 'minutes', 'run'], 'activities/tracker/steps': ['Tracker Steps', 'steps', 'walk'], 'body/bmi': ['BMI', 'BMI', 'human'], 'body/fat': ['Body Fat', '%', 'human'], 'body/weight': ['Weight', '', 'human'], 'devices/battery': ['Battery', None, None], 'sleep/awakeningsCount': ['Awakenings Count', 'times awaken', 'sleep'], 'sleep/efficiency': ['Sleep Efficiency', '%', 'sleep'], 'sleep/minutesAfterWakeup': ['Minutes After Wakeup', 'minutes', 'sleep'], 'sleep/minutesAsleep': ['Sleep Minutes Asleep', 'minutes', 'sleep'], 'sleep/minutesAwake': ['Sleep Minutes Awake', 'minutes', 
'sleep'], 'sleep/minutesToFallAsleep': ['Sleep Minutes to Fall Asleep', 'minutes', 'sleep'], 'sleep/startTime': ['Sleep Start Time', None, 'clock'], 'sleep/timeInBed': ['Sleep Time in Bed', 'minutes', 'hotel'] } FITBIT_MEASUREMENTS = { 'en_US': { 'duration': 'ms', 'distance': 'mi', 'elevation': 'ft', 'height': 'in', 'weight': 'lbs', 'body': 'in', 'liquids': 'fl. oz.', 'blood glucose': 'mg/dL', 'battery': '', }, 'en_GB': { 'duration': 'milliseconds', 'distance': 'kilometers', 'elevation': 'meters', 'height': 'centimeters', 'weight': 'stone', 'body': 'centimeters', 'liquids': 'milliliters', 'blood glucose': 'mmol/L', 'battery': '', }, 'metric': { 'duration': 'milliseconds', 'distance': 'kilometers', 'elevation': 'meters', 'height': 'centimeters', 'weight': 'kilograms', 'body': 'centimeters', 'liquids': 'milliliters', 'blood glucose': 'mmol/L', 'battery': '', } } BATTERY_LEVELS = { 'High': 100, 'Medium': 50, 'Low': 20, 'Empty': 0 } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES): vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]), vol.Optional(CONF_CLOCK_FORMAT, default='24H'): vol.In(['12H', '24H']), vol.Optional(CONF_UNIT_SYSTEM, default='default'): vol.In(['en_GB', 'en_US', 'metric', 'default']) }) def request_app_setup(hass, config, add_entities, config_path, discovery_info=None): """Assist user with configuring the Fitbit dev application.""" configurator = hass.components.configurator def fitbit_configuration_callback(callback_data): """Handle configuration updates.""" config_path = hass.config.path(FITBIT_CONFIG_FILE) if os.path.isfile(config_path): config_file = load_json(config_path) if config_file == DEFAULT_CONFIG: error_msg = ("You didn't correctly modify fitbit.conf", " please try again") configurator.notify_errors(_CONFIGURING['fitbit'], error_msg) else: setup_platform(hass, config, add_entities, discovery_info) else: setup_platform(hass, config, add_entities, discovery_info) start_url = "{}{}".format(hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH) description = """Please create a Fitbit developer app at https://dev.fitbit.com/apps/new. For the OAuth 2.0 Application Type choose Personal. Set the Callback URL to {}. They will provide you a Client ID and secret. These need to be saved into the file located at: {}. Then come back here and hit the below button. """.format(start_url, config_path) submit = "I have saved my Client ID and Client Secret into fitbit.conf." _CONFIGURING['fitbit'] = configurator.request_config( 'Fitbit', fitbit_configuration_callback, description=description, submit_caption=submit, description_image="/static/images/config_fitbit_app.png" ) def request_oauth_completion(hass): """Request user complete Fitbit OAuth2 flow.""" configurator = hass.components.configurator if "fitbit" in _CONFIGURING: configurator.notify_errors( _CONFIGURING['fitbit'], "Failed to register, please try again.") return def fitbit_configuration_callback(callback_data): """Handle configuration updates.""" start_url = '{}{}'.format(hass.config.api.base_url, FITBIT_AUTH_START) description = "Please authorize Fitbit by visiting {}".format(start_url) _CONFIGURING['fitbit'] = configurator.request_config( 'Fitbit', fitbit_configuration_callback, description=description, submit_caption="I have authorized Fitbit." 
) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Fitbit sensor.""" config_path = hass.config.path(FITBIT_CONFIG_FILE) if os.path.isfile(config_path): config_file = load_json(config_path) if config_file == DEFAULT_CONFIG: request_app_setup( hass, config, add_entities, config_path, discovery_info=None) return False else: save_json(config_path, DEFAULT_CONFIG) request_app_setup( hass, config, add_entities, config_path, discovery_info=None) return False if "fitbit" in _CONFIGURING: hass.components.configurator.request_done(_CONFIGURING.pop("fitbit")) import fitbit access_token = config_file.get(ATTR_ACCESS_TOKEN) refresh_token = config_file.get(ATTR_REFRESH_TOKEN) expires_at = config_file.get(ATTR_LAST_SAVED_AT) if None not in (access_token, refresh_token): authd_client = fitbit.Fitbit(config_file.get(ATTR_CLIENT_ID), config_file.get(ATTR_CLIENT_SECRET), access_token=access_token, refresh_token=refresh_token, expires_at=expires_at, refresh_cb=lambda x: None) if int(time.time()) - expires_at > 3600: authd_client.client.refresh_token() unit_system = config.get(CONF_UNIT_SYSTEM) if unit_system == 'default': authd_client.system = authd_client. \ user_profile_get()["user"]["locale"] if authd_client.system != 'en_GB': if hass.config.units.is_metric: authd_client.system = 'metric' else: authd_client.system = 'en_US' else: authd_client.system = unit_system dev = [] registered_devs = authd_client.get_devices() clock_format = config.get(CONF_CLOCK_FORMAT) for resource in config.get(CONF_MONITORED_RESOURCES): # monitor battery for all linked FitBit devices if resource == 'devices/battery': for dev_extra in registered_devs: dev.append(FitbitSensor( authd_client, config_path, resource, hass.config.units.is_metric, clock_format, dev_extra)) else: dev.append(FitbitSensor( authd_client, config_path, resource, hass.config.units.is_metric, clock_format)) add_entities(dev, True) else: oauth = fitbit.api.FitbitOauth2Client( config_file.get(ATTR_CLIENT_ID), config_file.get(ATTR_CLIENT_SECRET)) redirect_uri = '{}{}'.format(hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH) fitbit_auth_start_url, _ = oauth.authorize_token_url( redirect_uri=redirect_uri, scope=['activity', 'heartrate', 'nutrition', 'profile', 'settings', 'sleep', 'weight']) hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url) hass.http.register_view(FitbitAuthCallbackView( config, add_entities, oauth)) request_oauth_completion(hass) class FitbitAuthCallbackView(HomeAssistantView): """Handle OAuth finish callback requests.""" requires_auth = False url = FITBIT_AUTH_CALLBACK_PATH name = 'api:fitbit:callback' def __init__(self, config, add_entities, oauth): """Initialize the OAuth callback view.""" self.config = config self.add_entities = add_entities self.oauth = oauth @callback def get(self, request): """Finish OAuth callback request.""" from oauthlib.oauth2.rfc6749.errors import MismatchingStateError from oauthlib.oauth2.rfc6749.errors import MissingTokenError hass = request.app['hass'] data = request.query response_message = """Fitbit has been successfully authorized! You can close this window now!""" result = None if data.get('code') is not None: redirect_uri = '{}{}'.format( hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH) try: result = self.oauth.fetch_access_token(data.get('code'), redirect_uri) except MissingTokenError as error: _LOGGER.error("Missing token: %s", error) response_message = """Something went wrong when attempting authenticating with Fitbit. The error encountered was {}. 
Please try again!""".format(error) except MismatchingStateError as error: _LOGGER.error("Mismatched state, CSRF error: %s", error) response_message = """Something went wrong when attempting to authenticate with Fitbit. The error encountered was {}. Please try again!""".format(error) else: _LOGGER.error("Unknown error when authenticating") response_message = """Something went wrong when attempting to authenticate with Fitbit. An unknown error occurred. Please try again! """ if result is None: _LOGGER.error("Unknown error when authenticating") response_message = """Something went wrong when attempting to authenticate with Fitbit. An unknown error occurred. Please try again! """ html_response = """<html><head><title>Fitbit Auth</title></head> <body><h1>{}</h1></body></html>""".format(response_message) if result: config_contents = { ATTR_ACCESS_TOKEN: result.get('access_token'), ATTR_REFRESH_TOKEN: result.get('refresh_token'), ATTR_CLIENT_ID: self.oauth.client_id, ATTR_CLIENT_SECRET: self.oauth.client_secret, ATTR_LAST_SAVED_AT: int(time.time()) } save_json(hass.config.path(FITBIT_CONFIG_FILE), config_contents) hass.async_add_job(setup_platform, hass, self.config, self.add_entities) return html_response class FitbitSensor(Entity): """Implementation of a Fitbit sensor.""" def __init__(self, client, config_path, resource_type, is_metric, clock_format, extra=None): """Initialize the Fitbit sensor.""" self.client = client self.config_path = config_path self.resource_type = resource_type self.is_metric = is_metric self.clock_format = clock_format self.extra = extra self._name = FITBIT_RESOURCES_LIST[self.resource_type][0] if self.extra: self._name = '{0} Battery'.format(self.extra.get('deviceVersion')) unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1] if unit_type == "": split_resource = self.resource_type.split('/') try: measurement_system = FITBIT_MEASUREMENTS[self.client.system] except KeyError: if self.is_metric: measurement_system = FITBIT_MEASUREMENTS['metric'] else: measurement_system = FITBIT_MEASUREMENTS['en_US'] unit_type = measurement_system[split_resource[-1]] self._unit_of_measurement = unit_type self._state = 0 @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement of this entity, if any.""" return self._unit_of_measurement @property def icon(self): """Icon to use in the frontend, if any.""" if self.resource_type == 'devices/battery' and self.extra: battery_level = BATTERY_LEVELS[self.extra.get('battery')] return icon_for_battery_level(battery_level=battery_level, charging=None) return 'mdi:{}'.format(FITBIT_RESOURCES_LIST[self.resource_type][2]) @property def device_state_attributes(self): """Return the state attributes.""" attrs = {} attrs[ATTR_ATTRIBUTION] = CONF_ATTRIBUTION if self.extra: attrs['model'] = self.extra.get('deviceVersion') attrs['type'] = self.extra.get('type').lower() return attrs def update(self): """Get the latest data from the Fitbit API and update the states.""" if self.resource_type == 'devices/battery' and self.extra: self._state = self.extra.get('battery') else: container = self.resource_type.replace("/", "-") response = self.client.time_series(self.resource_type, period='7d') raw_state = response[container][-1].get('value') if self.resource_type == 'activities/distance': self._state = format(float(raw_state), '.2f') elif self.resource_type == 'activities/tracker/distance': self._state = 
format(float(raw_state), '.2f') elif self.resource_type == 'body/bmi': self._state = format(float(raw_state), '.1f') elif self.resource_type == 'body/fat': self._state = format(float(raw_state), '.1f') elif self.resource_type == 'body/weight': self._state = format(float(raw_state), '.1f') elif self.resource_type == 'sleep/startTime': if raw_state == '': self._state = '-' elif self.clock_format == '12H': hours, minutes = raw_state.split(':') hours, minutes = int(hours), int(minutes) setting = 'AM' if hours >= 12: setting = 'PM' if hours > 12: hours -= 12 elif hours == 0: hours = 12 self._state = '{}:{:02d} {}'.format(hours, minutes, setting) else: self._state = raw_state else: if self.is_metric: self._state = raw_state else: try: self._state = '{0:,}'.format(int(raw_state)) except TypeError: self._state = raw_state if self.resource_type == 'activities/heart': self._state = response[container][-1]. \ get('value').get('restingHeartRate') token = self.client.client.session.token config_contents = { ATTR_ACCESS_TOKEN: token.get('access_token'), ATTR_REFRESH_TOKEN: token.get('refresh_token'), ATTR_CLIENT_ID: self.client.client.client_id, ATTR_CLIENT_SECRET: self.client.client.client_secret, ATTR_LAST_SAVED_AT: int(time.time()) } save_json(self.config_path, config_contents)
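The 12-hour clock branch in update() above is easy to get wrong at the midnight and noon boundaries (the unfixed comparison rendered 12:xx as AM). Here is a minimal standalone sketch of that conversion with quick checks; to_12h is an illustrative name, not part of the component.

def to_12h(raw_state):
    """Render an 'HH:MM' 24-hour string the way the '12H' branch does."""
    hours, minutes = (int(part) for part in raw_state.split(':'))
    setting = 'AM'
    if hours >= 12:
        setting = 'PM'
        if hours > 12:
            hours -= 12
    elif hours == 0:
        hours = 12
    return '{}:{:02d} {}'.format(hours, minutes, setting)

assert to_12h('00:05') == '12:05 AM'   # midnight maps to 12 AM
assert to_12h('12:30') == '12:30 PM'   # noon must be PM, not AM
assert to_12h('23:07') == '11:07 PM'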
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
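The fixtures above freeze time by patching dt_util.utcnow rather than sleeping, so a "held for N milliseconds" trigger can be exercised instantly. A minimal self-contained sketch of that pattern, assuming only the standard library (Clock and held_more_than are illustrative stand-ins, not Home Assistant APIs):

from datetime import datetime, timedelta
from unittest import mock

class Clock:
    """Tiny stand-in for homeassistant.util.dt."""
    @staticmethod
    def utcnow():
        return datetime.utcnow()

def held_more_than(pressed_at, minimum):
    """True once the simulated press has lasted at least `minimum`."""
    return Clock.utcnow() - pressed_at >= minimum

start = datetime(2019, 1, 1)
with mock.patch.object(Clock, 'utcnow',
                       return_value=start + timedelta(milliseconds=300)):
    assert held_more_than(start, timedelta(milliseconds=200))
with mock.patch.object(Clock, 'utcnow',
                       return_value=start + timedelta(milliseconds=100)):
    assert not held_more_than(start, timedelta(milliseconds=200))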
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/sensor/fitbit.py
"""Support for EDP re:dy sensors.""" import logging from homeassistant.helpers.entity import Entity from homeassistant.components.edp_redy import EdpRedyDevice, EDP_REDY _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['edp_redy'] # Load power in watts (W) ATTR_ACTIVE_POWER = 'active_power' async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Perform the setup for re:dy devices.""" from edp_redy.session import ACTIVE_POWER_ID session = hass.data[EDP_REDY] devices = [] # Create sensors for modules for device_json in session.modules_dict.values(): if 'HA_POWER_METER' not in device_json['Capabilities']: continue devices.append(EdpRedyModuleSensor(session, device_json)) # Create a sensor for global active power devices.append(EdpRedySensor(session, ACTIVE_POWER_ID, "Power Home", 'mdi:flash', 'W')) async_add_entities(devices, True) class EdpRedySensor(EdpRedyDevice, Entity): """Representation of a EDP re:dy generic sensor.""" def __init__(self, session, sensor_id, name, icon, unit): """Initialize the sensor.""" super().__init__(session, sensor_id, name) self._icon = icon self._unit = unit @property def state(self): """Return the state of the sensor.""" return self._state @property def icon(self): """Return the icon to use in the frontend.""" return self._icon @property def unit_of_measurement(self): """Return the unit of measurement of this sensor.""" return self._unit async def async_update(self): """Parse the data for this sensor.""" if self._id in self._session.values_dict: self._state = self._session.values_dict[self._id] self._is_available = True else: self._is_available = False class EdpRedyModuleSensor(EdpRedyDevice, Entity): """Representation of a EDP re:dy module sensor.""" def __init__(self, session, device_json): """Initialize the sensor.""" super().__init__(session, device_json['PKID'], "Power {0}".format(device_json['Name'])) @property def state(self): """Return the state of the sensor.""" return self._state @property def icon(self): """Return the icon to use in the frontend.""" return 'mdi:flash' @property def unit_of_measurement(self): """Return the unit of measurement of this sensor.""" return 'W' async def async_update(self): """Parse the data for this sensor.""" if self._id in self._session.modules_dict: device_json = self._session.modules_dict[self._id] self._parse_data(device_json) else: self._is_available = False def _parse_data(self, data): """Parse data received from the server.""" super()._parse_data(data) _LOGGER.debug("Sensor data: %s", str(data)) for state_var in data['StateVars']: if state_var['Name'] == 'ActivePower': try: self._state = float(state_var['Value']) * 1000 except ValueError: _LOGGER.error("Could not parse power for %s", self._id) self._state = 0 self._is_available = False
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/edp_redy/sensor.py
"""Config flow for ZHA.""" from collections import OrderedDict import os import voluptuous as vol from homeassistant import config_entries from .core.const import ( CONF_RADIO_TYPE, CONF_USB_PATH, DEFAULT_DATABASE_NAME, DOMAIN, RadioType) from .core.helpers import check_zigpy_connection @config_entries.HANDLERS.register(DOMAIN) class ZhaFlowHandler(config_entries.ConfigFlow): """Handle a config flow.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH async def async_step_user(self, user_input=None): """Handle a zha config flow start.""" if self._async_current_entries(): return self.async_abort(reason='single_instance_allowed') errors = {} fields = OrderedDict() fields[vol.Required(CONF_USB_PATH)] = str fields[vol.Optional(CONF_RADIO_TYPE, default='ezsp')] = vol.In( RadioType.list() ) if user_input is not None: database = os.path.join(self.hass.config.config_dir, DEFAULT_DATABASE_NAME) test = await check_zigpy_connection(user_input[CONF_USB_PATH], user_input[CONF_RADIO_TYPE], database) if test: return self.async_create_entry( title=user_input[CONF_USB_PATH], data=user_input) errors['base'] = 'cannot_connect' return self.async_show_form( step_id='user', data_schema=vol.Schema(fields), errors=errors ) async def async_step_import(self, import_info): """Handle a zha config import.""" if self._async_current_entries(): return self.async_abort(reason='single_instance_allowed') return self.async_create_entry( title=import_info[CONF_USB_PATH], data=import_info )
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/zha/config_flow.py
""" Support for switching Arduino pins on and off. So far only digital pins are supported. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.arduino/ """ import logging import voluptuous as vol from homeassistant.components import arduino from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA) from homeassistant.const import CONF_NAME import homeassistant.helpers.config_validation as cv DEPENDENCIES = ['arduino'] _LOGGER = logging.getLogger(__name__) CONF_PINS = 'pins' CONF_TYPE = 'digital' CONF_NEGATE = 'negate' CONF_INITIAL = 'initial' PIN_SCHEMA = vol.Schema({ vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_INITIAL, default=False): cv.boolean, vol.Optional(CONF_NEGATE, default=False): cv.boolean, }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_PINS, default={}): vol.Schema({cv.positive_int: PIN_SCHEMA}), }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Arduino platform.""" # Verify that Arduino board is present if arduino.BOARD is None: _LOGGER.error("A connection has not been made to the Arduino board") return False pins = config.get(CONF_PINS) switches = [] for pinnum, pin in pins.items(): switches.append(ArduinoSwitch(pinnum, pin)) add_entities(switches) class ArduinoSwitch(SwitchDevice): """Representation of an Arduino switch.""" def __init__(self, pin, options): """Initialize the Pin.""" self._pin = pin self._name = options.get(CONF_NAME) self.pin_type = CONF_TYPE self.direction = 'out' self._state = options.get(CONF_INITIAL) if options.get(CONF_NEGATE): self.turn_on_handler = arduino.BOARD.set_digital_out_low self.turn_off_handler = arduino.BOARD.set_digital_out_high else: self.turn_on_handler = arduino.BOARD.set_digital_out_high self.turn_off_handler = arduino.BOARD.set_digital_out_low arduino.BOARD.set_mode(self._pin, self.direction, self.pin_type) (self.turn_on_handler if self._state else self.turn_off_handler)(pin) @property def name(self): """Get the name of the pin.""" return self._name @property def is_on(self): """Return true if pin is high/on.""" return self._state def turn_on(self, **kwargs): """Turn the pin to high/on.""" self._state = True self.turn_on_handler(self._pin) def turn_off(self, **kwargs): """Turn the pin to low/off.""" self._state = False self.turn_off_handler(self._pin)
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/arduino/switch.py
""" Support for functionality to interact with FireTV devices. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.firetv/ """ import functools import logging import threading import voluptuous as vol from homeassistant.components.media_player import ( MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.components.media_player.const import ( SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['firetv==1.0.7'] _LOGGER = logging.getLogger(__name__) SUPPORT_FIRETV = SUPPORT_PAUSE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \ SUPPORT_NEXT_TRACK | SUPPORT_SELECT_SOURCE | SUPPORT_STOP | \ SUPPORT_PLAY CONF_ADBKEY = 'adbkey' CONF_GET_SOURCE = 'get_source' CONF_GET_SOURCES = 'get_sources' DEFAULT_NAME = 'Amazon Fire TV' DEFAULT_PORT = 5555 DEFAULT_GET_SOURCE = True DEFAULT_GET_SOURCES = True def has_adb_files(value): """Check that ADB key files exist.""" priv_key = value pub_key = '{}.pub'.format(value) cv.isfile(pub_key) return cv.isfile(priv_key) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_ADBKEY): has_adb_files, vol.Optional(CONF_GET_SOURCE, default=DEFAULT_GET_SOURCE): cv.boolean, vol.Optional(CONF_GET_SOURCES, default=DEFAULT_GET_SOURCES): cv.boolean }) PACKAGE_LAUNCHER = "com.amazon.tv.launcher" PACKAGE_SETTINGS = "com.amazon.tv.settings" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the FireTV platform.""" from firetv import FireTV host = '{0}:{1}'.format(config[CONF_HOST], config[CONF_PORT]) if CONF_ADBKEY in config: ftv = FireTV(host, config[CONF_ADBKEY]) adb_log = " using adbkey='{0}'".format(config[CONF_ADBKEY]) else: ftv = FireTV(host) adb_log = "" if not ftv.available: _LOGGER.warning("Could not connect to Fire TV at %s%s", host, adb_log) return name = config[CONF_NAME] get_source = config[CONF_GET_SOURCE] get_sources = config[CONF_GET_SOURCES] device = FireTVDevice(ftv, name, get_source, get_sources) add_entities([device]) _LOGGER.debug("Setup Fire TV at %s%s", host, adb_log) def adb_decorator(override_available=False): """Send an ADB command if the device is available and not locked.""" def adb_wrapper(func): """Wait if previous ADB commands haven't finished.""" @functools.wraps(func) def _adb_wrapper(self, *args, **kwargs): # If the device is unavailable, don't do anything if not self.available and not override_available: return None # If an ADB command is already running, skip this command if not self.adb_lock.acquire(blocking=False): _LOGGER.info("Skipping an ADB command because a previous " "command is still running") return None # Additional ADB commands will be prevented while trying this one try: returns = func(self, *args, **kwargs) except self.exceptions as err: _LOGGER.error( "Failed to execute an ADB command. ADB connection re-" "establishing attempt in the next update. 
Error: %s", err) returns = None self._available = False # pylint: disable=protected-access finally: self.adb_lock.release() return returns return _adb_wrapper return adb_wrapper class FireTVDevice(MediaPlayerDevice): """Representation of an Amazon Fire TV device on the network.""" def __init__(self, ftv, name, get_source, get_sources): """Initialize the FireTV device.""" from adb.adb_protocol import ( InvalidChecksumError, InvalidCommandError, InvalidResponseError) self.firetv = ftv self._name = name self._get_source = get_source self._get_sources = get_sources # whether or not the ADB connection is currently in use self.adb_lock = threading.Lock() # ADB exceptions to catch self.exceptions = ( AttributeError, BrokenPipeError, TypeError, ValueError, InvalidChecksumError, InvalidCommandError, InvalidResponseError) self._state = None self._available = self.firetv.available self._current_app = None self._running_apps = None @property def name(self): """Return the device name.""" return self._name @property def should_poll(self): """Device should be polled.""" return True @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_FIRETV @property def state(self): """Return the state of the player.""" return self._state @property def available(self): """Return whether or not the ADB connection is valid.""" return self._available @property def app_id(self): """Return the current app.""" return self._current_app @property def source(self): """Return the current app.""" return self._current_app @property def source_list(self): """Return a list of running apps.""" return self._running_apps @adb_decorator(override_available=True) def update(self): """Get the latest date and update device state.""" # Check if device is disconnected. if not self._available: self._running_apps = None self._current_app = None # Try to connect self.firetv.connect() self._available = self.firetv.available # If the ADB connection is not intact, don't update. if not self._available: return # Check if device is off. if not self.firetv.screen_on: self._state = STATE_OFF self._running_apps = None self._current_app = None # Check if screen saver is on. elif not self.firetv.awake: self._state = STATE_IDLE self._running_apps = None self._current_app = None else: # Get the running apps. if self._get_sources: self._running_apps = self.firetv.running_apps # Get the current app. if self._get_source: current_app = self.firetv.current_app if isinstance(current_app, dict)\ and 'package' in current_app: self._current_app = current_app['package'] else: self._current_app = current_app # Show the current app as the only running app. if not self._get_sources: if self._current_app: self._running_apps = [self._current_app] else: self._running_apps = None # Check if the launcher is active. if self._current_app in [PACKAGE_LAUNCHER, PACKAGE_SETTINGS]: self._state = STATE_STANDBY # Check for a wake lock (device is playing). elif self.firetv.wake_lock: self._state = STATE_PLAYING # Otherwise, device is paused. else: self._state = STATE_PAUSED # Don't get the current app. elif self.firetv.wake_lock: # Check for a wake lock (device is playing). self._state = STATE_PLAYING else: # Assume the devices is on standby. 
self._state = STATE_STANDBY @adb_decorator() def turn_on(self): """Turn on the device.""" self.firetv.turn_on() @adb_decorator() def turn_off(self): """Turn off the device.""" self.firetv.turn_off() @adb_decorator() def media_play(self): """Send play command.""" self.firetv.media_play() @adb_decorator() def media_pause(self): """Send pause command.""" self.firetv.media_pause() @adb_decorator() def media_play_pause(self): """Send play/pause command.""" self.firetv.media_play_pause() @adb_decorator() def media_stop(self): """Send stop (back) command.""" self.firetv.back() @adb_decorator() def volume_up(self): """Send volume up command.""" self.firetv.volume_up() @adb_decorator() def volume_down(self): """Send volume down command.""" self.firetv.volume_down() @adb_decorator() def media_previous_track(self): """Send previous track command (results in rewind).""" self.firetv.media_previous() @adb_decorator() def media_next_track(self): """Send next track command (results in fast-forward).""" self.firetv.media_next() @adb_decorator() def select_source(self, source): """Select input source. If the source starts with a '!', then it will close the app instead of opening it. """ if isinstance(source, str): if not source.startswith('!'): self.firetv.launch_app(source) else: self.firetv.stop_app(source[1:].lstrip())
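The key design choice in adb_decorator above is the non-blocking lock acquire: an update or command that arrives while another ADB command is in flight is skipped, not queued, so a stalled connection cannot pile up callers. A minimal standalone sketch of that pattern; guarded and Device are illustrative names, not part of the component.

import functools
import threading

def guarded(func):
    """Skip the call instead of blocking when another command holds the lock,
    mirroring adb_decorator's non-blocking acquire."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.lock.acquire(blocking=False):
            return None  # a previous command is still running
        try:
            return func(self, *args, **kwargs)
        finally:
            self.lock.release()
    return wrapper

class Device:
    def __init__(self):
        self.lock = threading.Lock()

    @guarded
    def command(self):
        return 'ok'

dev = Device()
assert dev.command() == 'ok'
dev.lock.acquire()
assert dev.command() is None  # skipped, not queued
dev.lock.release()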
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/media_player/firetv.py
""" Read temperature information from Eddystone beacons. Your beacons must be configured to transmit UID (for identification) and TLM (for temperature) frames. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.eddystone_temperature/ """ import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( CONF_NAME, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, STATE_UNKNOWN, TEMP_CELSIUS) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity REQUIREMENTS = ['beacontools[scan]==1.2.3', 'construct==2.9.45'] _LOGGER = logging.getLogger(__name__) CONF_BEACONS = 'beacons' CONF_BT_DEVICE_ID = 'bt_device_id' CONF_INSTANCE = 'instance' CONF_NAMESPACE = 'namespace' BEACON_SCHEMA = vol.Schema({ vol.Required(CONF_NAMESPACE): cv.string, vol.Required(CONF_INSTANCE): cv.string, vol.Optional(CONF_NAME): cv.string }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_BT_DEVICE_ID, default=0): cv.positive_int, vol.Required(CONF_BEACONS): vol.Schema({cv.string: BEACON_SCHEMA}), }) def setup_platform(hass, config, add_entities, discovery_info=None): """Validate configuration, create devices and start monitoring thread.""" bt_device_id = config.get("bt_device_id") beacons = config.get(CONF_BEACONS) devices = [] for dev_name, properties in beacons.items(): namespace = get_from_conf(properties, CONF_NAMESPACE, 20) instance = get_from_conf(properties, CONF_INSTANCE, 12) name = properties.get(CONF_NAME, dev_name) if instance is None or namespace is None: _LOGGER.error("Skipping %s", dev_name) continue else: devices.append(EddystoneTemp(name, namespace, instance)) if devices: mon = Monitor(hass, devices, bt_device_id) def monitor_stop(_service_or_event): """Stop the monitor thread.""" _LOGGER.info("Stopping scanner for Eddystone beacons") mon.stop() def monitor_start(_service_or_event): """Start the monitor thread.""" _LOGGER.info("Starting scanner for Eddystone beacons") mon.start() add_entities(devices) mon.start() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop) hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start) else: _LOGGER.warning("No devices were added") def get_from_conf(config, config_key, length): """Retrieve value from config and validate length.""" string = config.get(config_key) if len(string) != length: _LOGGER.error("Error in config parameter %s: Must be exactly %d " "bytes. 
Device will not be added", config_key, length/2) return None return string class EddystoneTemp(Entity): """Representation of a temperature sensor.""" def __init__(self, name, namespace, instance): """Initialize a sensor.""" self._name = name self.namespace = namespace self.instance = instance self.bt_addr = None self.temperature = STATE_UNKNOWN @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the device.""" return self.temperature @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return TEMP_CELSIUS @property def should_poll(self): """Return the polling state.""" return False class Monitor: """Continuously scan for BLE advertisements.""" def __init__(self, hass, devices, bt_device_id): """Construct interface object.""" self.hass = hass # List of beacons to monitor self.devices = devices # Number of the bt device (hciX) self.bt_device_id = bt_device_id def callback(bt_addr, _, packet, additional_info): """Handle new packets.""" self.process_packet( additional_info['namespace'], additional_info['instance'], packet.temperature) from beacontools import ( # pylint: disable=import-error BeaconScanner, EddystoneFilter, EddystoneTLMFrame) device_filters = [EddystoneFilter(d.namespace, d.instance) for d in devices] self.scanner = BeaconScanner( callback, bt_device_id, device_filters, EddystoneTLMFrame) self.scanning = False def start(self): """Continuously scan for BLE advertisements.""" if not self.scanning: self.scanner.start() self.scanning = True else: _LOGGER.debug( "start() called, but scanner is already running") def process_packet(self, namespace, instance, temperature): """Assign temperature to device.""" _LOGGER.debug("Received temperature for <%s,%s>: %d", namespace, instance, temperature) for dev in self.devices: if dev.namespace == namespace and dev.instance == instance: if dev.temperature != temperature: dev.temperature = temperature dev.schedule_update_ha_state() def stop(self): """Signal runner to stop and join thread.""" if self.scanning: _LOGGER.debug("Stopping...") self.scanner.stop() _LOGGER.debug("Stopped") self.scanning = False else: _LOGGER.debug( "stop() called but scanner was not running")
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/sensor/eddystone_temperature.py
""" Tracks devices by sending a ICMP echo request (ping). For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.ping/ """ import logging import subprocess import sys from datetime import timedelta import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.device_tracker import ( PLATFORM_SCHEMA, CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL, SOURCE_TYPE_ROUTER) from homeassistant import util from homeassistant import const _LOGGER = logging.getLogger(__name__) CONF_PING_COUNT = 'count' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(const.CONF_HOSTS): {cv.string: cv.string}, vol.Optional(CONF_PING_COUNT, default=1): cv.positive_int, }) class Host: """Host object with ping detection.""" def __init__(self, ip_address, dev_id, hass, config): """Initialize the Host pinger.""" self.hass = hass self.ip_address = ip_address self.dev_id = dev_id self._count = config[CONF_PING_COUNT] if sys.platform == 'win32': self._ping_cmd = ['ping', '-n', '1', '-w', '1000', self.ip_address] else: self._ping_cmd = ['ping', '-n', '-q', '-c1', '-W1', self.ip_address] def ping(self): """Send an ICMP echo request and return True if success.""" pinger = subprocess.Popen(self._ping_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) try: pinger.communicate() return pinger.returncode == 0 except subprocess.CalledProcessError: return False def update(self, see): """Update device state by sending one or more ping messages.""" failed = 0 while failed < self._count: # check more times if host is unreachable if self.ping(): see(dev_id=self.dev_id, source_type=SOURCE_TYPE_ROUTER) return True failed += 1 _LOGGER.debug("No response from %s failed=%d", self.ip_address, failed) def setup_scanner(hass, config, see, discovery_info=None): """Set up the Host objects and return the update function.""" hosts = [Host(ip, dev_id, hass, config) for (dev_id, ip) in config[const.CONF_HOSTS].items()] interval = config.get(CONF_SCAN_INTERVAL, timedelta(seconds=len(hosts) * config[CONF_PING_COUNT]) + DEFAULT_SCAN_INTERVAL) _LOGGER.debug("Started ping tracker with interval=%s on hosts: %s", interval, ",".join([host.ip_address for host in hosts])) def update_interval(now): """Update all the hosts on every interval time.""" try: for host in hosts: host.update(see) finally: hass.helpers.event.track_point_in_utc_time( update_interval, util.dt.utcnow() + interval) update_interval(None) return True
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/device_tracker/ping.py
""" Support for LimitlessLED bulbs. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.limitlessled/ """ import logging import voluptuous as vol from homeassistant.const import ( CONF_NAME, CONF_HOST, CONF_PORT, CONF_TYPE, STATE_ON) from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_HS_COLOR, ATTR_TRANSITION, EFFECT_COLORLOOP, EFFECT_WHITE, FLASH_LONG, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH, SUPPORT_COLOR, SUPPORT_TRANSITION, Light, PLATFORM_SCHEMA) import homeassistant.helpers.config_validation as cv from homeassistant.util.color import ( color_temperature_mired_to_kelvin, color_hs_to_RGB) from homeassistant.helpers.restore_state import RestoreEntity REQUIREMENTS = ['limitlessled==1.1.3'] _LOGGER = logging.getLogger(__name__) CONF_BRIDGES = 'bridges' CONF_GROUPS = 'groups' CONF_NUMBER = 'number' CONF_VERSION = 'version' CONF_FADE = 'fade' DEFAULT_LED_TYPE = 'rgbw' DEFAULT_PORT = 5987 DEFAULT_TRANSITION = 0 DEFAULT_VERSION = 6 DEFAULT_FADE = False LED_TYPE = ['rgbw', 'rgbww', 'white', 'bridge-led', 'dimmer'] EFFECT_NIGHT = 'night' MIN_SATURATION = 10 WHITE = [0, 0] SUPPORT_LIMITLESSLED_WHITE = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT | SUPPORT_TRANSITION) SUPPORT_LIMITLESSLED_DIMMER = (SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION) SUPPORT_LIMITLESSLED_RGB = (SUPPORT_BRIGHTNESS | SUPPORT_EFFECT | SUPPORT_FLASH | SUPPORT_COLOR | SUPPORT_TRANSITION) SUPPORT_LIMITLESSLED_RGBWW = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_EFFECT | SUPPORT_FLASH | SUPPORT_COLOR | SUPPORT_TRANSITION) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_BRIDGES): vol.All(cv.ensure_list, [ { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): cv.positive_int, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Required(CONF_GROUPS): vol.All(cv.ensure_list, [ { vol.Required(CONF_NAME): cv.string, vol.Optional(CONF_TYPE, default=DEFAULT_LED_TYPE): vol.In(LED_TYPE), vol.Required(CONF_NUMBER): cv.positive_int, vol.Optional(CONF_FADE, default=DEFAULT_FADE): cv.boolean, } ]), }, ]), }) def rewrite_legacy(config): """Rewrite legacy configuration to new format.""" bridges = config.get(CONF_BRIDGES, [config]) new_bridges = [] for bridge_conf in bridges: groups = [] if 'groups' in bridge_conf: groups = bridge_conf['groups'] else: _LOGGER.warning("Legacy configuration format detected") for i in range(1, 5): name_key = 'group_%d_name' % i if name_key in bridge_conf: groups.append({ 'number': i, 'type': bridge_conf.get('group_%d_type' % i, DEFAULT_LED_TYPE), 'name': bridge_conf.get(name_key) }) new_bridges.append({ 'host': bridge_conf.get(CONF_HOST), 'version': bridge_conf.get(CONF_VERSION), 'port': bridge_conf.get(CONF_PORT), 'groups': groups }) return {'bridges': new_bridges} def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the LimitlessLED lights.""" from limitlessled.bridge import Bridge # Two legacy configuration formats are supported to maintain backwards # compatibility. config = rewrite_legacy(config) # Use the expanded configuration format. 
lights = [] for bridge_conf in config.get(CONF_BRIDGES): bridge = Bridge(bridge_conf.get(CONF_HOST), port=bridge_conf.get(CONF_PORT, DEFAULT_PORT), version=bridge_conf.get(CONF_VERSION, DEFAULT_VERSION)) for group_conf in bridge_conf.get(CONF_GROUPS): group = bridge.add_group( group_conf.get(CONF_NUMBER), group_conf.get(CONF_NAME), group_conf.get(CONF_TYPE, DEFAULT_LED_TYPE)) lights.append(LimitlessLEDGroup(group, { 'fade': group_conf[CONF_FADE] })) add_entities(lights) def state(new_state): """State decorator. Specify True (turn on) or False (turn off). """ def decorator(function): """Set up the decorator function.""" # pylint: disable=protected-access def wrapper(self, **kwargs): """Wrap a group state change.""" from limitlessled.pipeline import Pipeline pipeline = Pipeline() transition_time = DEFAULT_TRANSITION if self._effect == EFFECT_COLORLOOP: self.group.stop() self._effect = None # Set transition time. if ATTR_TRANSITION in kwargs: transition_time = int(kwargs[ATTR_TRANSITION]) # Do group type-specific work. function(self, transition_time, pipeline, **kwargs) # Update state. self._is_on = new_state self.group.enqueue(pipeline) self.schedule_update_ha_state() return wrapper return decorator class LimitlessLEDGroup(Light, RestoreEntity): """Representation of a LimitlessLED group.""" def __init__(self, group, config): """Initialize a group.""" from limitlessled.group.rgbw import RgbwGroup from limitlessled.group.white import WhiteGroup from limitlessled.group.dimmer import DimmerGroup from limitlessled.group.rgbww import RgbwwGroup if isinstance(group, WhiteGroup): self._supported = SUPPORT_LIMITLESSLED_WHITE self._effect_list = [EFFECT_NIGHT] elif isinstance(group, DimmerGroup): self._supported = SUPPORT_LIMITLESSLED_DIMMER self._effect_list = [] elif isinstance(group, RgbwGroup): self._supported = SUPPORT_LIMITLESSLED_RGB self._effect_list = [EFFECT_COLORLOOP, EFFECT_NIGHT, EFFECT_WHITE] elif isinstance(group, RgbwwGroup): self._supported = SUPPORT_LIMITLESSLED_RGBWW self._effect_list = [EFFECT_COLORLOOP, EFFECT_NIGHT, EFFECT_WHITE] self.group = group self.config = config self._is_on = False self._brightness = None self._temperature = None self._color = None self._effect = None async def async_added_to_hass(self): """Handle entity about to be added to hass event.""" await super().async_added_to_hass() last_state = await self.async_get_last_state() if last_state: self._is_on = (last_state.state == STATE_ON) self._brightness = last_state.attributes.get('brightness') self._temperature = last_state.attributes.get('color_temp') self._color = last_state.attributes.get('hs_color') @property def should_poll(self): """No polling needed.""" return False @property def assumed_state(self): """Return True because unable to access real state of the entity.""" return True @property def name(self): """Return the name of the group.""" return self.group.name @property def is_on(self): """Return true if device is on.""" return self._is_on @property def brightness(self): """Return the brightness property.""" if self._effect == EFFECT_NIGHT: return 1 return self._brightness @property def min_mireds(self): """Return the coldest color_temp that this light supports.""" return 154 @property def max_mireds(self): """Return the warmest color_temp that this light supports.""" return 370 @property def color_temp(self): """Return the temperature property.""" if self.hs_color is not None: return None return self._temperature @property def hs_color(self): """Return the color property.""" if self._effect ==
EFFECT_NIGHT: return None if self._color is None or self._color[1] == 0: return None return self._color @property def supported_features(self): """Flag supported features.""" return self._supported @property def effect(self): """Return the current effect for this light.""" return self._effect @property def effect_list(self): """Return the list of supported effects for this light.""" return self._effect_list # pylint: disable=arguments-differ @state(False) def turn_off(self, transition_time, pipeline, **kwargs): """Turn off a group.""" if self.config[CONF_FADE]: pipeline.transition(transition_time, brightness=0.0) pipeline.off() # pylint: disable=arguments-differ @state(True) def turn_on(self, transition_time, pipeline, **kwargs): """Turn on (or adjust property of) a group.""" # The night effect does not need a turned on light if kwargs.get(ATTR_EFFECT) == EFFECT_NIGHT: if EFFECT_NIGHT in self._effect_list: pipeline.night_light() self._effect = EFFECT_NIGHT return pipeline.on() # Set up transition. args = {} if self.config[CONF_FADE] and not self.is_on and self._brightness: args['brightness'] = self.limitlessled_brightness() if ATTR_BRIGHTNESS in kwargs: self._brightness = kwargs[ATTR_BRIGHTNESS] args['brightness'] = self.limitlessled_brightness() if ATTR_HS_COLOR in kwargs and self._supported & SUPPORT_COLOR: self._color = kwargs[ATTR_HS_COLOR] # White is a special case. if self._color[1] < MIN_SATURATION: pipeline.white() self._color = WHITE else: args['color'] = self.limitlessled_color() if ATTR_COLOR_TEMP in kwargs: if self._supported & SUPPORT_COLOR: pipeline.white() self._color = WHITE if self._supported & SUPPORT_COLOR_TEMP: self._temperature = kwargs[ATTR_COLOR_TEMP] args['temperature'] = self.limitlessled_temperature() if args: pipeline.transition(transition_time, **args) # Flash. if ATTR_FLASH in kwargs and self._supported & SUPPORT_FLASH: duration = 0 if kwargs[ATTR_FLASH] == FLASH_LONG: duration = 1 pipeline.flash(duration=duration) # Add effects. if ATTR_EFFECT in kwargs and self._effect_list: if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP: from limitlessled.presets import COLORLOOP self._effect = EFFECT_COLORLOOP pipeline.append(COLORLOOP) if kwargs[ATTR_EFFECT] == EFFECT_WHITE: pipeline.white() self._color = WHITE def limitlessled_temperature(self): """Convert Home Assistant color temperature units to percentage.""" max_kelvin = color_temperature_mired_to_kelvin(self.min_mireds) min_kelvin = color_temperature_mired_to_kelvin(self.max_mireds) width = max_kelvin - min_kelvin kelvin = color_temperature_mired_to_kelvin(self._temperature) temperature = (kelvin - min_kelvin) / width return max(0, min(1, temperature)) def limitlessled_brightness(self): """Convert Home Assistant brightness units to percentage.""" return self._brightness / 255 def limitlessled_color(self): """Convert Home Assistant HS list to RGB Color tuple.""" from limitlessled import Color return Color(*color_hs_to_RGB(*tuple(self._color)))
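limitlessled_temperature() above maps Home Assistant mireds onto a 0-1 scale in kelvin space. A self-contained sketch of that arithmetic, inlining color_temperature_mired_to_kelvin as 1e6 / mired and using the 154/370 mired bounds from the properties above:

MIN_MIREDS, MAX_MIREDS = 154, 370  # coldest / warmest supported

def temperature_fraction(mireds):
    # Mired-to-kelvin conversion is 1e6 / mired.
    max_kelvin = 1e6 / MIN_MIREDS  # ~6494 K at 154 mireds
    min_kelvin = 1e6 / MAX_MIREDS  # ~2703 K at 370 mireds
    kelvin = 1e6 / mireds
    return max(0, min(1, (kelvin - min_kelvin) / (max_kelvin - min_kelvin)))

assert temperature_fraction(154) == 1.0   # coldest -> full scale
assert temperature_fraction(370) == 0.0   # warmest -> zero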
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/light/limitlessled.py
""" Support for Rflink components. For more details about this component, please refer to the documentation at https://home-assistant.io/components/rflink/ """ import asyncio from collections import defaultdict import logging import async_timeout import voluptuous as vol from homeassistant.const import ( ATTR_ENTITY_ID, CONF_COMMAND, CONF_HOST, CONF_PORT, STATE_ON, EVENT_HOMEASSISTANT_STOP) from homeassistant.core import CoreState, callback from homeassistant.exceptions import HomeAssistantError import homeassistant.helpers.config_validation as cv from homeassistant.helpers.deprecation import get_deprecated from homeassistant.helpers.entity import Entity from homeassistant.helpers.dispatcher import ( async_dispatcher_send, async_dispatcher_connect) from homeassistant.helpers.restore_state import RestoreEntity REQUIREMENTS = ['rflink==0.0.37'] _LOGGER = logging.getLogger(__name__) ATTR_EVENT = 'event' ATTR_STATE = 'state' CONF_ALIASES = 'aliases' CONF_ALIASSES = 'aliasses' CONF_GROUP_ALIASES = 'group_aliases' CONF_GROUP_ALIASSES = 'group_aliasses' CONF_GROUP = 'group' CONF_NOGROUP_ALIASES = 'nogroup_aliases' CONF_NOGROUP_ALIASSES = 'nogroup_aliasses' CONF_DEVICE_DEFAULTS = 'device_defaults' CONF_DEVICE_ID = 'device_id' CONF_DEVICES = 'devices' CONF_AUTOMATIC_ADD = 'automatic_add' CONF_FIRE_EVENT = 'fire_event' CONF_IGNORE_DEVICES = 'ignore_devices' CONF_RECONNECT_INTERVAL = 'reconnect_interval' CONF_SIGNAL_REPETITIONS = 'signal_repetitions' CONF_WAIT_FOR_ACK = 'wait_for_ack' DATA_DEVICE_REGISTER = 'rflink_device_register' DATA_ENTITY_LOOKUP = 'rflink_entity_lookup' DATA_ENTITY_GROUP_LOOKUP = 'rflink_entity_group_only_lookup' DEFAULT_RECONNECT_INTERVAL = 10 DEFAULT_SIGNAL_REPETITIONS = 1 CONNECTION_TIMEOUT = 10 EVENT_BUTTON_PRESSED = 'button_pressed' EVENT_KEY_COMMAND = 'command' EVENT_KEY_ID = 'id' EVENT_KEY_SENSOR = 'sensor' EVENT_KEY_UNIT = 'unit' RFLINK_GROUP_COMMANDS = ['allon', 'alloff'] DOMAIN = 'rflink' SERVICE_SEND_COMMAND = 'send_command' SIGNAL_AVAILABILITY = 'rflink_device_available' SIGNAL_HANDLE_EVENT = 'rflink_handle_event_{}' TMP_ENTITY = 'tmp.{}' DEVICE_DEFAULTS_SCHEMA = vol.Schema({ vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean, vol.Optional(CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS): vol.Coerce(int), }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_PORT): vol.Any(cv.port, cv.string), vol.Optional(CONF_HOST): cv.string, vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean, vol.Optional(CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL): int, vol.Optional(CONF_IGNORE_DEVICES, default=[]): vol.All(cv.ensure_list, [cv.string]), }), }, extra=vol.ALLOW_EXTRA) SEND_COMMAND_SCHEMA = vol.Schema({ vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_COMMAND): cv.string, }) def identify_event_type(event): """Look at event to determine type of device. Async friendly. 
""" if EVENT_KEY_COMMAND in event: return EVENT_KEY_COMMAND if EVENT_KEY_SENSOR in event: return EVENT_KEY_SENSOR return 'unknown' async def async_setup(hass, config): """Set up the Rflink component.""" from rflink.protocol import create_rflink_connection import serial # Allow entities to register themselves by device_id to be looked up when # new rflink events arrive to be handled hass.data[DATA_ENTITY_LOOKUP] = { EVENT_KEY_COMMAND: defaultdict(list), EVENT_KEY_SENSOR: defaultdict(list), } hass.data[DATA_ENTITY_GROUP_LOOKUP] = { EVENT_KEY_COMMAND: defaultdict(list), } # Allow platform to specify function to register new unknown devices hass.data[DATA_DEVICE_REGISTER] = {} async def async_send_command(call): """Send Rflink command.""" _LOGGER.debug('Rflink command for %s', str(call.data)) if not (await RflinkCommand.send_command( call.data.get(CONF_DEVICE_ID), call.data.get(CONF_COMMAND))): _LOGGER.error('Failed Rflink command for %s', str(call.data)) hass.services.async_register( DOMAIN, SERVICE_SEND_COMMAND, async_send_command, schema=SEND_COMMAND_SCHEMA) @callback def event_callback(event): """Handle incoming Rflink events. Rflink events arrive as dictionaries of varying content depending on their type. Identify the events and distribute accordingly. """ event_type = identify_event_type(event) _LOGGER.debug('event of type %s: %s', event_type, event) # Don't propagate non entity events (eg: version string, ack response) if event_type not in hass.data[DATA_ENTITY_LOOKUP]: _LOGGER.debug('unhandled event of type: %s', event_type) return # Lookup entities who registered this device id as device id or alias event_id = event.get(EVENT_KEY_ID, None) is_group_event = (event_type == EVENT_KEY_COMMAND and event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS) if is_group_event: entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get( event_id, []) else: entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id] _LOGGER.debug('entity_ids: %s', entity_ids) if entity_ids: # Propagate event to every entity matching the device id for entity in entity_ids: _LOGGER.debug('passing event to %s', entity) async_dispatcher_send(hass, SIGNAL_HANDLE_EVENT.format(entity), event) elif not is_group_event: # If device is not yet known, register with platform (if loaded) if event_type in hass.data[DATA_DEVICE_REGISTER]: _LOGGER.debug('device_id not known, adding new device') # Add bogus event_id first to avoid race if we get another # event before the device is created # Any additional events received before the device has been # created will thus be ignored. 
hass.data[DATA_ENTITY_LOOKUP][event_type][ event_id].append(TMP_ENTITY.format(event_id)) hass.async_create_task( hass.data[DATA_DEVICE_REGISTER][event_type](event)) else: _LOGGER.debug('device_id not known and automatic add disabled') # When connecting to tcp host instead of serial port (optional) host = config[DOMAIN].get(CONF_HOST) # TCP port when host configured, otherwise serial port port = config[DOMAIN][CONF_PORT] @callback def reconnect(exc=None): """Schedule reconnect after connection has been unexpectedly lost.""" # Reset protocol binding before starting reconnect RflinkCommand.set_rflink_protocol(None) async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False) # If HA is not stopping, initiate new connection if hass.state != CoreState.stopping: _LOGGER.warning('disconnected from Rflink, reconnecting') hass.async_create_task(connect()) async def connect(): """Set up connection and hook it into HA for reconnect/shutdown.""" _LOGGER.info('Initiating Rflink connection') # Rflink create_rflink_connection decides based on the value of host # (string or None) if serial or tcp mode should be used # Initiate serial/tcp connection to Rflink gateway connection = create_rflink_connection( port=port, host=host, event_callback=event_callback, disconnect_callback=reconnect, loop=hass.loop, ignore=config[DOMAIN][CONF_IGNORE_DEVICES] ) try: with async_timeout.timeout(CONNECTION_TIMEOUT, loop=hass.loop): transport, protocol = await connection except (serial.serialutil.SerialException, ConnectionRefusedError, TimeoutError, OSError, asyncio.TimeoutError) as exc: reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL] _LOGGER.exception( "Error connecting to Rflink, reconnecting in %s", reconnect_interval) # Connection to Rflink device is lost, make entities unavailable async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False) hass.loop.call_later(reconnect_interval, reconnect, exc) return # There is a valid connection to a Rflink device now so # mark entities as available async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True) # Bind protocol to command class to allow entities to send commands RflinkCommand.set_rflink_protocol( protocol, config[DOMAIN][CONF_WAIT_FOR_ACK]) # handle shutdown of Rflink asyncio transport hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, lambda x: transport.close()) _LOGGER.info('Connected to Rflink') hass.async_create_task(connect()) return True class RflinkDevice(Entity): """Representation of a Rflink device. Contains the common logic for Rflink entities. 
""" platform = None _state = None _available = True def __init__(self, device_id, initial_event=None, name=None, aliases=None, group=True, group_aliases=None, nogroup_aliases=None, fire_event=False, signal_repetitions=DEFAULT_SIGNAL_REPETITIONS): """Initialize the device.""" # Rflink specific attributes for every component type self._initial_event = initial_event self._device_id = device_id if name: self._name = name else: self._name = device_id self._aliases = aliases self._group = group self._group_aliases = group_aliases self._nogroup_aliases = nogroup_aliases self._should_fire_event = fire_event self._signal_repetitions = signal_repetitions @callback def handle_event_callback(self, event): """Handle incoming event for device type.""" # Call platform specific event handler self._handle_event(event) # Propagate changes through ha self.async_schedule_update_ha_state() # Put command onto bus for user to subscribe to if self._should_fire_event and identify_event_type( event) == EVENT_KEY_COMMAND: self.hass.bus.async_fire(EVENT_BUTTON_PRESSED, { ATTR_ENTITY_ID: self.entity_id, ATTR_STATE: event[EVENT_KEY_COMMAND], }) _LOGGER.debug("Fired bus event for %s: %s", self.entity_id, event[EVENT_KEY_COMMAND]) def _handle_event(self, event): """Platform specific event handler.""" raise NotImplementedError() @property def should_poll(self): """No polling needed.""" return False @property def name(self): """Return a name for the device.""" return self._name @property def is_on(self): """Return true if device is on.""" if self.assumed_state: return False return self._state @property def assumed_state(self): """Assume device state until first device event sets state.""" return self._state is None @property def available(self): """Return True if entity is available.""" return self._available @callback def _availability_callback(self, availability): """Update availability state.""" self._available = availability self.async_schedule_update_ha_state() async def async_added_to_hass(self): """Register update callback.""" # Remove temporary bogus entity_id if added tmp_entity = TMP_ENTITY.format(self._device_id) if tmp_entity in self.hass.data[DATA_ENTITY_LOOKUP][ EVENT_KEY_COMMAND][self._device_id]: self.hass.data[DATA_ENTITY_LOOKUP][ EVENT_KEY_COMMAND][self._device_id].remove(tmp_entity) # Register id and aliases self.hass.data[DATA_ENTITY_LOOKUP][ EVENT_KEY_COMMAND][self._device_id].append(self.entity_id) if self._group: self.hass.data[DATA_ENTITY_GROUP_LOOKUP][ EVENT_KEY_COMMAND][self._device_id].append(self.entity_id) # aliases respond to both normal and group commands (allon/alloff) if self._aliases: for _id in self._aliases: self.hass.data[DATA_ENTITY_LOOKUP][ EVENT_KEY_COMMAND][_id].append(self.entity_id) self.hass.data[DATA_ENTITY_GROUP_LOOKUP][ EVENT_KEY_COMMAND][_id].append(self.entity_id) # group_aliases only respond to group commands (allon/alloff) if self._group_aliases: for _id in self._group_aliases: self.hass.data[DATA_ENTITY_GROUP_LOOKUP][ EVENT_KEY_COMMAND][_id].append(self.entity_id) # nogroup_aliases only respond to normal commands if self._nogroup_aliases: for _id in self._nogroup_aliases: self.hass.data[DATA_ENTITY_LOOKUP][ EVENT_KEY_COMMAND][_id].append(self.entity_id) async_dispatcher_connect(self.hass, SIGNAL_AVAILABILITY, self._availability_callback) async_dispatcher_connect(self.hass, SIGNAL_HANDLE_EVENT.format(self.entity_id), self.handle_event_callback) # Process the initial event now that the entity is created if self._initial_event: self.handle_event_callback(self._initial_event) 
class RflinkCommand(RflinkDevice): """Singleton class to make Rflink command interface available to entities. This class is to be inherited by every Entity class that is actionable (switches/lights). It exposes the Rflink command interface for these entities. The Rflink interface is managed at the class level and set during setup (and reset on reconnect). """ # Keep repetition tasks to cancel if state is changed before repetitions # are sent _repetition_task = None _protocol = None @classmethod def set_rflink_protocol(cls, protocol, wait_ack=None): """Set the Rflink asyncio protocol as a class variable.""" cls._protocol = protocol if wait_ack is not None: cls._wait_ack = wait_ack @classmethod def is_connected(cls): """Return connection status.""" return bool(cls._protocol) @classmethod async def send_command(cls, device_id, action): """Send device command to Rflink and wait for acknowledgement.""" return await cls._protocol.send_command_ack(device_id, action) async def _async_handle_command(self, command, *args): """Do bookkeeping for command, send it to rflink and update state.""" self.cancel_queued_send_commands() if command == 'turn_on': cmd = 'on' self._state = True elif command == 'turn_off': cmd = 'off' self._state = False elif command == 'dim': # convert brightness to rflink dim level cmd = str(int(args[0] / 17)) self._state = True elif command == 'toggle': cmd = 'on' # if the state is unknown or false, it gets set as true # if the state is true, it gets set as false self._state = self._state in [None, False] # Cover options for RFlink elif command == 'close_cover': cmd = 'DOWN' self._state = False elif command == 'open_cover': cmd = 'UP' self._state = True elif command == 'stop_cover': cmd = 'STOP' self._state = True # Send initial command and queue repetitions. # This allows the entity state to be updated quickly without having to # wait for all repetitions to be sent await self._async_send_command(cmd, self._signal_repetitions) # Update state of entity await self.async_update_ha_state() def cancel_queued_send_commands(self): """Cancel queued signal repetition commands. For example when the user changes state while repetitions are still queued for broadcast, or when an incoming Rflink command (remote switch) changes the state. """ # cancel any outstanding tasks from the previous state change if self._repetition_task: self._repetition_task.cancel() async def _async_send_command(self, cmd, repetitions): """Send a command for device to Rflink gateway.""" _LOGGER.debug( "Sending command: %s to Rflink device: %s", cmd, self._device_id) if not self.is_connected(): raise HomeAssistantError('Cannot send command, not connected!') if self._wait_ack: # Puts command on outgoing buffer then waits for Rflink to confirm # the command has been sent out in the ether. await self._protocol.send_command_ack(self._device_id, cmd) else: # Puts command on outgoing buffer and returns straight away. # Rflink protocol/transport handles asynchronous writing of buffer # to serial/tcp device. Does not wait for command send # confirmation.
self._protocol.send_command(self._device_id, cmd) if repetitions > 1: self._repetition_task = self.hass.async_create_task( self._async_send_command(cmd, repetitions - 1)) class SwitchableRflinkDevice(RflinkCommand, RestoreEntity): """Rflink entity which can switch on/off (eg: light, switch).""" async def async_added_to_hass(self): """Restore RFLink device state (ON/OFF).""" await super().async_added_to_hass() old_state = await self.async_get_last_state() if old_state is not None: self._state = old_state.state == STATE_ON def _handle_event(self, event): """Adjust state if Rflink picks up a remote command for this device.""" self.cancel_queued_send_commands() command = event['command'] if command in ['on', 'allon']: self._state = True elif command in ['off', 'alloff']: self._state = False def async_turn_on(self, **kwargs): """Turn the device on.""" return self._async_handle_command("turn_on") def async_turn_off(self, **kwargs): """Turn the device off.""" return self._async_handle_command("turn_off") DEPRECATED_CONFIG_OPTIONS = [ CONF_ALIASSES, CONF_GROUP_ALIASSES, CONF_NOGROUP_ALIASSES] REPLACEMENT_CONFIG_OPTIONS = [ CONF_ALIASES, CONF_GROUP_ALIASES, CONF_NOGROUP_ALIASES] def remove_deprecated(config): """Remove deprecated config options from device config.""" for index, deprecated_option in enumerate(DEPRECATED_CONFIG_OPTIONS): if deprecated_option in config: replacement_option = REPLACEMENT_CONFIG_OPTIONS[index] # generate deprecation warning get_deprecated(config, replacement_option, deprecated_option) # move the deprecated option's value onto its replacement key config[replacement_option] = config.pop(deprecated_option)
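The routing in async_setup boils down to defaultdict(list) tables keyed by device id: entities register their ids and aliases, and incoming events are fanned out by id. A toy sketch of that pattern in isolation (register/route are hypothetical names and the device ids are made up):

from collections import defaultdict

# Mirrors hass.data[DATA_ENTITY_LOOKUP]: one list of entity ids per device id.
command_lookup = defaultdict(list)

def register(device_id, entity_id, aliases=()):
    # An entity answers to its own id and to every configured alias.
    command_lookup[device_id].append(entity_id)
    for alias in aliases:
        command_lookup[alias].append(entity_id)

def route(event):
    # event_callback() fans an incoming event out to every registered entity.
    return command_lookup[event['id']]

register('newkaku_00af1234_1', 'switch.kitchen', aliases=['kaku_2_a'])
assert route({'id': 'kaku_2_a', 'command': 'on'}) == ['switch.kitchen']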
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
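The in-range tests above probe a hold window bounded by 'held_more_than' and 'held_less_than'. A minimal sketch of that window logic follows; the helper name in_hold_window and the strict comparisons are assumptions for illustration, not the litejet trigger's actual implementation.

# Sketch only: the window the in-range tests exercise, with
# held_more_than=100ms and held_less_than=300ms.
from datetime import timedelta

def in_hold_window(hold, more_than=timedelta(milliseconds=100),
                   less_than=timedelta(milliseconds=300)):
    """Return True if a press held for `hold` should fire the trigger."""
    return more_than < hold < less_than

assert not in_hold_window(timedelta(seconds=0.05))  # too short
assert in_hold_window(timedelta(seconds=0.2))       # just right
assert not in_hold_window(timedelta(seconds=0.4))   # too long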
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/rflink/__init__.py
""" Support for Concord232 alarm control panels. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/alarm_control_panel.concord232/ """ import datetime import logging import requests import voluptuous as vol import homeassistant.components.alarm_control_panel as alarm import homeassistant.helpers.config_validation as cv from homeassistant.components.alarm_control_panel import PLATFORM_SCHEMA from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED) REQUIREMENTS = ['concord232==0.15'] _LOGGER = logging.getLogger(__name__) DEFAULT_HOST = 'localhost' DEFAULT_NAME = 'CONCORD232' DEFAULT_PORT = 5007 SCAN_INTERVAL = datetime.timedelta(seconds=10) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Concord232 alarm control panel platform.""" name = config.get(CONF_NAME) host = config.get(CONF_HOST) port = config.get(CONF_PORT) url = 'http://{}:{}'.format(host, port) try: add_entities([Concord232Alarm(url, name)], True) except requests.exceptions.ConnectionError as ex: _LOGGER.error("Unable to connect to Concord232: %s", str(ex)) class Concord232Alarm(alarm.AlarmControlPanel): """Representation of the Concord232-based alarm panel.""" def __init__(self, url, name): """Initialize the Concord232 alarm panel.""" from concord232 import client as concord232_client self._state = None self._name = name self._url = url self._alarm = concord232_client.Client(self._url) self._alarm.partitions = self._alarm.list_partitions() self._alarm.last_partition_update = datetime.datetime.now() @property def name(self): """Return the name of the device.""" return self._name @property def code_format(self): """Return the characters if code is defined.""" return alarm.FORMAT_NUMBER @property def state(self): """Return the state of the device.""" return self._state def update(self): """Update values from API.""" try: part = self._alarm.list_partitions()[0] except requests.exceptions.ConnectionError as ex: _LOGGER.error("Unable to connect to %(host)s: %(reason)s", dict(host=self._url, reason=ex)) return except IndexError: _LOGGER.error("Concord232 reports no partitions") return if part['arming_level'] == 'Off': self._state = STATE_ALARM_DISARMED elif 'Home' in part['arming_level']: self._state = STATE_ALARM_ARMED_HOME else: self._state = STATE_ALARM_ARMED_AWAY def alarm_disarm(self, code=None): """Send disarm command.""" self._alarm.disarm(code) def alarm_arm_home(self, code=None): """Send arm home command.""" self._alarm.arm('stay') def alarm_arm_away(self, code=None): """Send arm away command.""" self._alarm.arm('away')
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/alarm_control_panel/concord232.py
"""Constants for Google Hangouts Component.""" import logging import voluptuous as vol from homeassistant.components.notify \ import ATTR_MESSAGE, ATTR_TARGET, ATTR_DATA import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger('homeassistant.components.hangouts') DOMAIN = 'hangouts' CONF_2FA = '2fa' CONF_REFRESH_TOKEN = 'refresh_token' CONF_BOT = 'bot' CONF_CONVERSATIONS = 'conversations' CONF_DEFAULT_CONVERSATIONS = 'default_conversations' CONF_ERROR_SUPPRESSED_CONVERSATIONS = 'error_suppressed_conversations' CONF_INTENTS = 'intents' CONF_INTENT_TYPE = 'intent_type' CONF_SENTENCES = 'sentences' CONF_MATCHERS = 'matchers' INTENT_HELP = 'HangoutsHelp' EVENT_HANGOUTS_CONNECTED = 'hangouts_connected' EVENT_HANGOUTS_DISCONNECTED = 'hangouts_disconnected' EVENT_HANGOUTS_USERS_CHANGED = 'hangouts_users_changed' EVENT_HANGOUTS_CONVERSATIONS_CHANGED = 'hangouts_conversations_changed' EVENT_HANGOUTS_CONVERSATIONS_RESOLVED = 'hangouts_conversations_resolved' EVENT_HANGOUTS_MESSAGE_RECEIVED = 'hangouts_message_received' CONF_CONVERSATION_ID = 'id' CONF_CONVERSATION_NAME = 'name' SERVICE_SEND_MESSAGE = 'send_message' SERVICE_UPDATE = 'update' SERVICE_RECONNECT = 'reconnect' TARGETS_SCHEMA = vol.All( vol.Schema({ vol.Exclusive(CONF_CONVERSATION_ID, 'id or name'): cv.string, vol.Exclusive(CONF_CONVERSATION_NAME, 'id or name'): cv.string }), cv.has_at_least_one_key(CONF_CONVERSATION_ID, CONF_CONVERSATION_NAME) ) MESSAGE_SEGMENT_SCHEMA = vol.Schema({ vol.Required('text'): cv.string, vol.Optional('is_bold'): cv.boolean, vol.Optional('is_italic'): cv.boolean, vol.Optional('is_strikethrough'): cv.boolean, vol.Optional('is_underline'): cv.boolean, vol.Optional('parse_str'): cv.boolean, vol.Optional('link_target'): cv.string }) MESSAGE_DATA_SCHEMA = vol.Schema({ vol.Optional('image_file'): cv.string, vol.Optional('image_url'): cv.string }) MESSAGE_SCHEMA = vol.Schema({ vol.Required(ATTR_TARGET): [TARGETS_SCHEMA], vol.Required(ATTR_MESSAGE): [MESSAGE_SEGMENT_SCHEMA], vol.Optional(ATTR_DATA): MESSAGE_DATA_SCHEMA }) INTENT_SCHEMA = vol.All( # Basic Schema vol.Schema({ vol.Required(CONF_SENTENCES): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_CONVERSATIONS): [TARGETS_SCHEMA] }), )
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/hangouts/const.py
""" Support for Nest thermostats. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/climate.nest/ """ import logging import voluptuous as vol from homeassistant.components.nest import ( DATA_NEST, SIGNAL_NEST_UPDATE, DOMAIN as NEST_DOMAIN) from homeassistant.components.climate import ( STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_ECO, ClimateDevice, PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, ATTR_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_HIGH, SUPPORT_TARGET_TEMPERATURE_LOW, SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE, SUPPORT_FAN_MODE) from homeassistant.const import ( TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF) from homeassistant.helpers.dispatcher import async_dispatcher_connect DEPENDENCIES = ['nest'] _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_SCAN_INTERVAL): vol.All(vol.Coerce(int), vol.Range(min=1)), }) NEST_MODE_HEAT_COOL = 'heat-cool' def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Nest thermostat. No longer in use. """ async def async_setup_entry(hass, entry, async_add_entities): """Set up the Nest climate device based on a config entry.""" temp_unit = hass.config.units.temperature_unit thermostats = await hass.async_add_job(hass.data[DATA_NEST].thermostats) all_devices = [NestThermostat(structure, device, temp_unit) for structure, device in thermostats] async_add_entities(all_devices, True) class NestThermostat(ClimateDevice): """Representation of a Nest thermostat.""" def __init__(self, structure, device, temp_unit): """Initialize the thermostat.""" self._unit = temp_unit self.structure = structure self.device = device self._fan_list = [STATE_ON, STATE_AUTO] # Set the default supported features self._support_flags = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE) # Not all nest devices support cooling and heating remove unused self._operation_list = [STATE_OFF] # Add supported nest thermostat features if self.device.can_heat: self._operation_list.append(STATE_HEAT) if self.device.can_cool: self._operation_list.append(STATE_COOL) if self.device.can_heat and self.device.can_cool: self._operation_list.append(STATE_AUTO) self._support_flags = (self._support_flags | SUPPORT_TARGET_TEMPERATURE_HIGH | SUPPORT_TARGET_TEMPERATURE_LOW) self._operation_list.append(STATE_ECO) # feature of device self._has_fan = self.device.has_fan if self._has_fan: self._support_flags = (self._support_flags | SUPPORT_FAN_MODE) # data attributes self._away = None self._location = None self._name = None self._humidity = None self._target_temperature = None self._temperature = None self._temperature_scale = None self._mode = None self._fan = None self._eco_temperature = None self._is_locked = None self._locked_temperature = None self._min_temperature = None self._max_temperature = None @property def should_poll(self): """Do not need poll thanks using Nest streaming API.""" return False async def async_added_to_hass(self): """Register update signal handler.""" async def async_update_state(): """Update device state.""" await self.async_update_ha_state(True) async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE, async_update_state) @property def supported_features(self): """Return the list of supported features.""" return self._support_flags @property def unique_id(self): """Return unique ID for this device.""" return self.device.serial @property def device_info(self): 
"""Return information about the device.""" return { 'identifiers': { (NEST_DOMAIN, self.device.device_id), }, 'name': self.device.name_long, 'manufacturer': 'Nest Labs', 'model': "Thermostat", 'sw_version': self.device.software_version, } @property def name(self): """Return the name of the nest, if any.""" return self._name @property def temperature_unit(self): """Return the unit of measurement.""" return self._temperature_scale @property def current_temperature(self): """Return the current temperature.""" return self._temperature @property def current_operation(self): """Return current operation ie. heat, cool, idle.""" if self._mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]: return self._mode if self._mode == NEST_MODE_HEAT_COOL: return STATE_AUTO return None @property def target_temperature(self): """Return the temperature we try to reach.""" if self._mode not in (NEST_MODE_HEAT_COOL, STATE_ECO): return self._target_temperature return None @property def target_temperature_low(self): """Return the lower bound temperature we try to reach.""" if self._mode == STATE_ECO: return self._eco_temperature[0] if self._mode == NEST_MODE_HEAT_COOL: return self._target_temperature[0] return None @property def target_temperature_high(self): """Return the upper bound temperature we try to reach.""" if self._mode == STATE_ECO: return self._eco_temperature[1] if self._mode == NEST_MODE_HEAT_COOL: return self._target_temperature[1] return None @property def is_away_mode_on(self): """Return if away mode is on.""" return self._away def set_temperature(self, **kwargs): """Set new target temperature.""" import nest temp = None target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW) target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH) if self._mode == NEST_MODE_HEAT_COOL: if target_temp_low is not None and target_temp_high is not None: temp = (target_temp_low, target_temp_high) _LOGGER.debug("Nest set_temperature-output-value=%s", temp) else: temp = kwargs.get(ATTR_TEMPERATURE) _LOGGER.debug("Nest set_temperature-output-value=%s", temp) try: if temp is not None: self.device.target = temp except nest.nest.APIError as api_error: _LOGGER.error("An error occurred while setting temperature: %s", api_error) # restore target temperature self.schedule_update_ha_state(True) def set_operation_mode(self, operation_mode): """Set operation mode.""" if operation_mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]: device_mode = operation_mode elif operation_mode == STATE_AUTO: device_mode = NEST_MODE_HEAT_COOL else: device_mode = STATE_OFF _LOGGER.error( "An error occurred while setting device mode. 
" "Invalid operation mode: %s", operation_mode) self.device.mode = device_mode @property def operation_list(self): """List of available operation modes.""" return self._operation_list def turn_away_mode_on(self): """Turn away on.""" self.structure.away = True def turn_away_mode_off(self): """Turn away off.""" self.structure.away = False @property def current_fan_mode(self): """Return whether the fan is on.""" if self._has_fan: # Return whether the fan is on return STATE_ON if self._fan else STATE_AUTO # No Fan available so disable slider return None @property def fan_list(self): """List of available fan modes.""" if self._has_fan: return self._fan_list return None def set_fan_mode(self, fan_mode): """Turn fan on/off.""" if self._has_fan: self.device.fan = fan_mode.lower() @property def min_temp(self): """Identify min_temp in Nest API or defaults if not available.""" return self._min_temperature @property def max_temp(self): """Identify max_temp in Nest API or defaults if not available.""" return self._max_temperature def update(self): """Cache value from Python-nest.""" self._location = self.device.where self._name = self.device.name self._humidity = self.device.humidity self._temperature = self.device.temperature self._mode = self.device.mode self._target_temperature = self.device.target self._fan = self.device.fan self._away = self.structure.away == 'away' self._eco_temperature = self.device.eco_temperature self._locked_temperature = self.device.locked_temperature self._min_temperature = self.device.min_temperature self._max_temperature = self.device.max_temperature self._is_locked = self.device.is_locked if self.device.temperature_scale == 'C': self._temperature_scale = TEMP_CELSIUS else: self._temperature_scale = TEMP_FAHRENHEIT
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/nest/climate.py
""" Add support for the Xiaomi TVs. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/xiaomi_tv/ """ import logging import voluptuous as vol from homeassistant.components.media_player import ( MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.components.media_player.const import ( SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_STEP) from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pymitv==1.4.3'] DEFAULT_NAME = "Xiaomi TV" _LOGGER = logging.getLogger(__name__) SUPPORT_XIAOMI_TV = SUPPORT_VOLUME_STEP | SUPPORT_TURN_ON | \ SUPPORT_TURN_OFF # No host is needed for configuration, however it can be set. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, }) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Xiaomi TV platform.""" from pymitv import Discover # If a hostname is set. Discovery is skipped. host = config.get(CONF_HOST) name = config.get(CONF_NAME) if host is not None: # Check if there's a valid TV at the IP address. if not Discover().check_ip(host): _LOGGER.error( "Could not find Xiaomi TV with specified IP: %s", host) else: # Register TV with Home Assistant. add_entities([XiaomiTV(host, name)]) else: # Otherwise, discover TVs on network. add_entities(XiaomiTV(tv, DEFAULT_NAME) for tv in Discover().scan()) class XiaomiTV(MediaPlayerDevice): """Represent the Xiaomi TV for Home Assistant.""" def __init__(self, ip, name): """Receive IP address and name to construct class.""" # Import pymitv library. from pymitv import TV # Initialize the Xiaomi TV. self._tv = TV(ip) # Default name value, only to be overridden by user. self._name = name self._state = STATE_OFF @property def name(self): """Return the display name of this TV.""" return self._name @property def state(self): """Return _state variable, containing the appropriate constant.""" return self._state @property def assumed_state(self): """Indicate that state is assumed.""" return True @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_XIAOMI_TV def turn_off(self): """ Instruct the TV to turn sleep. This is done instead of turning off, because the TV won't accept any input when turned off. Thus, the user would be unable to turn the TV back on, unless it's done manually. """ if self._state is not STATE_OFF: self._tv.sleep() self._state = STATE_OFF def turn_on(self): """Wake the TV back up from sleep.""" if self._state is not STATE_ON: self._tv.wake() self._state = STATE_ON def volume_up(self): """Increase volume by one.""" self._tv.volume_up() def volume_down(self): """Decrease volume by one.""" self._tv.volume_down()
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/media_player/xiaomi_tv.py
""" Interfaces with Z-Wave sensors. For more details about this platform, please refer to the documentation https://home-assistant.io/components/binary_sensor.zwave/ """ import logging import datetime import homeassistant.util.dt as dt_util from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.event import track_point_in_time from homeassistant.components import zwave from homeassistant.components.zwave import workaround from homeassistant.components.binary_sensor import ( DOMAIN, BinarySensorDevice) _LOGGER = logging.getLogger(__name__) DEPENDENCIES = [] async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Old method of setting up Z-Wave binary sensors.""" pass async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Z-Wave binary sensors from Config Entry.""" @callback def async_add_binary_sensor(binary_sensor): """Add Z-Wave binary sensor.""" async_add_entities([binary_sensor]) async_dispatcher_connect(hass, 'zwave_new_binary_sensor', async_add_binary_sensor) def get_device(values, **kwargs): """Create Z-Wave entity device.""" device_mapping = workaround.get_device_mapping(values.primary) if device_mapping == workaround.WORKAROUND_NO_OFF_EVENT: return ZWaveTriggerSensor(values, "motion") if workaround.get_device_component_mapping(values.primary) == DOMAIN: return ZWaveBinarySensor(values, None) if values.primary.command_class == zwave.const.COMMAND_CLASS_SENSOR_BINARY: return ZWaveBinarySensor(values, None) return None class ZWaveBinarySensor(BinarySensorDevice, zwave.ZWaveDeviceEntity): """Representation of a binary sensor within Z-Wave.""" def __init__(self, values, device_class): """Initialize the sensor.""" zwave.ZWaveDeviceEntity.__init__(self, values, DOMAIN) self._sensor_type = device_class self._state = self.values.primary.data def update_properties(self): """Handle data changes for node values.""" self._state = self.values.primary.data @property def is_on(self): """Return true if the binary sensor is on.""" return self._state @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return self._sensor_type class ZWaveTriggerSensor(ZWaveBinarySensor): """Representation of a stateless sensor within Z-Wave.""" def __init__(self, values, device_class): """Initialize the sensor.""" super(ZWaveTriggerSensor, self).__init__(values, device_class) # Set default off delay to 60 sec self.re_arm_sec = 60 self.invalidate_after = None def update_properties(self): """Handle value changes for this entity's node.""" self._state = self.values.primary.data _LOGGER.debug('off_delay=%s', self.values.off_delay) # Set re_arm_sec if off_delay is provided from the sensor if self.values.off_delay: _LOGGER.debug('off_delay.data=%s', self.values.off_delay.data) self.re_arm_sec = self.values.off_delay.data * 8 # only allow this value to be true for re_arm secs if not self.hass: return self.invalidate_after = dt_util.utcnow() + datetime.timedelta( seconds=self.re_arm_sec) track_point_in_time( self.hass, self.async_update_ha_state, self.invalidate_after) @property def is_on(self): """Return true if movement has happened within the rearm time.""" return self._state and \ (self.invalidate_after is None or self.invalidate_after > dt_util.utcnow())
"""The tests for the litejet component.""" import logging from unittest import mock from datetime import timedelta import pytest from homeassistant import setup import homeassistant.util.dt as dt_util from homeassistant.components import litejet import homeassistant.components.automation as automation from tests.common import (async_fire_time_changed, async_mock_service) _LOGGER = logging.getLogger(__name__) ENTITY_SWITCH = 'switch.mock_switch_1' ENTITY_SWITCH_NUMBER = 1 ENTITY_OTHER_SWITCH = 'switch.mock_switch_2' ENTITY_OTHER_SWITCH_NUMBER = 2 @pytest.fixture def calls(hass): """Track calls to a mock serivce.""" return async_mock_service(hass, 'test', 'automation') def get_switch_name(number): """Get a mock switch name.""" return "Mock Switch #"+str(number) @pytest.fixture def mock_lj(hass): """Initialize components.""" with mock.patch('pylitejet.LiteJet') as mock_pylitejet: mock_lj = mock_pylitejet.return_value mock_lj.switch_pressed_callbacks = {} mock_lj.switch_released_callbacks = {} def on_switch_pressed(number, callback): mock_lj.switch_pressed_callbacks[number] = callback def on_switch_released(number, callback): mock_lj.switch_released_callbacks[number] = callback mock_lj.loads.return_value = range(0) mock_lj.button_switches.return_value = range(1, 3) mock_lj.all_switches.return_value = range(1, 6) mock_lj.scenes.return_value = range(0) mock_lj.get_switch_name.side_effect = get_switch_name mock_lj.on_switch_pressed.side_effect = on_switch_pressed mock_lj.on_switch_released.side_effect = on_switch_released config = { 'litejet': { 'port': '/tmp/this_will_be_mocked' } } assert hass.loop.run_until_complete(setup.async_setup_component( hass, litejet.DOMAIN, config)) mock_lj.start_time = dt_util.utcnow() mock_lj.last_delta = timedelta(0) return mock_lj async def simulate_press(hass, mock_lj, number): """Test to simulate a press.""" _LOGGER.info('*** simulate press of %d', number) callback = mock_lj.switch_pressed_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_release(hass, mock_lj, number): """Test to simulate releasing.""" _LOGGER.info('*** simulate release of %d', number) callback = mock_lj.switch_released_callbacks.get(number) with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + mock_lj.last_delta): if callback is not None: await hass.async_add_job(callback) await hass.async_block_till_done() async def simulate_time(hass, mock_lj, delta): """Test to simulate time.""" _LOGGER.info( '*** simulate time change by %s: %s', delta, mock_lj.start_time + delta) mock_lj.last_delta = delta with mock.patch('homeassistant.helpers.condition.dt_util.utcnow', return_value=mock_lj.start_time + delta): _LOGGER.info('now=%s', dt_util.utcnow()) async_fire_time_changed(hass, mock_lj.start_time + delta) await hass.async_block_till_done() _LOGGER.info('done with now=%s', dt_util.utcnow()) async def setup_automation(hass, trigger): """Test setting up the automation.""" assert await setup.async_setup_component(hass, automation.DOMAIN, { automation.DOMAIN: [ { 'alias': 'My Test', 'trigger': trigger, 'action': { 'service': 'test.automation' } } ] }) await hass.async_block_till_done() async def test_simple(hass, calls, mock_lj): """Test the simplest form of a LiteJet trigger.""" await setup_automation(hass, { 'platform': 'litejet', 'number': 
ENTITY_OTHER_SWITCH_NUMBER }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_more_than_short(hass, calls, mock_lj): """Test a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_more_than_long(hass, calls, mock_lj): """Test a hold that is long enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 1 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_short(hass, calls, mock_lj): """Test a hold that is short enough.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.1)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_less_than_long(hass, calls, mock_lj): """Test a hold that is too long.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_less_than': { 'milliseconds': '200' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.3)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_short(hass, calls, mock_lj): """Test an in-range trigger with a too short hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) await simulate_time(hass, mock_lj, timedelta(seconds=0.05)) await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 async def test_held_in_range_just_right(hass, calls, mock_lj): """Test an in-range trigger with a just right hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await simulate_time(hass, mock_lj, timedelta(seconds=0.2)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 1 async def test_held_in_range_long(hass, calls, mock_lj): """Test an in-range trigger with a too long hold.""" await setup_automation(hass, { 'platform': 'litejet', 'number': ENTITY_OTHER_SWITCH_NUMBER, 'held_more_than': { 'milliseconds': '100' }, 'held_less_than': { 'milliseconds': '300' } }) await simulate_press(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0 await 
simulate_time(hass, mock_lj, timedelta(seconds=0.4)) assert len(calls) == 0 await simulate_release(hass, mock_lj, ENTITY_OTHER_SWITCH_NUMBER) assert len(calls) == 0
PetePriority/home-assistant
tests/components/automation/test_litejet.py
homeassistant/components/zwave/binary_sensor.py