Dataset columns:
    repo_name  -- string, lengths 5 to 100
    path       -- string, lengths 4 to 251
    copies     -- string, 990 distinct values
    size       -- string, lengths 4 to 7
    content    -- string, lengths 499 to 1.05M
    license    -- string, 15 distinct values
repo_name: ryfeus/lambda-packs
path: HDF4_H5_NETCDF/source2.7/numpy/distutils/tests/test_exec_command.py
copies: 18
size: 7146
content:
from __future__ import division, absolute_import, print_function

import os
import sys
from tempfile import TemporaryFile

from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
from numpy.testing import tempdir, assert_

# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
if sys.version_info[0] >= 3:
    from io import StringIO
else:
    from StringIO import StringIO


class redirect_stdout(object):
    """Context manager to redirect stdout for exec_command test."""
    def __init__(self, stdout=None):
        self._stdout = stdout or sys.stdout

    def __enter__(self):
        self.old_stdout = sys.stdout
        sys.stdout = self._stdout

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush()
        sys.stdout = self.old_stdout
        # note: closing sys.stdout won't close it.
        self._stdout.close()


class redirect_stderr(object):
    """Context manager to redirect stderr for exec_command test."""
    def __init__(self, stderr=None):
        self._stderr = stderr or sys.stderr

    def __enter__(self):
        self.old_stderr = sys.stderr
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stderr.flush()
        sys.stderr = self.old_stderr
        # note: closing sys.stderr won't close it.
        self._stderr.close()


class emulate_nonposix(object):
    """Context manager to emulate os.name != 'posix' """
    def __init__(self, osname='non-posix'):
        self._new_name = osname

    def __enter__(self):
        self._old_name = os.name
        os.name = self._new_name

    def __exit__(self, exc_type, exc_value, traceback):
        os.name = self._old_name


def test_exec_command_stdout():
    # Regression test for gh-2999 and gh-2915.
    # There are several packages (nose, scipy.weave.inline, Sage inline
    # Fortran) that replace stdout, in which case it doesn't have a fileno
    # method. This is tested here, with a do-nothing command that fails if the
    # presence of fileno() is assumed in exec_command.

    # The code has a special case for posix systems, so if we are on posix test
    # both that the special case works and that the generic code works.

    # Test posix version:
    with redirect_stdout(StringIO()):
        with redirect_stderr(TemporaryFile()):
            exec_command.exec_command("cd '.'")

    if os.name == 'posix':
        # Test general (non-posix) version:
        with emulate_nonposix():
            with redirect_stdout(StringIO()):
                with redirect_stderr(TemporaryFile()):
                    exec_command.exec_command("cd '.'")


def test_exec_command_stderr():
    # Test posix version:
    with redirect_stdout(TemporaryFile(mode='w+')):
        with redirect_stderr(StringIO()):
            exec_command.exec_command("cd '.'")

    if os.name == 'posix':
        # Test general (non-posix) version:
        with emulate_nonposix():
            with redirect_stdout(TemporaryFile()):
                with redirect_stderr(StringIO()):
                    exec_command.exec_command("cd '.'")


class TestExecCommand(object):
    def setup(self):
        self.pyexe = get_pythonexe()

    def check_nt(self, **kws):
        s, o = exec_command.exec_command('cmd /C echo path=%path%')
        assert_(s == 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
        assert_(s == 0)
        assert_(o == 'win32')

    def check_posix(self, **kws):
        s, o = exec_command.exec_command("echo Hello", **kws)
        assert_(s == 0)
        assert_(o == 'Hello')

        s, o = exec_command.exec_command('echo $AAA', **kws)
        assert_(s == 0)
        assert_(o == '')

        s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
        assert_(s == 0)
        assert_(o == 'Tere')

        s, o = exec_command.exec_command('echo "$AAA"', **kws)
        assert_(s == 0)
        assert_(o == '')

        if 'BBB' not in os.environ:
            os.environ['BBB'] = 'Hi'
            s, o = exec_command.exec_command('echo "$BBB"', **kws)
            assert_(s == 0)
            assert_(o == 'Hi')

            s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
            assert_(s == 0)
            assert_(o == 'Hey')

            s, o = exec_command.exec_command('echo "$BBB"', **kws)
            assert_(s == 0)
            assert_(o == 'Hi')

            del os.environ['BBB']

            s, o = exec_command.exec_command('echo "$BBB"', **kws)
            assert_(s == 0)
            assert_(o == '')

        s, o = exec_command.exec_command('this_is_not_a_command', **kws)
        assert_(s != 0)
        assert_(o != '')

        s, o = exec_command.exec_command('echo path=$PATH', **kws)
        assert_(s == 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
            self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == 'posix')

    def check_basic(self, *kws):
        s, o = exec_command.exec_command(
            '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
        assert_(s != 0)
        assert_(o != '')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.stderr.write(\'0\');'
            'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
            self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == '012')

        s, o = exec_command.exec_command(
            '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
        assert_(s == 15)
        assert_(o == '')

        s, o = exec_command.exec_command(
            '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
        assert_(s == 0)
        assert_(o == 'Heipa')

    def check_execute_in(self, **kws):
        with tempdir() as tmpdir:
            fn = "file"
            tmpfile = os.path.join(tmpdir, fn)
            f = open(tmpfile, 'w')
            f.write('Hello')
            f.close()

            s, o = exec_command.exec_command(
                '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
                (self.pyexe, fn), **kws)
            assert_(s != 0)
            assert_(o != '')
            s, o = exec_command.exec_command(
                '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
                'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
            assert_(s == 0)
            assert_(o == 'Hello')

    def test_basic(self):
        with redirect_stdout(StringIO()):
            with redirect_stderr(StringIO()):
                if os.name == "posix":
                    self.check_posix(use_tee=0)
                    self.check_posix(use_tee=1)
                elif os.name == "nt":
                    self.check_nt(use_tee=0)
                    self.check_nt(use_tee=1)
                self.check_execute_in(use_tee=0)
                self.check_execute_in(use_tee=1)
license: mit
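The file above exercises numpy.distutils.exec_command.exec_command, which runs a shell command and returns a (status, output) pair while the tests temporarily swap out sys.stdout and sys.stderr. For orientation, here is a minimal sketch of that call pattern, assuming a POSIX shell so that echo behaves as in the check_posix tests (the command string is purely illustrative):

from numpy.distutils import exec_command

# exec_command returns (exit status, captured output); status 0 means success.
# Assumes a POSIX shell is available, as in the check_posix tests above.
status, output = exec_command.exec_command("echo Hello")
assert status == 0
assert output == 'Hello'

The redirect_stdout/redirect_stderr helpers in the file exist because some packages replace sys.stdout with objects that lack fileno(); the tests confirm exec_command still works in that situation.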
repo_name: ryfeus/lambda-packs
path: Tensorflow_OpenCV_Nightly/source/numpy/add_newdocs.py
copies: 6
size: 242489
content:
""" This is only meant to add docs to objects defined in C-extension modules. The purpose is to allow easier editing of the docstrings without requiring a re-compile. NOTE: Many of the methods of ndarray have corresponding functions. If you update these docstrings, please keep also the ones in core/fromnumeric.py, core/defmatrix.py up-to-date. """ from __future__ import division, absolute_import, print_function from numpy.lib import add_newdoc ############################################################################### # # flatiter # # flatiter needs a toplevel description # ############################################################################### add_newdoc('numpy.core', 'flatiter', """ Flat iterator object to iterate over arrays. A `flatiter` iterator is returned by ``x.flat`` for any array `x`. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in row-major, C-style order (the last index varying the fastest). The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- ndarray.flat : Return a flat iterator over an array. ndarray.flatten : Returns a flattened copy of an array. Notes ----- A `flatiter` iterator can not be constructed directly from Python code by calling the `flatiter` constructor. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> type(fl) <type 'numpy.flatiter'> >>> for item in fl: ... print(item) ... 0 1 2 3 4 5 >>> fl[2:4] array([2, 3]) """) # flatiter attributes add_newdoc('numpy.core', 'flatiter', ('base', """ A reference to the array that is iterated over. Examples -------- >>> x = np.arange(5) >>> fl = x.flat >>> fl.base is x True """)) add_newdoc('numpy.core', 'flatiter', ('coords', """ An N-dimensional tuple of current coordinates. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.coords (0, 0) >>> fl.next() 0 >>> fl.coords (0, 1) """)) add_newdoc('numpy.core', 'flatiter', ('index', """ Current flat index into the array. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.index 0 >>> fl.next() 0 >>> fl.index 1 """)) # flatiter functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator """)) add_newdoc('numpy.core', 'flatiter', ('copy', """ copy() Get a copy of the iterator as a 1-D array. Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> fl = x.flat >>> fl.copy() array([0, 1, 2, 3, 4, 5]) """)) ############################################################################### # # nditer # ############################################################################### add_newdoc('numpy.core', 'nditer', """ Efficient multi-dimensional iterator object to iterate over arrays. To get started using this object, see the :ref:`introductory guide to array iteration <arrays.nditer>`. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. flags : sequence of str, optional Flags to control the behavior of the iterator. * "buffered" enables buffering when required. * "c_index" causes a C-order index to be tracked. * "f_index" causes a Fortran-order index to be tracked. * "multi_index" causes a multi-index, or a tuple of indices with one per iteration dimension, to be tracked. * "common_dtype" causes all the operands to be converted to a common data type, with copying or buffering as necessary. 
* "copy_if_overlap" causes the iterator to determine if read operands have overlap with write operands, and make temporary copies as necessary to avoid overlap. False positives (needless copying) are possible in some cases. * "delay_bufalloc" delays allocation of the buffers until a reset() call is made. Allows "allocate" operands to be initialized before their values are copied into the buffers. * "external_loop" causes the `values` given to be one-dimensional arrays with multiple values instead of zero-dimensional arrays. * "grow_inner" allows the `value` array sizes to be made larger than the buffer size when both "buffered" and "external_loop" is used. * "ranged" allows the iterator to be restricted to a sub-range of the iterindex values. * "refs_ok" enables iteration of reference types, such as object arrays. * "reduce_ok" enables iteration of "readwrite" operands which are broadcasted, also known as reduction operands. * "zerosize_ok" allows `itersize` to be zero. op_flags : list of list of str, optional This is a list of flags for each operand. At minimum, one of "readonly", "readwrite", or "writeonly" must be specified. * "readonly" indicates the operand will only be read from. * "readwrite" indicates the operand will be read from and written to. * "writeonly" indicates the operand will only be written to. * "no_broadcast" prevents the operand from being broadcasted. * "contig" forces the operand data to be contiguous. * "aligned" forces the operand data to be aligned. * "nbo" forces the operand data to be in native byte order. * "copy" allows a temporary read-only copy if required. * "updateifcopy" allows a temporary read-write copy if required. * "allocate" causes the array to be allocated if it is None in the `op` parameter. * "no_subtype" prevents an "allocate" operand from using a subtype. * "arraymask" indicates that this operand is the mask to use for selecting elements when writing to operands with the 'writemasked' flag set. The iterator does not enforce this, but when writing from a buffer back to the array, it only copies those elements indicated by this mask. * 'writemasked' indicates that only elements where the chosen 'arraymask' operand is True will be written to. * "overlap_assume_elementwise" can be used to mark operands that are accessed only in the iterator order, to allow less conservative copying when "copy_if_overlap" is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. order : {'C', 'F', 'A', 'K'}, optional Controls the iteration order. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. This also affects the element memory order of "allocate" operands, as they are allocated to be compatible with iteration order. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur when making a copy or buffering. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. 
* 'unsafe' means any data conversions may be done. op_axes : list of list of ints, optional If provided, is a list of ints or None for each operands. The list of axes for an operand is a mapping from the dimensions of the iterator to the dimensions of the operand. A value of -1 can be placed for entries, causing that dimension to be treated as "newaxis". itershape : tuple of ints, optional The desired shape of the iterator. This allows "allocate" operands with a dimension mapped by op_axes not corresponding to a dimension of a different operand to get a value not equal to 1 for that dimension. buffersize : int, optional When buffering is enabled, controls the size of the temporary buffers. Set to 0 for the default value. Attributes ---------- dtypes : tuple of dtype(s) The data types of the values provided in `value`. This may be different from the operand data types if buffering is enabled. Valid only before the iterator is closed. finished : bool Whether the iteration over the operands is finished or not. has_delayed_bufalloc : bool If True, the iterator was created with the "delay_bufalloc" flag, and no reset() function was called on it yet. has_index : bool If True, the iterator was created with either the "c_index" or the "f_index" flag, and the property `index` can be used to retrieve it. has_multi_index : bool If True, the iterator was created with the "multi_index" flag, and the property `multi_index` can be used to retrieve it. index When the "c_index" or "f_index" flag was used, this property provides access to the index. Raises a ValueError if accessed and `has_index` is False. iterationneedsapi : bool Whether iteration requires access to the Python API, for example if one of the operands is an object array. iterindex : int An index which matches the order of iteration. itersize : int Size of the iterator. itviews Structured view(s) of `operands` in memory, matching the reordered and optimized iterator access pattern. Valid only before the iterator is closed. multi_index When the "multi_index" flag was used, this property provides access to the index. Raises a ValueError if accessed accessed and `has_multi_index` is False. ndim : int The iterator's dimension. nop : int The number of iterator operands. operands : tuple of operand(s) The array(s) to be iterated over. Valid only before the iterator is closed. shape : tuple of ints Shape tuple, the shape of the iterator. value Value of `operands` at current iteration. Normally, this is a tuple of array scalars, but if the flag "external_loop" is used, it is a tuple of one dimensional arrays. Notes ----- `nditer` supersedes `flatiter`. The iterator implementation behind `nditer` is also exposed by the NumPy C API. The Python exposure supplies two iteration interfaces, one which follows the Python iterator protocol, and another which mirrors the C-style do-while pattern. The native Python approach is better in most cases, but if you need the iterator's coordinates or index, use the C-style pattern. 
Examples -------- Here is how we might write an ``iter_add`` function, using the Python iterator protocol:: def iter_add_py(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], [['readonly'], ['readonly'], ['writeonly','allocate']]) with it: for (a, b, c) in it: addop(a, b, out=c) return it.operands[2] Here is the same function, but following the C-style pattern:: def iter_add(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], [['readonly'], ['readonly'], ['writeonly','allocate']]) with it: while not it.finished: addop(it[0], it[1], out=it[2]) it.iternext() return it.operands[2] Here is an example outer product function:: def outer_it(x, y, out=None): mulop = np.multiply it = np.nditer([x, y, out], ['external_loop'], [['readonly'], ['readonly'], ['writeonly', 'allocate']], op_axes=[list(range(x.ndim)) + [-1] * y.ndim, [-1] * x.ndim + list(range(y.ndim)), None]) with it: for (a, b, c) in it: mulop(a, b, out=c) return it.operands[2] >>> a = np.arange(2)+1 >>> b = np.arange(3)+1 >>> outer_it(a,b) array([[1, 2, 3], [2, 4, 6]]) Here is an example function which operates like a "lambda" ufunc:: def luf(lamdaexpr, *args, **kwargs): "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" nargs = len(args) op = (kwargs.get('out',None),) + args it = np.nditer(op, ['buffered','external_loop'], [['writeonly','allocate','no_broadcast']] + [['readonly','nbo','aligned']]*nargs, order=kwargs.get('order','K'), casting=kwargs.get('casting','safe'), buffersize=kwargs.get('buffersize',0)) while not it.finished: it[0] = lamdaexpr(*it[1:]) it.iternext() return it.operands[0] >>> a = np.arange(5) >>> b = np.ones(5) >>> luf(lambda i,j:i*i + j/2, a, b) array([ 0.5, 1.5, 4.5, 9.5, 16.5]) If operand flags `"writeonly"` or `"readwrite"` are used the operands may be views into the original data with the `WRITEBACKIFCOPY` flag. In this case nditer must be used as a context manager or the nditer.close method must be called before using the result. The temporary data will be written back to the original data when the `__exit__` function is called but not before: >>> a = np.arange(6, dtype='i4')[::-2] >>> with nditer(a, [], ... [['writeonly', 'updateifcopy']], ... casting='unsafe', ... op_dtypes=[np.dtype('f4')]) as i: ... x = i.operands[0] ... x[:] = [-1, -2, -3] ... # a still unchanged here >>> a, x array([-1, -2, -3]), array([-1, -2, -3]) It is important to note that once the iterator is exited, dangling references (like `x` in the example) may or may not share data with the original data `a`. If writeback semantics were active, i.e. if `x.base.flags.writebackifcopy` is `True`, then exiting the iterator will sever the connection between `x` and `a`, writing to `x` will no longer write to `a`. If writeback semantics are not active, then `x.data` will still point at some part of `a.data`, and writing to one will affect the other. """) # nditer methods add_newdoc('numpy.core', 'nditer', ('copy', """ copy() Get a copy of the iterator in its current state. Examples -------- >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) >>> it.next() (array(0), array(1)) >>> it2 = it.copy() >>> it2.next() (array(1), array(2)) """)) add_newdoc('numpy.core', 'nditer', ('operands', """ operands[`Slice`] The array(s) to be iterated over. Valid only before the iterator is closed. """)) add_newdoc('numpy.core', 'nditer', ('debug_print', """ debug_print() Print the current state of the `nditer` instance and debug info to stdout. 
""")) add_newdoc('numpy.core', 'nditer', ('enable_external_loop', """ enable_external_loop() When the "external_loop" was not used during construction, but is desired, this modifies the iterator to behave as if the flag was specified. """)) add_newdoc('numpy.core', 'nditer', ('iternext', """ iternext() Check whether iterations are left, and perform a single internal iteration without returning the result. Used in the C-style pattern do-while pattern. For an example, see `nditer`. Returns ------- iternext : bool Whether or not there are iterations left. """)) add_newdoc('numpy.core', 'nditer', ('remove_axis', """ remove_axis(i) Removes axis `i` from the iterator. Requires that the flag "multi_index" be enabled. """)) add_newdoc('numpy.core', 'nditer', ('remove_multi_index', """ remove_multi_index() When the "multi_index" flag was specified, this removes it, allowing the internal iteration structure to be optimized further. """)) add_newdoc('numpy.core', 'nditer', ('reset', """ reset() Reset the iterator to its initial state. """)) add_newdoc('numpy.core', 'nested_iters', """ Create nditers for use in nested loops Create a tuple of `nditer` objects which iterate in nested loops over different axes of the op argument. The first iterator is used in the outermost loop, the last in the innermost loop. Advancing one will change the subsequent iterators to point at its new element. Parameters ---------- op : ndarray or sequence of array_like The array(s) to iterate over. axes : list of list of int Each item is used as an "op_axes" argument to an nditer flags, op_flags, op_dtypes, order, casting, buffersize (optional) See `nditer` parameters of the same name Returns ------- iters : tuple of nditer An nditer for each item in `axes`, outermost first See Also -------- nditer Examples -------- Basic usage. Note how y is the "flattened" version of [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified the first iter's axes as [1] >>> a = np.arange(12).reshape(2, 3, 2) >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) >>> for x in i: ... print(i.multi_index) ... for y in j: ... print('', j.multi_index, y) (0,) (0, 0) 0 (0, 1) 1 (1, 0) 6 (1, 1) 7 (1,) (0, 0) 2 (0, 1) 3 (1, 0) 8 (1, 1) 9 (2,) (0, 0) 4 (0, 1) 5 (1, 0) 10 (1, 1) 11 """) add_newdoc('numpy.core', 'nditer', ('close', """ close() Resolve all writeback semantics in writeable operands. See Also -------- :ref:`nditer-context-manager` """)) ############################################################################### # # broadcast # ############################################################################### add_newdoc('numpy.core', 'broadcast', """ Produce an object that mimics broadcasting. Parameters ---------- in1, in2, ... : array_like Input parameters. Returns ------- b : broadcast object Broadcast the input parameters against one another, and return an object that encapsulates the result. Amongst others, it has ``shape`` and ``nd`` properties, and may be used as an iterator. 
See Also -------- broadcast_arrays broadcast_to Examples -------- Manually adding two vectors, using broadcasting: >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) >>> out = np.empty(b.shape) >>> out.flat = [u+v for (u,v) in b] >>> out array([[ 5., 6., 7.], [ 6., 7., 8.], [ 7., 8., 9.]]) Compare against built-in broadcasting: >>> x + y array([[5, 6, 7], [6, 7, 8], [7, 8, 9]]) """) # attributes add_newdoc('numpy.core', 'broadcast', ('index', """ current index in broadcasted result Examples -------- >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) >>> b.index 0 >>> b.next(), b.next(), b.next() ((1, 4), (1, 5), (1, 6)) >>> b.index 3 """)) add_newdoc('numpy.core', 'broadcast', ('iters', """ tuple of iterators along ``self``'s "components." Returns a tuple of `numpy.flatiter` objects, one for each "component" of ``self``. See Also -------- numpy.flatiter Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> row, col = b.iters >>> row.next(), col.next() (1, 4) """)) add_newdoc('numpy.core', 'broadcast', ('ndim', """ Number of dimensions of broadcasted result. Alias for `nd`. .. versionadded:: 1.12.0 Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.ndim 2 """)) add_newdoc('numpy.core', 'broadcast', ('nd', """ Number of dimensions of broadcasted result. For code intended for NumPy 1.12.0 and later the more consistent `ndim` is preferred. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.nd 2 """)) add_newdoc('numpy.core', 'broadcast', ('numiter', """ Number of iterators possessed by the broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.numiter 2 """)) add_newdoc('numpy.core', 'broadcast', ('shape', """ Shape of broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.shape (3, 3) """)) add_newdoc('numpy.core', 'broadcast', ('size', """ Total size of broadcasted result. Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.size 9 """)) add_newdoc('numpy.core', 'broadcast', ('reset', """ reset() Reset the broadcasted result's iterator(s). Parameters ---------- None Returns ------- None Examples -------- >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]] >>> b = np.broadcast(x, y) >>> b.index 0 >>> b.next(), b.next(), b.next() ((1, 4), (2, 4), (3, 4)) >>> b.index 3 >>> b.reset() >>> b.index 0 """)) ############################################################################### # # numpy functions # ############################################################################### add_newdoc('numpy.core.multiarray', 'array', """ array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0) Create an array. Parameters ---------- object : array_like An array, any object exposing the array interface, an object whose __array__ method returns an array, or any (nested) sequence. dtype : data-type, optional The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. This argument can only be used to 'upcast' the array. For downcasting, use the .astype(t) method. copy : bool, optional If true (default), then the object is copied. 
Otherwise, a copy will only be made if __array__ returns a copy, if obj is a nested sequence, or if a copy is needed to satisfy any of the other requirements (`dtype`, `order`, etc.). order : {'K', 'A', 'C', 'F'}, optional Specify the memory layout of the array. If object is not an array, the newly created array will be in C order (row major) unless 'F' is specified, in which case it will be in Fortran order (column major). If object is an array the following holds. ===== ========= =================================================== order no copy copy=True ===== ========= =================================================== 'K' unchanged F & C order preserved, otherwise most similar order 'A' unchanged F order if input is F and not C, otherwise C order 'C' C order C order 'F' F order F order ===== ========= =================================================== When ``copy=False`` and a copy is made for other reasons, the result is the same as if ``copy=True``, with some exceptions for `A`, see the Notes section. The default order is 'K'. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will be pre-pended to the shape as needed to meet this requirement. Returns ------- out : ndarray An array object satisfying the specified requirements. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Notes ----- When order is 'A' and `object` is an array in neither 'C' nor 'F' order, and a copy is forced by a change in dtype, then the order of the result is not necessarily 'C' as expected. This is likely a bug. Examples -------- >>> np.array([1, 2, 3]) array([1, 2, 3]) Upcasting: >>> np.array([1, 2, 3.0]) array([ 1., 2., 3.]) More than one dimension: >>> np.array([[1, 2], [3, 4]]) array([[1, 2], [3, 4]]) Minimum dimensions 2: >>> np.array([1, 2, 3], ndmin=2) array([[1, 2, 3]]) Type provided: >>> np.array([1, 2, 3], dtype=complex) array([ 1.+0.j, 2.+0.j, 3.+0.j]) Data-type consisting of more than one element: >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) >>> x['a'] array([1, 3]) Creating an array from sub-classes: >>> np.array(np.mat('1 2; 3 4')) array([[1, 2], [3, 4]]) >>> np.array(np.mat('1 2; 3 4'), subok=True) matrix([[1, 2], [3, 4]]) """) add_newdoc('numpy.core.multiarray', 'empty', """ empty(shape, dtype=float, order='C') Return a new array of given shape and type, without initializing entries. Parameters ---------- shape : int or tuple of int Shape of the empty array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional Desired output data-type for the array, e.g, `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of uninitialized (arbitrary) data of the given shape, dtype, and order. Object arrays will be initialized to None. 
See Also -------- empty_like : Return an empty array with shape and type of input. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Notes ----- `empty`, unlike `zeros`, does not set the array values to zero, and may therefore be marginally faster. On the other hand, it requires the user to manually set all the values in the array, and should be used with caution. Examples -------- >>> np.empty([2, 2]) array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #random >>> np.empty([2, 2], dtype=int) array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #random """) add_newdoc('numpy.core.multiarray', 'empty_like', """ empty_like(prototype, dtype=None, order='K', subok=True) Return a new array with the same shape and type as a given array. Parameters ---------- prototype : array_like The shape and data-type of `prototype` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of ``prototype`` as closely as possible. .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to True. Returns ------- out : ndarray Array of uninitialized (arbitrary) data with the same shape and type as `prototype`. See Also -------- ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. Notes ----- This function does *not* initialize the returned array; to do that use `zeros_like` or `ones_like` instead. It may be marginally faster than the functions that do set the array values. Examples -------- >>> a = ([1,2,3], [4,5,6]) # a is array-like >>> np.empty_like(a) array([[-1073741821, -1073741821, 3], #random [ 0, 0, -1073741821]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) """) add_newdoc('numpy.core.multiarray', 'scalar', """ scalar(dtype, obj) Return a new scalar array of the given type initialized with obj. This function is meant mainly for pickle support. `dtype` must be a valid data-type descriptor. If `dtype` corresponds to an object descriptor, then `obj` can be any object, otherwise `obj` must be a string. If `obj` is not given, it will be interpreted as None for object type and as zeros for all other types. """) add_newdoc('numpy.core.multiarray', 'zeros', """ zeros(shape, dtype=float, order='C') Return a new array of given shape and type, filled with zeros. Parameters ---------- shape : int or tuple of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: 'C' Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- out : ndarray Array of zeros with the given shape, dtype, and order. 
See Also -------- zeros_like : Return an array of zeros with shape and type of input. empty : Return a new uninitialized array. ones : Return a new array setting values to one. full : Return a new array of given shape filled with value. Examples -------- >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) >>> np.zeros((5,), dtype=int) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) array([[ 0.], [ 0.]]) >>> s = (2,2) >>> np.zeros(s) array([[ 0., 0.], [ 0., 0.]]) >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype array([(0, 0), (0, 0)], dtype=[('x', '<i4'), ('y', '<i4')]) """) add_newdoc('numpy.core.multiarray', 'set_typeDict', """set_typeDict(dict) Set the internal dictionary that can look up an array type using a registered code. """) add_newdoc('numpy.core.multiarray', 'fromstring', """ fromstring(string, dtype=float, count=-1, sep='') A new 1-D array initialized from text data in a string. Parameters ---------- string : str A string containing the data. dtype : data-type, optional The data type of the array; default: float. For binary input data, the data must be in exactly this format. count : int, optional Read this number of `dtype` elements from the data. If this is negative (the default), the count will be determined from the length of the data. sep : str, optional The string separating numbers in the data; extra whitespace between elements is also ignored. .. deprecated:: 1.14 If this argument is not provided, `fromstring` falls back on the behaviour of `frombuffer` after encoding unicode string inputs as either utf-8 (python 3), or the default encoding (python 2). Returns ------- arr : ndarray The constructed array. Raises ------ ValueError If the string is not the correct size to satisfy the requested `dtype` and `count`. See Also -------- frombuffer, fromfile, fromiter Examples -------- >>> np.fromstring('1 2', dtype=int, sep=' ') array([1, 2]) >>> np.fromstring('1, 2', dtype=int, sep=',') array([1, 2]) """) add_newdoc('numpy.core.multiarray', 'fromiter', """ fromiter(iterable, dtype, count=-1) Create a new 1-dimensional array from an iterable object. Parameters ---------- iterable : iterable object An iterable object providing data for the array. dtype : data-type The data-type of the returned array. count : int, optional The number of items to read from *iterable*. The default is -1, which means all data is read. Returns ------- out : ndarray The output array. Notes ----- Specify `count` to improve performance. It allows ``fromiter`` to pre-allocate the output array, instead of resizing it on demand. Examples -------- >>> iterable = (x*x for x in range(5)) >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) """) add_newdoc('numpy.core.multiarray', 'fromfile', """ fromfile(file, dtype=float, count=-1, sep='') Construct an array from data in a text or binary file. A highly efficient way of reading binary data with a known data-type, as well as parsing simply formatted text files. Data written using the `tofile` method can be read using this function. Parameters ---------- file : file or str Open file object or filename. dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order of the items in the file. count : int Number of items to read. ``-1`` means all items (i.e., the complete file). sep : str Separator between items if file is a text file. Empty ("") separator means the file should be treated as binary. Spaces (" ") in the separator match zero or more whitespace characters. 
A separator consisting only of spaces must match at least one whitespace. See also -------- load, save ndarray.tofile loadtxt : More flexible way of loading data from a text file. Notes ----- Do not rely on the combination of `tofile` and `fromfile` for data storage, as the binary files generated are are not platform independent. In particular, no byte-order or data-type information is saved. Data can be stored in the platform independent ``.npy`` format using `save` and `load` instead. Examples -------- Construct an ndarray: >>> dt = np.dtype([('time', [('min', int), ('sec', int)]), ... ('temp', float)]) >>> x = np.zeros((1,), dtype=dt) >>> x['time']['min'] = 10; x['temp'] = 98.25 >>> x array([((10, 0), 98.25)], dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) Save the raw data to disk: >>> import os >>> fname = os.tmpnam() >>> x.tofile(fname) Read the raw data from disk: >>> np.fromfile(fname, dtype=dt) array([((10, 0), 98.25)], dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) The recommended way to store and load data: >>> np.save(fname, x) >>> np.load(fname + '.npy') array([((10, 0), 98.25)], dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) """) add_newdoc('numpy.core.multiarray', 'frombuffer', """ frombuffer(buffer, dtype=float, count=-1, offset=0) Interpret a buffer as a 1-dimensional array. Parameters ---------- buffer : buffer_like An object that exposes the buffer interface. dtype : data-type, optional Data-type of the returned array; default: float. count : int, optional Number of items to read. ``-1`` means all data in the buffer. offset : int, optional Start reading the buffer from this offset (in bytes); default: 0. Notes ----- If the buffer has data that is not in machine byte-order, this should be specified as part of the data-type, e.g.:: >>> dt = np.dtype(int) >>> dt = dt.newbyteorder('>') >>> np.frombuffer(buf, dtype=dt) The data of the resulting array will not be byteswapped, but will be interpreted correctly. Examples -------- >>> s = 'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) array(['w', 'o', 'r', 'l', 'd'], dtype='|S1') >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) array([1, 2], dtype=uint8) >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) array([1, 2, 3], dtype=uint8) """) add_newdoc('numpy.core.multiarray', 'concatenate', """ concatenate((a1, a2, ...), axis=0, out=None) Join a sequence of arrays along an existing axis. Parameters ---------- a1, a2, ... : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. If axis is None, arrays are flattened before use. Default is 0. out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what concatenate would have returned if no out argument were specified. Returns ------- res : ndarray The concatenated array. See Also -------- ma.concatenate : Concatenate function that preserves input masks. array_split : Split an array into multiple sub-arrays of equal or near-equal size. split : Split array into a list of multiple sub-arrays of equal size. hsplit : Split array into multiple sub-arrays horizontally (column wise) vsplit : Split array into multiple sub-arrays vertically (row wise) dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). stack : Stack a sequence of arrays along a new axis. 
hstack : Stack arrays in sequence horizontally (column wise) vstack : Stack arrays in sequence vertically (row wise) dstack : Stack arrays in sequence depth wise (along third dimension) Notes ----- When one or more of the arrays to be concatenated is a MaskedArray, this function will return a MaskedArray object instead of an ndarray, but the input masks are *not* preserved. In cases where a MaskedArray is expected as input, use the ma.concatenate function from the masked array module instead. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) array([[1, 2], [3, 4], [5, 6]]) >>> np.concatenate((a, b.T), axis=1) array([[1, 2, 5], [3, 4, 6]]) >>> np.concatenate((a, b), axis=None) array([1, 2, 3, 4, 5, 6]) This function will not preserve masking of MaskedArray inputs. >>> a = np.ma.arange(3) >>> a[1] = np.ma.masked >>> b = np.arange(2, 5) >>> a masked_array(data = [0 -- 2], mask = [False True False], fill_value = 999999) >>> b array([2, 3, 4]) >>> np.concatenate([a, b]) masked_array(data = [0 1 2 2 3 4], mask = False, fill_value = 999999) >>> np.ma.concatenate([a, b]) masked_array(data = [0 -- 2 2 3 4], mask = [False True False False False False], fill_value = 999999) """) add_newdoc('numpy.core', 'inner', """ inner(a, b) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray `out.shape = a.shape[:-1] + b.shape[:-1]` Raises ------ ValueError If the last dimension of `a` and `b` has different size. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-1,j0,...,js-1] = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 A multidimensional example: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> np.inner(a, b) array([[ 14, 38, 62], [ 86, 110, 134]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[ 7., 0.], [ 0., 7.]]) """) add_newdoc('numpy.core', 'fastCopyAndTranspose', """_fastCopyAndTranspose(a)""") add_newdoc('numpy.core.multiarray', 'correlate', """cross_correlate(a,v, mode=0)""") add_newdoc('numpy.core.multiarray', 'arange', """ arange([start,] stop[, step,], dtype=None) Return evenly spaced values within a given interval. Values are generated within the half-open interval ``[start, stop)`` (in other words, the interval including `start` but excluding `stop`). For integer arguments the function is equivalent to the Python built-in `range <http://docs.python.org/lib/built-in-funcs.html>`_ function, but returns an ndarray rather than a list. When using a non-integer step, such as 0.1, the results will often not be consistent. It is better to use ``linspace`` for these cases. Parameters ---------- start : number, optional Start of interval. The interval includes this value. 
The default start value is 0. stop : number End of interval. The interval does not include this value, except in some cases where `step` is not an integer and floating point round-off affects the length of `out`. step : number, optional Spacing between values. For any output `out`, this is the distance between two adjacent values, ``out[i+1] - out[i]``. The default step size is 1. If `step` is specified as a position argument, `start` must also be given. dtype : dtype The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. Returns ------- arange : ndarray Array of evenly spaced values. For floating point arguments, the length of the result is ``ceil((stop - start)/step)``. Because of floating point overflow, this rule may result in the last element of `out` being greater than `stop`. See Also -------- linspace : Evenly spaced numbers with careful handling of endpoints. ogrid: Arrays of evenly spaced numbers in N-dimensions. mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. Examples -------- >>> np.arange(3) array([0, 1, 2]) >>> np.arange(3.0) array([ 0., 1., 2.]) >>> np.arange(3,7) array([3, 4, 5, 6]) >>> np.arange(3,7,2) array([3, 5]) """) add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() Return the compile time NDARRAY_VERSION number. """) add_newdoc('numpy.core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) Construct an empty array. Used by Pickles. """) add_newdoc('numpy.core.multiarray', 'set_string_function', """ set_string_function(f, repr=1) Internal method to set a function to be used when pretty printing arrays. """) add_newdoc('numpy.core.multiarray', 'set_numeric_ops', """ set_numeric_ops(op1=func1, op2=func2, ...) Set numerical operators for array objects. Parameters ---------- op1, op2, ... : callable Each ``op = func`` pair describes an operator to be replaced. For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace addition by modulus 5 addition. Returns ------- saved_ops : list of callables A list of all operators, stored before making replacements. Notes ----- .. WARNING:: Use with care! Incorrect usage may lead to memory errors. A function replacing an operator cannot make use of that operator. For example, when replacing add, you may not use ``+``. Instead, directly call ufuncs. Examples -------- >>> def add_mod5(x, y): ... return np.add(x, y) % 5 ... >>> old_funcs = np.set_numeric_ops(add=add_mod5) >>> x = np.arange(12).reshape((3, 4)) >>> x + x array([[0, 2, 4, 1], [3, 0, 2, 4], [1, 3, 0, 2]]) >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators """) add_newdoc('numpy.core.multiarray', 'where', """ where(condition, [x, y]) Return elements, either from `x` or `y`, depending on `condition`. If only `condition` is given, return ``condition.nonzero()``. Parameters ---------- condition : array_like, bool When True, yield `x`, otherwise yield `y`. x, y : array_like, optional Values from which to choose. `x`, `y` and `condition` need to be broadcastable to some shape. Returns ------- out : ndarray or tuple of ndarrays If both `x` and `y` are specified, the output array contains elements of `x` where `condition` is True, and elements from `y` elsewhere. If only `condition` is given, return the tuple ``condition.nonzero()``, the indices where `condition` is True. 
See Also -------- nonzero, choose Notes ----- If `x` and `y` are given and input arrays are 1-D, `where` is equivalent to:: [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] Examples -------- >>> np.where([[True, False], [True, True]], ... [[1, 2], [3, 4]], ... [[9, 8], [7, 6]]) array([[1, 8], [3, 4]]) >>> np.where([[0, 1], [1, 0]]) (array([0, 1]), array([1, 0])) >>> x = np.arange(9.).reshape(3, 3) >>> np.where( x > 5 ) (array([2, 2, 2]), array([0, 1, 2])) >>> x[np.where( x > 3.0 )] # Note: result is 1D. array([ 4., 5., 6., 7., 8.]) >>> np.where(x < 5, x, -1) # Note: broadcasting. array([[ 0., 1., 2.], [ 3., 4., -1.], [-1., -1., -1.]]) Find the indices of elements of `x` that are in `goodvalues`. >>> goodvalues = [3, 4, 7] >>> ix = np.isin(x, goodvalues) >>> ix array([[False, False, False], [ True, True, False], [False, True, False]]) >>> np.where(ix) (array([1, 1, 2]), array([0, 1, 1])) """) add_newdoc('numpy.core.multiarray', 'lexsort', """ lexsort(keys, axis=-1) Perform an indirect stable sort using a sequence of keys. Given multiple sorting keys, which can be interpreted as columns in a spreadsheet, lexsort returns an array of integer indices that describes the sort order by multiple columns. The last key in the sequence is used for the primary sort order, the second-to-last key for the secondary sort order, and so on. The keys argument must be a sequence of objects that can be converted to arrays of the same shape. If a 2D array is provided for the keys argument, it's rows are interpreted as the sorting keys and sorting is according to the last row, second last row etc. Parameters ---------- keys : (k, N) array or tuple containing k (N,)-shaped sequences The `k` different "columns" to be sorted. The last column (or row if `keys` is a 2D array) is the primary sort key. axis : int, optional Axis to be indirectly sorted. By default, sort over the last axis. Returns ------- indices : (N,) ndarray of ints Array of indices that sort the keys along the specified axis. See Also -------- argsort : Indirect sort. ndarray.sort : In-place sort. sort : Return a sorted copy of an array. Examples -------- Sort names: first by surname, then by name. >>> surnames = ('Hertz', 'Galilei', 'Hertz') >>> first_names = ('Heinrich', 'Galileo', 'Gustav') >>> ind = np.lexsort((first_names, surnames)) >>> ind array([1, 2, 0]) >>> [surnames[i] + ", " + first_names[i] for i in ind] ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich'] Sort two columns of numbers: >>> a = [1,5,1,4,3,4,4] # First column >>> b = [9,4,0,4,0,2,1] # Second column >>> ind = np.lexsort((b,a)) # Sort by a, then by b >>> print(ind) [2 0 4 6 5 3 1] >>> [(a[i],b[i]) for i in ind] [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] Note that sorting is first according to the elements of ``a``. Secondary sorting is according to the elements of ``b``. A normal ``argsort`` would have yielded: >>> [(a[i],b[i]) for i in np.argsort(a)] [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)] Structured arrays are sorted lexically by ``argsort``: >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)], ... dtype=np.dtype([('x', int), ('y', int)])) >>> np.argsort(x) # or np.argsort(x, order=('x', 'y')) array([2, 0, 4, 6, 5, 3, 1]) """) add_newdoc('numpy.core.multiarray', 'can_cast', """ can_cast(from_, to, casting='safe') Returns True if cast between data types can occur according to the casting rule. 
If from is a scalar or array scalar, also returns True if the scalar value can be cast without overflow or truncation to an integer. Parameters ---------- from_ : dtype, dtype specifier, scalar, or array Data type, scalar, or array to cast from. to : dtype or dtype specifier Data type to cast to. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Returns ------- out : bool True if cast can occur according to the casting rule. Notes ----- Starting in NumPy 1.9, can_cast function now returns False in 'safe' casting mode for integer/float dtype and string dtype if the string dtype length is not long enough to store the max integer/float value converted to a string. Previously can_cast in 'safe' mode returned True for integer/float dtype and a string dtype of any length. See also -------- dtype, result_type Examples -------- Basic examples >>> np.can_cast(np.int32, np.int64) True >>> np.can_cast(np.float64, complex) True >>> np.can_cast(complex, float) False >>> np.can_cast('i8', 'f8') True >>> np.can_cast('i8', 'f4') False >>> np.can_cast('i4', 'S4') False Casting scalars >>> np.can_cast(100, 'i1') True >>> np.can_cast(150, 'i1') False >>> np.can_cast(150, 'u1') True >>> np.can_cast(3.5e100, np.float32) False >>> np.can_cast(1000.0, np.float32) True Array scalar checks the value, array does not >>> np.can_cast(np.array(1000.0), np.float32) True >>> np.can_cast(np.array([1000.0]), np.float32) False Using the casting rules >>> np.can_cast('i8', 'i8', 'no') True >>> np.can_cast('<i8', '>i8', 'no') False >>> np.can_cast('<i8', '>i8', 'equiv') True >>> np.can_cast('<i4', '>i8', 'equiv') False >>> np.can_cast('<i4', '>i8', 'safe') True >>> np.can_cast('<i8', '>i4', 'safe') False >>> np.can_cast('<i8', '>i4', 'same_kind') True >>> np.can_cast('<i8', '>u4', 'same_kind') False >>> np.can_cast('<i8', '>u4', 'unsafe') True """) add_newdoc('numpy.core.multiarray', 'promote_types', """ promote_types(type1, type2) Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be safely cast. The returned data type is always in native byte order. This function is symmetric, but rarely associative. Parameters ---------- type1 : dtype or dtype specifier First data type. type2 : dtype or dtype specifier Second data type. Returns ------- out : dtype The promoted data type. Notes ----- .. versionadded:: 1.6.0 Starting in NumPy 1.9, promote_types function now returns a valid string length when given an integer or float dtype as one argument and a string dtype as another argument. Previously it always returned the input string dtype, even if it wasn't long enough to store the max integer/float value converted to a string. 
See Also -------- result_type, dtype, can_cast Examples -------- >>> np.promote_types('f4', 'f8') dtype('float64') >>> np.promote_types('i8', 'f4') dtype('float64') >>> np.promote_types('>i8', '<c8') dtype('complex128') >>> np.promote_types('i4', 'S8') dtype('S11') An example of a non-associative case: >>> p = np.promote_types >>> p('S', p('i1', 'u1')) dtype('S6') >>> p(p('S', 'i1'), 'u1') dtype('S4') """) add_newdoc('numpy.core.multiarray', 'min_scalar_type', """ min_scalar_type(a) For scalar ``a``, returns the data type with the smallest size and smallest scalar kind which can hold its value. For non-scalar array ``a``, returns the vector's dtype unmodified. Floating point values are not demoted to integers, and complex values are not demoted to floats. Parameters ---------- a : scalar or array_like The value whose minimal data type is to be found. Returns ------- out : dtype The minimal data type. Notes ----- .. versionadded:: 1.6.0 See Also -------- result_type, promote_types, dtype, can_cast Examples -------- >>> np.min_scalar_type(10) dtype('uint8') >>> np.min_scalar_type(-260) dtype('int16') >>> np.min_scalar_type(3.1) dtype('float16') >>> np.min_scalar_type(1e50) dtype('float64') >>> np.min_scalar_type(np.arange(4,dtype='f8')) dtype('float64') """) add_newdoc('numpy.core.multiarray', 'result_type', """ result_type(*arrays_and_dtypes) Returns the type that results from applying the NumPy type promotion rules to the arguments. Type promotion in NumPy works similarly to the rules in languages like C++, with some slight differences. When both scalars and arrays are used, the array's type takes precedence and the actual value of the scalar is taken into account. For example, calculating 3*a, where a is an array of 32-bit floats, intuitively should result in a 32-bit float output. If the 3 is a 32-bit integer, the NumPy rules indicate it can't convert losslessly into a 32-bit float, so a 64-bit float should be the result type. By examining the value of the constant, '3', we see that it fits in an 8-bit integer, which can be cast losslessly into the 32-bit float. Parameters ---------- arrays_and_dtypes : list of arrays and dtypes The operands of some operation whose result type is needed. Returns ------- out : dtype The result type. See also -------- dtype, promote_types, min_scalar_type, can_cast Notes ----- .. versionadded:: 1.6.0 The specific algorithm used is as follows. Categories are determined by first checking which of boolean, integer (int/uint), or floating point (float/complex) the maximum kind of all the arrays and the scalars are. If there are only scalars or the maximum category of the scalars is higher than the maximum category of the arrays, the data types are combined with :func:`promote_types` to produce the return value. Otherwise, `min_scalar_type` is called on each array, and the resulting data types are all combined with :func:`promote_types` to produce the return value. The set of int values is not a subset of the uint values for types with the same number of bits, something not reflected in :func:`min_scalar_type`, but handled as a special case in `result_type`. Examples -------- >>> np.result_type(3, np.arange(7, dtype='i1')) dtype('int8') >>> np.result_type('i4', 'c8') dtype('complex128') >>> np.result_type(3.0, -2) dtype('float64') """) add_newdoc('numpy.core.multiarray', 'newbuffer', """ newbuffer(size) Return a new uninitialized buffer object. Parameters ---------- size : int Size in bytes of returned buffer object. 
Returns ------- newbuffer : buffer object Returned, uninitialized buffer object of `size` bytes. """) add_newdoc('numpy.core.multiarray', 'getbuffer', """ getbuffer(obj [,offset[, size]]) Create a buffer object from the given object referencing a slice of length size starting at offset. Default is the entire buffer. A read-write buffer is attempted followed by a read-only buffer. Parameters ---------- obj : object offset : int, optional size : int, optional Returns ------- buffer_obj : buffer Examples -------- >>> buf = np.getbuffer(np.ones(5), 1, 3) >>> len(buf) 3 >>> buf[0] '\\x00' >>> buf <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0> """) add_newdoc('numpy.core', 'dot', """ dot(a, b, out=None) Dot product of two arrays. Specifically, - If both `a` and `b` are 1-D arrays, it is inner product of vectors (without complex conjugation). - If both `a` and `b` are 2-D arrays, it is matrix multiplication, but using :func:`matmul` or ``a @ b`` is preferred. - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over the last axis of `a` and `b`. - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a sum product over the last axis of `a` and the second-to-last axis of `b`:: dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) Parameters ---------- a : array_like First argument. b : array_like Second argument. out : ndarray, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for `dot(a,b)`. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. Returns ------- output : ndarray Returns the dot product of `a` and `b`. If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. If `out` is given, then it is returned. Raises ------ ValueError If the last dimension of `a` is not the same size as the second-to-last dimension of `b`. See Also -------- vdot : Complex-conjugating dot product. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. matmul : '@' operator as method with out parameter. Examples -------- >>> np.dot(3, 4) 12 Neither argument is complex-conjugated: >>> np.dot([2j, 3j], [2j, 3j]) (-13+0j) For 2-D arrays it is the matrix product: >>> a = [[1, 0], [0, 1]] >>> b = [[4, 1], [2, 2]] >>> np.dot(a, b) array([[4, 1], [2, 2]]) >>> a = np.arange(3*4*5*6).reshape((3,4,5,6)) >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3)) >>> np.dot(a, b)[2,3,2,1,2,2] 499128 >>> sum(a[2,3,2,:] * b[1,2,:,2]) 499128 """) add_newdoc('numpy.core', 'matmul', """ matmul(a, b, out=None) Matrix product of two arrays. The behavior depends on the arguments in the following way. - If both arguments are 2-D they are multiplied like conventional matrices. - If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. - If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. 
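    As a quick, shape-only illustration of the 1-D promotion rules above
    (fuller examples appear in the Examples section below):

    >>> np.matmul(np.ones((2, 3)), np.ones(3)).shape   # appended 1 is removed
    (2,)
    >>> np.matmul(np.ones(2), np.ones((2, 3))).shape   # prepended 1 is removed
    (3,)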
Multiplication by a scalar is not allowed, use ``*`` instead. Note that multiplying a stack of matrices with a vector will result in a stack of vectors, but matmul will not recognize it as such. ``matmul`` differs from ``dot`` in two important ways. - Multiplication by scalars is not allowed. - Stacks of matrices are broadcast together as if the matrices were elements. .. warning:: This function is preliminary and included in NumPy 1.10.0 for testing and documentation. Its semantics will not change, but the number and order of the optional arguments will. .. versionadded:: 1.10.0 Parameters ---------- a : array_like First argument. b : array_like Second argument. out : ndarray, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for `dot(a,b)`. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. Returns ------- output : ndarray Returns the dot product of `a` and `b`. If `a` and `b` are both 1-D arrays then a scalar is returned; otherwise an array is returned. If `out` is given, then it is returned. Raises ------ ValueError If the last dimension of `a` is not the same size as the second-to-last dimension of `b`. If scalar value is passed. See Also -------- vdot : Complex-conjugating dot product. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. dot : alternative matrix product with different broadcasting rules. Notes ----- The matmul function implements the semantics of the `@` operator introduced in Python 3.5 following PEP465. Examples -------- For 2-D arrays it is the matrix product: >>> a = [[1, 0], [0, 1]] >>> b = [[4, 1], [2, 2]] >>> np.matmul(a, b) array([[4, 1], [2, 2]]) For 2-D mixed with 1-D, the result is the usual. >>> a = [[1, 0], [0, 1]] >>> b = [1, 2] >>> np.matmul(a, b) array([1, 2]) >>> np.matmul(b, a) array([1, 2]) Broadcasting is conventional for stacks of arrays >>> a = np.arange(2*2*4).reshape((2,2,4)) >>> b = np.arange(2*2*4).reshape((2,4,2)) >>> np.matmul(a,b).shape (2, 2, 2) >>> np.matmul(a,b)[0,1,1] 98 >>> sum(a[0,1,:] * b[0,:,1]) 98 Vector, vector returns the scalar inner product, but neither argument is complex-conjugated: >>> np.matmul([2j, 3j], [2j, 3j]) (-13+0j) Scalar multiplication raises an error. >>> np.matmul([1,2], 3) Traceback (most recent call last): ... ValueError: Scalar operands are not allowed, use '*' instead """) add_newdoc('numpy.core', 'c_einsum', """ c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe') Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional array operations can be represented in a simple fashion. This function provides a way to compute such summations. The best way to understand this function is to try the examples below, which show how many common NumPy functions can be implemented as calls to `einsum`. This is the core C function. Parameters ---------- subscripts : str Specifies the subscripts for summation. operands : list of array_like These are the arrays for the operation. out : ndarray, optional If provided, the calculation is done into this array. dtype : {data-type, None}, optional If provided, forces the calculation to use the data type specified. 
Note that you may have to also give a more liberal `casting` parameter to allow the conversions. Default is None. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the output. 'C' means it should be C contiguous. 'F' means it should be Fortran contiguous, 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. 'K' means it should be as close to the layout as the inputs as is possible, including arbitrarily permuted axes. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Default is 'safe'. Returns ------- output : ndarray The calculation based on the Einstein summation convention. See Also -------- einsum, dot, inner, outer, tensordot Notes ----- .. versionadded:: 1.6.0 The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Repeated subscripts labels in one operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``. Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)`` is equivalent to ``np.inner(a,b)``. If a label appears only once, it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` with no changes. The order of labels in the output is by default alphabetical. This means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while ``np.einsum('ji', a)`` takes its transpose. The output can be controlled by specifying output subscript labels as well. This specifies the label order, and allows summing to be disallowed or forced when desired. The call ``np.einsum('i->', a)`` is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)`` is like ``np.diag(a)``. The difference is that `einsum` does not allow broadcasting by default. To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, you can do ``np.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` produces a view. An alternative way to provide the subscripts and operands is as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples below have corresponding `einsum` calls with the two parameter methods. .. versionadded:: 1.10.0 Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as ``np.swapaxes(a, 0, 2)`` and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal of a 2D array. 
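    As a small check of the single-operand view behaviour described above:

    >>> a = np.arange(6).reshape(2, 3)
    >>> np.shares_memory(np.einsum('ij', a), a)   # identity subscripts return a view
    True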
Examples -------- >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) >>> np.einsum('ii', a) 60 >>> np.einsum(a, [0,0]) 60 >>> np.trace(a) 60 >>> np.einsum('ii->i', a) array([ 0, 6, 12, 18, 24]) >>> np.einsum(a, [0,0], [0]) array([ 0, 6, 12, 18, 24]) >>> np.diag(a) array([ 0, 6, 12, 18, 24]) >>> np.einsum('ij,j', a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum(a, [0,1], b, [1]) array([ 30, 80, 130, 180, 230]) >>> np.dot(a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum('...j,j', a, b) array([ 30, 80, 130, 180, 230]) >>> np.einsum('ji', c) array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum(c, [1,0]) array([[0, 3], [1, 4], [2, 5]]) >>> c.T array([[0, 3], [1, 4], [2, 5]]) >>> np.einsum('..., ...', 3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.multiply(3, c) array([[ 0, 3, 6], [ 9, 12, 15]]) >>> np.einsum('i,i', b, b) 30 >>> np.einsum(b, [0], b, [0]) 30 >>> np.inner(b,b) 30 >>> np.einsum('i,j', np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.einsum(np.arange(2)+1, [0], b, [1]) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.outer(np.arange(2)+1, b) array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> np.einsum('i...->...', a) array([50, 55, 60, 65, 70]) >>> np.einsum(a, [0,Ellipsis], [Ellipsis]) array([50, 55, 60, 65, 70]) >>> np.sum(a, axis=0) array([50, 55, 60, 65, 70]) >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> np.einsum('ijk,jil->kl', a, b) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) >>> np.tensordot(a,b, axes=([1,0],[0,1])) array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) >>> a = np.arange(6).reshape((3,2)) >>> b = np.arange(12).reshape((4,3)) >>> np.einsum('ki,jk->ij', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('ki,...k->i...', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> np.einsum('k...,jk', a, b) array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> # since version 1.10.0 >>> a = np.zeros((3, 3)) >>> np.einsum('ii->i', a)[:] = 1 >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) """) add_newdoc('numpy.core', 'vdot', """ vdot(a, b) Return the dot product of two vectors. The vdot(`a`, `b`) function handles complex numbers differently than dot(`a`, `b`). If the first argument is complex the complex conjugate of the first argument is used for the calculation of the dot product. Note that `vdot` handles multidimensional arrays differently than `dot`: it does *not* perform a matrix product, but flattens input arguments to 1-D vectors first. Consequently, it should only be used for vectors. Parameters ---------- a : array_like If `a` is complex the complex conjugate is taken before calculation of the dot product. b : array_like Second argument to the dot product. Returns ------- output : ndarray Dot product of `a` and `b`. Can be an int, float, or complex depending on the types of `a` and `b`. See Also -------- dot : Return the dot product without using the complex conjugate of the first argument. Examples -------- >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) (70-8j) >>> np.vdot(b, a) (70+8j) Note that higher-dimensional arrays are flattened! 
>>> a = np.array([[1, 4], [5, 6]]) >>> b = np.array([[4, 1], [2, 2]]) >>> np.vdot(a, b) 30 >>> np.vdot(b, a) 30 >>> 1*4 + 4*1 + 5*2 + 6*2 30 """) ############################################################################## # # Documentation for ndarray attributes and methods # ############################################################################## ############################################################################## # # ndarray object # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', """ ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None) An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type object describes the format of each element in the array (its byte-order, how many bytes it occupies in memory, whether it is an integer, a floating point number, or something else, etc.) Arrays should be constructed using `array`, `zeros` or `empty` (refer to the See Also section below). The parameters given here refer to a low-level method (`ndarray(...)`) for instantiating an array. For more information, refer to the `numpy` module and examine the methods and attributes of an array. Parameters ---------- (for the __new__ method; see Notes below) shape : tuple of ints Shape of created array. dtype : data-type, optional Any object that can be interpreted as a numpy data type. buffer : object exposing buffer interface, optional Used to fill the array with data. offset : int, optional Offset of array data in buffer. strides : tuple of ints, optional Strides of data in memory. order : {'C', 'F'}, optional Row-major (C-style) or column-major (Fortran-style) order. Attributes ---------- T : ndarray Transpose of the array. data : buffer The array's elements, in memory. dtype : dtype object Describes the format of the elements in the array. flags : dict Dictionary containing information related to memory use, e.g., 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. flat : numpy.flatiter object Flattened version of the array as an iterator. The iterator allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for assignment examples; TODO). imag : ndarray Imaginary part of the array. real : ndarray Real part of the array. size : int Number of elements in the array. itemsize : int The memory use of each array element in bytes. nbytes : int The total number of bytes required to store the array data, i.e., ``itemsize * size``. ndim : int The array's number of dimensions. shape : tuple of ints Shape of the array. strides : tuple of ints The step-size required to move from one element to the next in memory. For example, a contiguous ``(3, 4)`` array of type ``int16`` in C-order has strides ``(8, 2)``. This implies that to move from element to element in memory requires jumps of 2 bytes. To move from row-to-row, one needs to jump 8 bytes at a time (``2 * 4``). ctypes : ctypes object Class containing properties of the array needed for interaction with ctypes. base : ndarray If the array is a view into another array, that array is its `base` (unless that array is also a view). The `base` array is where the array data is actually stored. See Also -------- array : Construct an array. zeros : Create an array, each element of which is zero. empty : Create an array, but leave its allocated memory unchanged (i.e., it contains "garbage"). dtype : Create a data-type. Notes ----- There are two modes of creating an array using ``__new__``: 1. 
If `buffer` is None, then only `shape`, `dtype`, and `order` are used. 2. If `buffer` is an object exposing the buffer interface, then all keywords are interpreted. No ``__init__`` method is needed because the array is fully initialized after the ``__new__`` method. Examples -------- These examples illustrate the low-level `ndarray` constructor. Refer to the `See Also` section above for easier ways of constructing an ndarray. First mode, `buffer` is None: >>> np.ndarray(shape=(2,2), dtype=float, order='F') array([[ -1.13698227e+002, 4.25087011e-303], [ 2.88528414e-306, 3.27025015e-309]]) #random Second mode: >>> np.ndarray((2,), buffer=np.array([1,2,3]), ... offset=np.int_().itemsize, ... dtype=int) # offset = 1*itemsize, i.e. skip first element array([2, 3]) """) ############################################################################## # # ndarray attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__', """Array protocol: Python side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__', """None.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__', """Array priority.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__', """Array protocol: C-struct side.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_', """Allow the array to be interpreted as a ctypes object by returning the data-memory location as an integer """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('base', """ Base object if memory is from some other object. Examples -------- The base of an array that owns its memory is None: >>> x = np.array([1,2,3,4]) >>> x.base is None True Slicing creates a view, whose memory is shared with x: >>> y = x[2:] >>> y.base is x True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', """ An object to simplify the interaction of the array with the ctypes module. This attribute creates an object that makes it easier to use arrays when calling shared libraries with the ctypes module. The returned object has, among others, data, shape, and strides attributes (see Notes below) which themselves return ctypes objects that can be used as arguments to a shared library. Parameters ---------- None Returns ------- c : Python object Possessing attributes data, shape, strides, etc. See Also -------- numpy.ctypeslib Notes ----- Below are the public attributes of this object which were documented in "Guide to NumPy" (we have omitted undocumented public attributes, as well as documented private attributes): * data: A pointer to the memory area of the array as a Python integer. This memory area may contain data that is not aligned, or not in correct byte-order. The memory area may not even be writeable. The array flags and data-type of this array should be respected when passing this attribute to arbitrary C-code to avoid trouble that can include Python crashing. User Beware! The value of this attribute is exactly the same as self._array_interface_['data'][0]. * shape (c_intp*self.ndim): A ctypes array of length self.ndim where the basetype is the C-integer corresponding to dtype('p') on this platform. This base-type could be c_int, c_long, or c_longlong depending on the platform. The c_intp type is defined accordingly in numpy.ctypeslib. The ctypes array contains the shape of the underlying array. 
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where the basetype is the same as for the shape attribute. This ctypes array contains the strides information from the underlying array. This strides information is important for showing how many bytes must be jumped to get to the next element in the array. * data_as(obj): Return the data pointer cast to a particular c-types object. For example, calling self._as_parameter_ is equivalent to self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a pointer to a ctypes array of floating-point data: self.data_as(ctypes.POINTER(ctypes.c_double)). * shape_as(obj): Return the shape tuple as an array of some other c-types type. For example: self.shape_as(ctypes.c_short). * strides_as(obj): Return the strides tuple as an array of some other c-types type. For example: self.strides_as(ctypes.c_longlong). Be careful using the ctypes attribute - especially on temporary arrays or arrays constructed on the fly. For example, calling ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory that is invalid because the array created as (a+b) is deallocated before the next Python statement. You can avoid this problem using either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will hold a reference to the array until ct is deleted or re-assigned. If the ctypes module is not available, then the ctypes attribute of array objects still returns something useful, but ctypes objects are not returned and errors may be raised instead. In particular, the object will still have the as parameter attribute which will return an integer equal to the data attribute. Examples -------- >>> import ctypes >>> x array([[0, 1], [2, 3]]) >>> x.ctypes.data 30439712 >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)) <ctypes.LP_c_long object at 0x01F01300> >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents c_long(0) >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents c_longlong(4294967296L) >>> x.ctypes.shape <numpy.core._internal.c_long_Array_2 object at 0x01FFD580> >>> x.ctypes.shape_as(ctypes.c_long) <numpy.core._internal.c_long_Array_2 object at 0x01FCE620> >>> x.ctypes.strides <numpy.core._internal.c_long_Array_2 object at 0x01FCE620> >>> x.ctypes.strides_as(ctypes.c_longlong) <numpy.core._internal.c_longlong_Array_2 object at 0x01F01300> """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('data', """Python buffer object pointing to the start of the array's data.""")) add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype', """ Data-type of the array's elements. Parameters ---------- None Returns ------- d : numpy dtype object See Also -------- numpy.dtype Examples -------- >>> x array([[0, 1], [2, 3]]) >>> x.dtype dtype('int32') >>> type(x.dtype) <type 'numpy.dtype'> """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('imag', """ The imaginary part of the array. Examples -------- >>> x = np.sqrt([1+0j, 0+1j]) >>> x.imag array([ 0. , 0.70710678]) >>> x.imag.dtype dtype('float64') """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize', """ Length of one array element in bytes. Examples -------- >>> x = np.array([1,2,3], dtype=np.float64) >>> x.itemsize 8 >>> x = np.array([1,2,3], dtype=np.complex128) >>> x.itemsize 16 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', """ Information about the memory layout of the array. Attributes ---------- C_CONTIGUOUS (C) The data is in a single, C-style contiguous segment. 
F_CONTIGUOUS (F) The data is in a single, Fortran-style contiguous segment. OWNDATA (O) The array owns the memory it uses or borrows it from another object. WRITEABLE (W) The data area can be written to. Setting this to False locks the data, making it read-only. A view (slice, etc.) inherits WRITEABLE from its base array at creation time, but a view of a writeable array may be subsequently locked while the base array remains writeable. (The opposite is not true, in that a view of a locked array may not be made writeable. However, currently, locking a base object does not lock any views that already reference it, so under that circumstance it is possible to alter the contents of a locked array via a previously created writeable view onto it.) Attempting to change a non-writeable array raises a RuntimeError exception. ALIGNED (A) The data and all elements are aligned appropriately for the hardware. WRITEBACKIFCOPY (X) This array is a copy of some other array. The C-API function PyArray_ResolveWritebackIfCopy must be called before deallocating to the base array will be updated with the contents of this array. UPDATEIFCOPY (U) (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array. When this array is deallocated, the base array will be updated with the contents of this array. FNC F_CONTIGUOUS and not C_CONTIGUOUS. FORC F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). BEHAVED (B) ALIGNED and WRITEABLE. CARRAY (CA) BEHAVED and C_CONTIGUOUS. FARRAY (FA) BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. Notes ----- The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag names are only supported in dictionary access. Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by the user, via direct assignment to the attribute or dictionary entry, or by calling `ndarray.setflags`. The array flags cannot be set arbitrarily: - UPDATEIFCOPY can only be set ``False``. - WRITEBACKIFCOPY can only be set ``False``. - ALIGNED can only be set ``True`` if the data is truly aligned. - WRITEABLE can only be set ``True`` if the array owns its own memory or the ultimate owner of the memory exposes a writeable buffer interface or is a string. Arrays can be both C-style and Fortran-style contiguous simultaneously. This is clear for 1-dimensional arrays, but can also be true for higher dimensional arrays. Even for contiguous arrays a stride for a given dimension ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` or the array has no elements. It does *not* generally hold that ``self.strides[-1] == self.itemsize`` for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for Fortran-style contiguous arrays is true. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', """ A 1-D iterator over the array. This is a `numpy.flatiter` instance, which acts similarly to, but is not a subclass of, Python's built-in iterator object. See Also -------- flatten : Return a copy of the array collapsed into one dimension. 
flatiter Examples -------- >>> x = np.arange(1, 7).reshape(2, 3) >>> x array([[1, 2, 3], [4, 5, 6]]) >>> x.flat[3] 4 >>> x.T array([[1, 4], [2, 5], [3, 6]]) >>> x.T.flat[3] 5 >>> type(x.flat) <type 'numpy.flatiter'> An assignment example: >>> x.flat = 3; x array([[3, 3, 3], [3, 3, 3]]) >>> x.flat[[1,4]] = 1; x array([[3, 1, 3], [3, 1, 3]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes', """ Total bytes consumed by the elements of the array. Notes ----- Does not include memory consumed by non-element attributes of the array object. Examples -------- >>> x = np.zeros((3,5,2), dtype=np.complex128) >>> x.nbytes 480 >>> np.prod(x.shape) * x.itemsize 480 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim', """ Number of array dimensions. Examples -------- >>> x = np.array([1, 2, 3]) >>> x.ndim 1 >>> y = np.zeros((2, 3, 4)) >>> y.ndim 3 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('real', """ The real part of the array. Examples -------- >>> x = np.sqrt([1+0j, 0+1j]) >>> x.real array([ 1. , 0.70710678]) >>> x.real.dtype dtype('float64') See Also -------- numpy.real : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('shape', """ Tuple of array dimensions. The shape property is usually used to get the current shape of an array, but may also be used to reshape the array in-place by assigning a tuple of array dimensions to it. As with `numpy.reshape`, one of the new shape dimensions can be -1, in which case its value is inferred from the size of the array and the remaining dimensions. Reshaping an array in-place will fail if a copy is required. Examples -------- >>> x = np.array([1, 2, 3, 4]) >>> x.shape (4,) >>> y = np.zeros((2, 3, 4)) >>> y.shape (2, 3, 4) >>> y.shape = (3, 8) >>> y array([[ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0., 0., 0., 0.]]) >>> y.shape = (3, 6) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: total size of new array must be unchanged >>> np.zeros((4,2))[::2].shape = (-1,) Traceback (most recent call last): File "<stdin>", line 1, in <module> AttributeError: incompatible shape for a non-contiguous array See Also -------- numpy.reshape : similar function ndarray.reshape : similar method """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('size', """ Number of elements in the array. Equal to ``np.prod(a.shape)``, i.e., the product of the array's dimensions. Notes ----- `a.size` returns a standard arbitrary precision Python integer. This may not be the case with other methods of obtaining the same value (like the suggested ``np.prod(a.shape)``, which returns an instance of ``np.int_``), and may be relevant if the value is used further in calculations that may overflow a fixed size integer type. Examples -------- >>> x = np.zeros((3, 5, 2), dtype=np.complex128) >>> x.size 30 >>> np.prod(x.shape) 30 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', """ Tuple of bytes to step in each dimension when traversing an array. The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a` is:: offset = sum(np.array(i) * a.strides) A more detailed explanation of strides can be found in the "ndarray.rst" file in the NumPy reference guide. Notes ----- Imagine an array of 32-bit integers (each 4 bytes):: x = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int32) This array is stored in memory as 40 bytes, one after the other (known as a contiguous block of memory). 
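    (A quick check of the layout just described:)

    >>> x = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], dtype=np.int32)
    >>> x.nbytes   # 10 elements of 4 bytes each
    40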
The strides of an array tell us how many bytes we have to skip in memory to move to the next position along a certain axis. For example, we have to skip 4 bytes (1 value) to move to the next column, but 20 bytes (5 values) to get to the same position in the next row. As such, the strides for the array `x` will be ``(20, 4)``. See Also -------- numpy.lib.stride_tricks.as_strided Examples -------- >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) >>> y array([[[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]) >>> y.strides (48, 16, 4) >>> y[1,1,1] 17 >>> offset=sum(y.strides * np.array((1,1,1))) >>> offset/y.itemsize 17 >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0) >>> x.strides (32, 4, 224, 1344) >>> i = np.array([3,5,2,2]) >>> offset = sum(i * x.strides) >>> x[3,5,2,2] 813 >>> offset / x.itemsize 813 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('T', """ Same as self.transpose(), except that self is returned if self.ndim < 2. Examples -------- >>> x = np.array([[1.,2.],[3.,4.]]) >>> x array([[ 1., 2.], [ 3., 4.]]) >>> x.T array([[ 1., 3.], [ 2., 4.]]) >>> x = np.array([1.,2.,3.,4.]) >>> x array([ 1., 2., 3., 4.]) >>> x.T array([ 1., 2., 3., 4.]) """)) ############################################################################## # # ndarray methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__', """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise. Returns either a new reference to self if dtype is not given or a new array of provided data type if dtype is different from the current dtype of the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__', """a.__array_prepare__(obj) -> Object of same type as ndarray object obj. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__', """a.__array_wrap__(obj) -> Object of same type as ndarray object a. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', """a.__copy__() Used if :func:`copy.copy` is called on an array. Returns a copy of the array. Equivalent to ``a.copy(order='K')``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', """a.__deepcopy__(memo, /) -> Deep copy of array. Used if :func:`copy.deepcopy` is called on an array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__', """a.__reduce__() For pickling. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', """a.__setstate__(state, /) For unpickling. The `state` argument must be a sequence that contains the following elements: Parameters ---------- version : int optional pickle version. If omitted defaults to 0. shape : tuple dtype : data-type isFortran : bool rawdata : string or list a binary string with the data (or a list if 'a' is an object array) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('all', """ a.all(axis=None, out=None, keepdims=False) Returns True if all elements evaluate to True. Refer to `numpy.all` for full documentation. See Also -------- numpy.all : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('any', """ a.any(axis=None, out=None, keepdims=False) Returns True if any of the elements of `a` evaluate to True. Refer to `numpy.any` for full documentation. 
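    A quick illustrative example (see `numpy.any` for full details):

    >>> a = np.array([[0, 0], [0, 1]])
    >>> a.any()
    True
    >>> (a > 1).any()
    False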
See Also -------- numpy.any : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax', """ a.argmax(axis=None, out=None) Return indices of the maximum values along the given axis. Refer to `numpy.argmax` for full documentation. See Also -------- numpy.argmax : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin', """ a.argmin(axis=None, out=None) Return indices of the minimum values along the given axis of `a`. Refer to `numpy.argmin` for detailed documentation. See Also -------- numpy.argmin : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort', """ a.argsort(axis=-1, kind='quicksort', order=None) Returns the indices that would sort this array. Refer to `numpy.argsort` for full documentation. See Also -------- numpy.argsort : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition', """ a.argpartition(kth, axis=-1, kind='introselect', order=None) Returns the indices that would partition this array. Refer to `numpy.argpartition` for full documentation. .. versionadded:: 1.8.0 See Also -------- numpy.argpartition : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', """ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) Copy of the array, cast to a specified type. Parameters ---------- dtype : str or dtype Typecode or data-type to which the array is cast. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout order of the result. 'C' means C order, 'F' means Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. subok : bool, optional If True, then sub-classes will be passed-through (default), otherwise the returned array will be forced to be a base-class array. copy : bool, optional By default, astype always returns a newly allocated array. If this is set to false, and the `dtype`, `order`, and `subok` requirements are satisfied, the input array is returned instead of a copy. Returns ------- arr_t : ndarray Unless `copy` is False and the other conditions for returning the input array are satisfied (see description for `copy` input parameter), `arr_t` is a new array of the same shape as the input array, with dtype, order given by `dtype`, `order`. Notes ----- Starting in NumPy 1.9, astype method now returns an error if the string dtype to cast to is not long enough in 'safe' casting mode to hold the max value of integer/float array that is being casted. Previously the casting was allowed even if the result was truncated. Raises ------ ComplexWarning When casting from complex to float or int. To avoid this, one should use ``a.real.astype(t)``. Examples -------- >>> x = np.array([1, 2, 2.5]) >>> x array([ 1. , 2. 
, 2.5]) >>> x.astype(int) array([1, 2, 2]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', """ a.byteswap(inplace=False) Swap the bytes of the array elements Toggle between low-endian and big-endian data representation by returning a byteswapped array, optionally swapped in-place. Parameters ---------- inplace : bool, optional If ``True``, swap bytes in-place, default is ``False``. Returns ------- out : ndarray The byteswapped array. If `inplace` is ``True``, this is a view to self. Examples -------- >>> A = np.array([1, 256, 8755], dtype=np.int16) >>> map(hex, A) ['0x1', '0x100', '0x2233'] >>> A.byteswap(inplace=True) array([ 256, 1, 13090], dtype=int16) >>> map(hex, A) ['0x100', '0x1', '0x3322'] Arrays of strings are not swapped >>> A = np.array(['ceg', 'fac']) >>> A.byteswap() array(['ceg', 'fac'], dtype='|S3') """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', """ a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. Refer to `numpy.choose` for full documentation. See Also -------- numpy.choose : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('clip', """ a.clip(min=None, max=None, out=None) Return an array whose values are limited to ``[min, max]``. One of max or min must be given. Refer to `numpy.clip` for full documentation. See Also -------- numpy.clip : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('compress', """ a.compress(condition, axis=None, out=None) Return selected slices of this array along given axis. Refer to `numpy.compress` for full documentation. See Also -------- numpy.compress : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conj', """ a.conj() Complex-conjugate all elements. Refer to `numpy.conjugate` for full documentation. See Also -------- numpy.conjugate : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate', """ a.conjugate() Return the complex conjugate, element-wise. Refer to `numpy.conjugate` for full documentation. See Also -------- numpy.conjugate : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('copy', """ a.copy(order='C') Return a copy of the array. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :func:`numpy.copy` are very similar, but have different default values for their order= arguments.) See also -------- numpy.copy numpy.copyto Examples -------- >>> x = np.array([[1,2,3],[4,5,6]], order='F') >>> y = x.copy() >>> x.fill(0) >>> x array([[0, 0, 0], [0, 0, 0]]) >>> y array([[1, 2, 3], [4, 5, 6]]) >>> y.flags['C_CONTIGUOUS'] True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', """ a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. Refer to `numpy.cumprod` for full documentation. See Also -------- numpy.cumprod : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum', """ a.cumsum(axis=None, dtype=None, out=None) Return the cumulative sum of the elements along the given axis. Refer to `numpy.cumsum` for full documentation. 
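    A quick illustrative example (see `numpy.cumsum` for full details):

    >>> np.array([1, 2, 3]).cumsum()
    array([1, 3, 6])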
See Also -------- numpy.cumsum : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', """ a.diagonal(offset=0, axis1=0, axis2=1) Return specified diagonals. In NumPy 1.9 the returned array is a read-only view instead of a copy as in previous NumPy versions. In a future version the read-only restriction will be removed. Refer to :func:`numpy.diagonal` for full documentation. See Also -------- numpy.diagonal : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', """ a.dot(b, out=None) Dot product of two arrays. Refer to `numpy.dot` for full documentation. See Also -------- numpy.dot : equivalent function Examples -------- >>> a = np.eye(2) >>> b = np.ones((2, 2)) * 2 >>> a.dot(b) array([[ 2., 2.], [ 2., 2.]]) This array method can be conveniently chained: >>> a.dot(b).dot(b) array([[ 8., 8.], [ 8., 8.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', """a.dump(file) Dump a pickle of the array to the specified file. The array can be read back with pickle.load or numpy.load. Parameters ---------- file : str A string naming the dump file. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps', """ a.dumps() Returns the pickle of the array as a string. pickle.loads or numpy.loads will convert the string back to an array. Parameters ---------- None """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', """ a.fill(value) Fill the array with a scalar value. Parameters ---------- value : scalar All elements of `a` will be assigned this value. Examples -------- >>> a = np.array([1, 2]) >>> a.fill(0) >>> a array([0, 0]) >>> a = np.empty(2) >>> a.fill(1) >>> a array([ 1., 1.]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', """ a.flatten(order='C') Return a copy of the array collapsed into one dimension. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran- style) order. 'A' means to flatten in column-major order if `a` is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten `a` in the order the elements occur in memory. The default is 'C'. Returns ------- y : ndarray A copy of the input array, flattened to one dimension. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the array. Examples -------- >>> a = np.array([[1,2], [3,4]]) >>> a.flatten() array([1, 2, 3, 4]) >>> a.flatten('F') array([1, 3, 2, 4]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', """ a.getfield(dtype, offset=0) Returns a field of the given array as a certain type. A field is a view of the array data with a given data-type. The values in the view are determined by the given type and the offset into the current array in bytes. The offset needs to be such that the view dtype fits in the array dtype; for example an array of dtype complex128 has 16-byte elements. If taking a view with a 32-bit integer (4 bytes), the offset needs to be between 0 and 12 bytes. Parameters ---------- dtype : str or dtype The data type of the view. The dtype size of the view can not be larger than that of the array itself. offset : int Number of bytes to skip before beginning the element view. 
Examples -------- >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x array([[ 1.+1.j, 0.+0.j], [ 0.+0.j, 2.+4.j]]) >>> x.getfield(np.float64) array([[ 1., 0.], [ 0., 2.]]) By choosing an offset of 8 bytes we can select the complex part of the array for our view: >>> x.getfield(np.float64, offset=8) array([[ 1., 0.], [ 0., 4.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('item', """ a.item(*args) Copy an element of an array to a standard Python scalar and return it. Parameters ---------- \\*args : Arguments (variable number and type) * none: in this case, the method only works for arrays with one element (`a.size == 1`), which element is copied into a standard Python scalar object and returned. * int_type: this argument is interpreted as a flat index into the array, specifying which element to copy and return. * tuple of int_types: functions as does a single int_type argument, except that the argument is interpreted as an nd-index into the array. Returns ------- z : Standard Python scalar object A copy of the specified element of the array as a suitable Python scalar Notes ----- When the data type of `a` is longdouble or clongdouble, item() returns a scalar array object because there is no available Python scalar that would not lose information. Void arrays return a buffer object for item(), unless fields are defined, in which case a tuple is returned. `item` is very similar to a[args], except, instead of an array scalar, a standard Python scalar is returned. This can be useful for speeding up access to elements of the array and doing arithmetic on elements of the array using Python's optimized math. Examples -------- >>> x = np.random.randint(9, size=(3, 3)) >>> x array([[3, 1, 7], [2, 8, 3], [8, 5, 3]]) >>> x.item(3) 2 >>> x.item(7) 5 >>> x.item((0, 1)) 1 >>> x.item((2, 2)) 3 """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', """ a.itemset(*args) Insert scalar into an array (scalar is cast to array's dtype, if possible) There must be at least 1 argument, and define the last argument as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster than ``a[args] = item``. The item should be a scalar value and `args` must select a single item in the array `a`. Parameters ---------- \\*args : Arguments If one argument: a scalar, only used in case `a` is of size 1. If two arguments: the last argument is the value to be set and must be a scalar, the first argument specifies a single array element location. It is either an int or a tuple. Notes ----- Compared to indexing syntax, `itemset` provides some speed increase for placing a scalar into a particular location in an `ndarray`, if you must do this. However, generally this is discouraged: among other problems, it complicates the appearance of the code. Also, when using `itemset` (and `item`) inside a loop, be sure to assign the methods to a local variable to avoid the attribute look-up at each loop iteration. Examples -------- >>> x = np.random.randint(9, size=(3, 3)) >>> x array([[3, 1, 7], [2, 8, 3], [8, 5, 3]]) >>> x.itemset(4, 0) >>> x.itemset((2, 2), 9) >>> x array([[3, 1, 7], [2, 0, 3], [8, 5, 9]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('max', """ a.max(axis=None, out=None, keepdims=False) Return the maximum along a given axis. Refer to `numpy.amax` for full documentation. 
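    A quick illustrative example (see `numpy.amax` for full details):

    >>> x = np.arange(4).reshape((2, 2))
    >>> x.max()
    3
    >>> x.max(axis=0)   # maxima along the first axis
    array([2, 3])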
See Also -------- numpy.amax : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('mean', """ a.mean(axis=None, dtype=None, out=None, keepdims=False) Returns the average of the array elements along given axis. Refer to `numpy.mean` for full documentation. See Also -------- numpy.mean : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('min', """ a.min(axis=None, out=None, keepdims=False) Return the minimum along a given axis. Refer to `numpy.amin` for full documentation. See Also -------- numpy.amin : equivalent function """)) add_newdoc('numpy.core.multiarray', 'shares_memory', """ shares_memory(a, b, max_work=None) Determine if two arrays share memory Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem (maximum number of candidate solutions to consider). The following special values are recognized: max_work=MAY_SHARE_EXACT (default) The problem is solved exactly. In this case, the function returns True only if there is an element shared between the arrays. max_work=MAY_SHARE_BOUNDS Only the memory bounds of a and b are checked. Raises ------ numpy.TooHardError Exceeded max_work. Returns ------- out : bool See Also -------- may_share_memory Examples -------- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False """) add_newdoc('numpy.core.multiarray', 'may_share_memory', """ may_share_memory(a, b, max_work=None) Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem. See `shares_memory` for details. Default for ``may_share_memory`` is to do a bounds check. Returns ------- out : bool See Also -------- shares_memory Examples -------- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) >>> np.may_share_memory(x[:,0], x[:,1]) True """) add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder', """ arr.newbyteorder(new_order='S') Return the array with the same data viewed with a different byte order. Equivalent to:: arr.view(arr.dtype.newbytorder(new_order)) Changes are also made in all fields and sub-arrays of the array data type. Parameters ---------- new_order : string, optional Byte order to force; a value from the byte order specifications below. `new_order` codes can be any of: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first letter of `new_order` for the alternatives above. For example, any of 'B' or 'b' or 'biggish' are valid to specify big-endian. Returns ------- new_arr : array New array object with the dtype reflecting given change to the byte order. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', """ a.nonzero() Return the indices of the elements that are non-zero. Refer to `numpy.nonzero` for full documentation. 
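    A quick illustrative example (see `numpy.nonzero` for full details):

    >>> x = np.array([0, 2, 0, 3])
    >>> x.nonzero()
    (array([1, 3]),)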
See Also -------- numpy.nonzero : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('prod', """ a.prod(axis=None, dtype=None, out=None, keepdims=False) Return the product of the array elements over the given axis Refer to `numpy.prod` for full documentation. See Also -------- numpy.prod : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', """ a.ptp(axis=None, out=None, keepdims=False) Peak to peak (maximum - minimum) value along a given axis. Refer to `numpy.ptp` for full documentation. See Also -------- numpy.ptp : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('put', """ a.put(indices, values, mode='raise') Set ``a.flat[n] = values[n]`` for all `n` in indices. Refer to `numpy.put` for full documentation. See Also -------- numpy.put : equivalent function """)) add_newdoc('numpy.core.multiarray', 'copyto', """ copyto(dst, src, casting='same_kind', where=True) Copies values from one array to another, broadcasting as necessary. Raises a TypeError if the `casting` rule is violated, and if `where` is provided, it selects which elements to copy. .. versionadded:: 1.7.0 Parameters ---------- dst : ndarray The array into which values are copied. src : array_like The array from which values are copied. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur when copying. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of `dst`, and selects elements to copy from `src` to `dst` wherever it contains the value True. """) add_newdoc('numpy.core.multiarray', 'putmask', """ putmask(a, mask, values) Changes elements of an array based on conditional and input values. Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. If `values` is not the same size as `a` and `mask` then it will repeat. This gives behavior different from ``a[mask] = values``. Parameters ---------- a : array_like Target array. mask : array_like Boolean mask array. It has to be the same shape as `a`. values : array_like Values to put into `a` where `mask` is True. If `values` is smaller than `a` it will be repeated. See Also -------- place, put, take, copyto Examples -------- >>> x = np.arange(6).reshape(2, 3) >>> np.putmask(x, x>2, x**2) >>> x array([[ 0, 1, 2], [ 9, 16, 25]]) If `values` is smaller than `a` it is repeated: >>> x = np.arange(5) >>> np.putmask(x, x>1, [-33, -44]) >>> x array([ 0, 1, -33, -44, -33]) """) add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel', """ a.ravel([order]) Return a flattened array. Refer to `numpy.ravel` for full documentation. See Also -------- numpy.ravel : equivalent function ndarray.flat : a flat iterator on the array. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', """ a.repeat(repeats, axis=None) Repeat elements of an array. Refer to `numpy.repeat` for full documentation. See Also -------- numpy.repeat : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', """ a.reshape(shape, order='C') Returns an array containing the same data with a new shape. Refer to `numpy.reshape` for full documentation. 
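    A quick illustrative example; as the Notes below explain, the new shape
    may be passed either as separate arguments or as a single tuple:

    >>> a = np.arange(6)
    >>> a.reshape(2, 3)
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> a.reshape((2, 3))
    array([[0, 1, 2],
           [3, 4, 5]])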
See Also -------- numpy.reshape : equivalent function Notes ----- Unlike the free function `numpy.reshape`, this method on `ndarray` allows the elements of the shape parameter to be passed in as separate arguments. For example, ``a.reshape(10, 11)`` is equivalent to ``a.reshape((10, 11))``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', """ a.resize(new_shape, refcheck=True) Change shape and size of array in-place. Parameters ---------- new_shape : tuple of ints, or `n` ints Shape of resized array. refcheck : bool, optional If False, reference count will not be checked. Default is True. Returns ------- None Raises ------ ValueError If `a` does not own its own data or references or views to it exist, and the data memory must be changed. PyPy only: will always raise if the data memory must be changed, since there is no reliable way to determine if references or views to it exist. SystemError If the `order` keyword argument is specified. This behaviour is a bug in NumPy. See Also -------- resize : Return a new array with the specified shape. Notes ----- This reallocates space for the data area if necessary. Only contiguous arrays (data elements consecutive in memory) can be resized. The purpose of the reference count check is to make sure you do not use this array as a buffer for another Python object and then reallocate the memory. However, reference counts can increase in other ways so if you are sure that you have not shared the memory for this array with another Python object, then you may safely set `refcheck` to False. Examples -------- Shrinking an array: array is flattened (in the order that the data are stored in memory), resized, and reshaped: >>> a = np.array([[0, 1], [2, 3]], order='C') >>> a.resize((2, 1)) >>> a array([[0], [1]]) >>> a = np.array([[0, 1], [2, 3]], order='F') >>> a.resize((2, 1)) >>> a array([[0], [2]]) Enlarging an array: as above, but missing entries are filled with zeros: >>> b = np.array([[0, 1], [2, 3]]) >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple >>> b array([[0, 1, 2], [3, 0, 0]]) Referencing an array prevents resizing... >>> c = a >>> a.resize((1, 1)) Traceback (most recent call last): ... ValueError: cannot resize an array that has been referenced ... Unless `refcheck` is False: >>> a.resize((1, 1), refcheck=False) >>> a array([[0]]) >>> c array([[0]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('round', """ a.round(decimals=0, out=None) Return `a` with each element rounded to the given number of decimals. Refer to `numpy.around` for full documentation. See Also -------- numpy.around : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted', """ a.searchsorted(v, side='left', sorter=None) Find indices where elements of v should be inserted in a to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', """ a.setfield(val, dtype, offset=0) Put a value into a specified place in a field defined by a data-type. Place `val` into `a`'s field defined by `dtype` and beginning `offset` bytes into the field. Parameters ---------- val : object Value to be placed in field. dtype : dtype object Data-type of the field in which to place `val`. offset : int, optional The number of bytes into the field at which to place `val`. 
Returns ------- None See Also -------- getfield Examples -------- >>> x = np.eye(3) >>> x.getfield(np.float64) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> x.setfield(3, np.int32) >>> x.getfield(np.int32) array([[3, 3, 3], [3, 3, 3], [3, 3, 3]]) >>> x array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323], [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323], [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]]) >>> x.setfield(np.eye(3), np.int32) >>> x array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', """ a.setflags(write=None, align=None, uic=None) Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), respectively. These Boolean-valued flags affect how numpy interprets the memory area used by `a` (see Notes below). The ALIGNED flag can only be set to True if the data is actually aligned according to the type. The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set to True. The flag WRITEABLE can only be set to True if the array owns its own memory, or the ultimate owner of the memory exposes a writeable buffer interface, or is a string. (The exception for string is made so that unpickling can be done without copying memory.) Parameters ---------- write : bool, optional Describes whether or not `a` can be written to. align : bool, optional Describes whether or not `a` is aligned properly for its type. uic : bool, optional Describes whether or not `a` is a copy of another "base" array. Notes ----- Array flags provide information about how the memory area used for the array is to be interpreted. There are 7 Boolean flags in use, only four of which can be changed by the user: WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. WRITEABLE (W) the data area can be written to; ALIGNED (A) the data and strides are aligned appropriately for the hardware (as determined by the compiler); UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced by .base). When the C-API function PyArray_ResolveWritebackIfCopy is called, the base array will be updated with the contents of this array. All flags can be accessed using the single (upper case) letter as well as the full name. Examples -------- >>> y array([[3, 1, 7], [2, 0, 0], [8, 5, 9]]) >>> y.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(write=0, align=0) >>> y.flags C_CONTIGUOUS : True F_CONTIGUOUS : False OWNDATA : True WRITEABLE : False ALIGNED : False WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(uic=1) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: cannot set WRITEBACKIFCOPY flag to True """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', """ a.sort(axis=-1, kind='quicksort', order=None) Sort an array, in-place. Parameters ---------- axis : int, optional Axis along which to sort. Default is -1, which means sort along the last axis. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. Default is 'quicksort'. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. 
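    A minimal sketch of the in-place behaviour with an explicit ``kind``
    (illustrative only; ``np`` is NumPy as elsewhere in these examples):

    >>> a = np.array([3, 1, 2])
    >>> a.sort(kind='stable')
    >>> a
    array([1, 2, 3])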
See Also -------- numpy.sort : Return a sorted copy of an array. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in sorted array. partition: Partial sort. Notes ----- See ``sort`` for notes on the different sorting algorithms. Examples -------- >>> a = np.array([[1,4], [3,1]]) >>> a.sort(axis=1) >>> a array([[1, 4], [1, 3]]) >>> a.sort(axis=0) >>> a array([[1, 3], [1, 4]]) Use the `order` keyword to specify a field to use when sorting a structured array: >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) >>> a.sort(order='y') >>> a array([('c', 1), ('a', 2)], dtype=[('x', '|S1'), ('y', '<i4')]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', """ a.partition(kth, axis=-1, kind='introselect', order=None) Rearranges the elements in the array in such a way that the value of the element in kth position is in the position it would be in a sorted array. All elements smaller than the kth element are moved before this element and all equal or greater are moved behind it. The ordering of the elements in the two partitions is undefined. .. versionadded:: 1.8.0 Parameters ---------- kth : int or sequence of ints Element index to partition by. The kth element value will be in its final sorted position and all smaller elements will be moved before it and all equal or greater elements behind it. The order of all elements in the partitions is undefined. If provided with a sequence of kth it will partition all elements indexed by kth of them into their sorted position at once. axis : int, optional Axis along which to sort. Default is -1, which means sort along the last axis. kind : {'introselect'}, optional Selection algorithm. Default is 'introselect'. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can be specified as a string, and not all fields need to be specified, but unspecified fields will still be used, in the order in which they come up in the dtype, to break ties. See Also -------- numpy.partition : Return a parititioned copy of an array. argpartition : Indirect partition. sort : Full sort. Notes ----- See ``np.partition`` for notes on the different algorithms. Examples -------- >>> a = np.array([3, 4, 2, 1]) >>> a.partition(3) >>> a array([2, 1, 3, 4]) >>> a.partition((1, 3)) array([1, 2, 3, 4]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze', """ a.squeeze(axis=None) Remove single-dimensional entries from the shape of `a`. Refer to `numpy.squeeze` for full documentation. See Also -------- numpy.squeeze : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('std', """ a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False) Returns the standard deviation of the array elements along given axis. Refer to `numpy.std` for full documentation. See Also -------- numpy.std : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('sum', """ a.sum(axis=None, dtype=None, out=None, keepdims=False) Return the sum of the array elements over the given axis. Refer to `numpy.sum` for full documentation. See Also -------- numpy.sum : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes', """ a.swapaxes(axis1, axis2) Return a view of the array with `axis1` and `axis2` interchanged. Refer to `numpy.swapaxes` for full documentation. 
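    A brief illustrative sketch (``np`` is NumPy, as in the other examples;
    the result is a view, so only the shape is shown):

    >>> x = np.zeros((2, 3, 4))
    >>> x.swapaxes(0, 2).shape
    (4, 3, 2)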
See Also -------- numpy.swapaxes : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('take', """ a.take(indices, axis=None, out=None, mode='raise') Return an array formed from the elements of `a` at the given indices. Refer to `numpy.take` for full documentation. See Also -------- numpy.take : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile', """ a.tofile(fid, sep="", format="%s") Write array to a file as text or binary (default). Data is always written in 'C' order, independent of the order of `a`. The data produced by this method can be recovered using the function fromfile(). Parameters ---------- fid : file or str An open file object, or a string containing a filename. sep : str Separator between array items for text output. If "" (empty), a binary file is written, equivalent to ``file.write(a.tobytes())``. format : str Format string for text file output. Each entry in the array is formatted to text by first converting it to the closest Python type, and then using "format" % item. Notes ----- This is a convenience function for quick storage of array data. Information on endianness and precision is lost, so this method is not a good choice for files intended to archive data or transport data between machines with different endianness. Some of these problems can be overcome by outputting the data as text files, at the expense of speed and file size. When fid is a file object, array contents are directly written to the file, bypassing the file object's ``write`` method. As a result, tofile cannot be used with files objects supporting compression (e.g., GzipFile) or file-like objects that do not support ``fileno()`` (e.g., BytesIO). """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', """ a.tolist() Return the array as a (possibly nested) list. Return a copy of the array data as a (nested) Python list. Data items are converted to the nearest compatible Python type. Parameters ---------- none Returns ------- y : list The possibly nested list of array elements. Notes ----- The array may be recreated, ``a = np.array(a.tolist())``. Examples -------- >>> a = np.array([1, 2]) >>> a.tolist() [1, 2] >>> a = np.array([[1, 2], [3, 4]]) >>> list(a) [array([1, 2]), array([3, 4])] >>> a.tolist() [[1, 2], [3, 4]] """)) tobytesdoc = """ a.{name}(order='C') Construct Python bytes containing the raw data bytes in the array. Constructs Python bytes showing a copy of the raw contents of data memory. The bytes object can be produced in either 'C' or 'Fortran', or 'Any' order (the default is 'C'-order). 'Any' order means C-order unless the F_CONTIGUOUS flag in the array is set, in which case it means 'Fortran' order. {deprecated} Parameters ---------- order : {{'C', 'F', None}}, optional Order of the data for multidimensional arrays: C, Fortran, or the same as for the original array. Returns ------- s : bytes Python bytes exhibiting a copy of `a`'s raw data. Examples -------- >>> x = np.array([[0, 1], [2, 3]]) >>> x.tobytes() b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' >>> x.tobytes('C') == x.tobytes() True >>> x.tobytes('F') b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' """ add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', tobytesdoc.format(name='tostring', deprecated= 'This function is a compatibility ' 'alias for tobytes. 
Despite its ' 'name it returns bytes not ' 'strings.'))) add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', tobytesdoc.format(name='tobytes', deprecated='.. versionadded:: 1.9.0'))) add_newdoc('numpy.core.multiarray', 'ndarray', ('trace', """ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) Return the sum along diagonals of the array. Refer to `numpy.trace` for full documentation. See Also -------- numpy.trace : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose', """ a.transpose(*axes) Returns a view of the array with axes transposed. For a 1-D array, this has no effect. (To change between column and row vectors, first cast the 1-D array into a matrix object.) For a 2-D array, this is the usual matrix transpose. For an n-D array, if axes are given, their order indicates how the axes are permuted (see Examples). If axes are not provided and ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``. Parameters ---------- axes : None, tuple of ints, or `n` ints * None or no argument: reverses the order of the axes. * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s `i`-th axis becomes `a.transpose()`'s `j`-th axis. * `n` ints: same as an n-tuple of the same ints (this form is intended simply as a "convenience" alternative to the tuple form) Returns ------- out : ndarray View of `a`, with axes suitably permuted. See Also -------- ndarray.T : Array property returning the array transposed. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], [3, 4]]) >>> a.transpose() array([[1, 3], [2, 4]]) >>> a.transpose((1, 0)) array([[1, 3], [2, 4]]) >>> a.transpose(1, 0) array([[1, 3], [2, 4]]) """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('var', """ a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False) Returns the variance of the array elements, along given axis. Refer to `numpy.var` for full documentation. See Also -------- numpy.var : equivalent function """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('view', """ a.view(dtype=None, type=None) New view of array with the same data. Parameters ---------- dtype : data-type or ndarray sub-class, optional Data-type descriptor of the returned view, e.g., float32 or int16. The default, None, results in the view having the same data-type as `a`. This argument can also be specified as an ndarray sub-class, which then specifies the type of the returned object (this is equivalent to setting the ``type`` parameter). type : Python type, optional Type of the returned view, e.g., ndarray or matrix. Again, the default None results in type preservation. Notes ----- ``a.view()`` is used two different ways: ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view of the array's memory with a different data-type. This can cause a reinterpretation of the bytes of memory. ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just returns an instance of `ndarray_subclass` that looks at the same array (same shape, dtype, etc.) This does not cause a reinterpretation of the memory. For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of bytes per entry than the previous dtype (for example, converting a regular array to a structured array), then the behavior of the view cannot be predicted just from the superficial appearance of ``a`` (shown by ``print(a)``). It also depends on exactly how ``a`` is stored in memory. 
Therefore if ``a`` is C-ordered versus fortran-ordered, versus defined as a slice or transpose, etc., the view may give different results. Examples -------- >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) Viewing array data using a different type and dtype: >>> y = x.view(dtype=np.int16, type=np.matrix) >>> y matrix([[513]], dtype=int16) >>> print(type(y)) <class 'numpy.matrixlib.defmatrix.matrix'> Creating a view on a structured array so it can be used in calculations >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)]) >>> xv = x.view(dtype=np.int8).reshape(-1,2) >>> xv array([[1, 2], [3, 4]], dtype=int8) >>> xv.mean(0) array([ 2., 3.]) Making changes to the view changes the underlying array >>> xv[0,1] = 20 >>> print(x) [(1, 20) (3, 4)] Using a view to convert an array to a recarray: >>> z = x.view(np.recarray) >>> z.a array([1], dtype=int8) Views share data: >>> x[0] = (9, 10) >>> z[0] (9, 10) Views that change the dtype size (bytes per entry) should normally be avoided on arrays defined by slices, transposes, fortran-ordering, etc.: >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16) >>> y = x[:, 0:2] >>> y array([[1, 2], [4, 5]], dtype=int16) >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: new type not compatible with array. >>> z = y.copy() >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) array([[(1, 2)], [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')]) """)) ############################################################################## # # umath functions # ############################################################################## add_newdoc('numpy.core.umath', 'frompyfunc', """ frompyfunc(func, nin, nout) Takes an arbitrary Python function and returns a NumPy ufunc. Can be used, for example, to add broadcasting to a built-in Python function (see Examples section). Parameters ---------- func : Python function object An arbitrary Python function. nin : int The number of input arguments. nout : int The number of objects returned by `func`. Returns ------- out : ufunc Returns a NumPy universal function (``ufunc``) object. See Also -------- vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy Notes ----- The returned ufunc always returns PyObject arrays. Examples -------- Use frompyfunc to add broadcasting to the Python function ``oct``: >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) array([012, 036, 0144], dtype=object) >>> np.array((oct(10), oct(30), oct(100))) # for comparison array(['012', '036', '0144'], dtype='|S4') """) add_newdoc('numpy.core.umath', 'geterrobj', """ geterrobj() Return the current object that defines floating-point error handling. The error object contains all information that defines the error handling behavior in NumPy. `geterrobj` is used internally by the other functions that get and set error handling behavior (`geterr`, `seterr`, `geterrcall`, `seterrcall`). Returns ------- errobj : list The error object, a list containing three elements: [internal numpy buffer size, error mask, error callback function]. The error mask is a single integer that holds the treatment information on all four floating point errors. The information for each error type is contained in three bits of the integer. If we print it in base 8, we can see what treatment is set for "invalid", "under", "over", and "divide" (in that order). 
The printed string can be interpreted with * 0 : 'ignore' * 1 : 'warn' * 2 : 'raise' * 3 : 'call' * 4 : 'print' * 5 : 'log' See Also -------- seterrobj, seterr, geterr, seterrcall, geterrcall getbufsize, setbufsize Notes ----- For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- >>> np.geterrobj() # first get the defaults [10000, 0, None] >>> def err_handler(type, flag): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> old_bufsize = np.setbufsize(20000) >>> old_err = np.seterr(divide='raise') >>> old_handler = np.seterrcall(err_handler) >>> np.geterrobj() [20000, 2, <function err_handler at 0x91dcaac>] >>> old_err = np.seterr(all='ignore') >>> np.base_repr(np.geterrobj()[1], 8) '0' >>> old_err = np.seterr(divide='warn', over='log', under='call', invalid='print') >>> np.base_repr(np.geterrobj()[1], 8) '4351' """) add_newdoc('numpy.core.umath', 'seterrobj', """ seterrobj(errobj) Set the object that defines floating-point error handling. The error object contains all information that defines the error handling behavior in NumPy. `seterrobj` is used internally by the other functions that set error handling behavior (`seterr`, `seterrcall`). Parameters ---------- errobj : list The error object, a list containing three elements: [internal numpy buffer size, error mask, error callback function]. The error mask is a single integer that holds the treatment information on all four floating point errors. The information for each error type is contained in three bits of the integer. If we print it in base 8, we can see what treatment is set for "invalid", "under", "over", and "divide" (in that order). The printed string can be interpreted with * 0 : 'ignore' * 1 : 'warn' * 2 : 'raise' * 3 : 'call' * 4 : 'print' * 5 : 'log' See Also -------- geterrobj, seterr, geterr, seterrcall, geterrcall getbufsize, setbufsize Notes ----- For complete documentation of the types of floating-point exceptions and treatment options, see `seterr`. Examples -------- >>> old_errobj = np.geterrobj() # first get the defaults >>> old_errobj [10000, 0, None] >>> def err_handler(type, flag): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... >>> new_errobj = [20000, 12, err_handler] >>> np.seterrobj(new_errobj) >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn') '14' >>> np.geterr() {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'} >>> np.geterrcall() is err_handler True """) ############################################################################## # # compiled_base functions # ############################################################################## add_newdoc('numpy.core.multiarray', 'digitize', """ digitize(x, bins, right=False) Return the indices of the bins to which each value in input array belongs. ========= ============= ============================ `right` order of bins returned index `i` satisfies ========= ============= ============================ ``False`` increasing ``bins[i-1] <= x < bins[i]`` ``True`` increasing ``bins[i-1] < x <= bins[i]`` ``False`` decreasing ``bins[i-1] > x >= bins[i]`` ``True`` decreasing ``bins[i-1] >= x > bins[i]`` ========= ============= ============================ If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. Parameters ---------- x : array_like Input array to be binned. Prior to NumPy 1.10.0, this array had to be 1-dimensional, but can now have any shape. 
bins : array_like Array of bins. It has to be 1-dimensional and monotonic. right : bool, optional Indicating whether the intervals include the right or the left bin edge. Default behavior is (right==False) indicating that the interval does not include the right edge. The left bin end is open in this case, i.e., bins[i-1] <= x < bins[i] is the default behavior for monotonically increasing bins. Returns ------- indices : ndarray of ints Output array of indices, of same shape as `x`. Raises ------ ValueError If `bins` is not monotonic. TypeError If the type of the input is complex. See Also -------- bincount, histogram, unique, searchsorted Notes ----- If values in `x` are such that they fall outside the bin range, attempting to index `bins` with the indices that `digitize` returns will result in an IndexError. .. versionadded:: 1.10.0 `np.digitize` is implemented in terms of `np.searchsorted`. This means that a binary search is used to bin the values, which scales much better for larger number of bins than the previous linear search. It also removes the requirement for the input array to be 1-dimensional. For monotonically _increasing_ `bins`, the following are equivalent:: np.digitize(x, bins, right=True) np.searchsorted(bins, x, side='left') Note that as the order of the arguments are reversed, the side must be too. The `searchsorted` call is marginally faster, as it does not do any monotonicity checks. Perhaps more importantly, it supports all dtypes. Examples -------- >>> x = np.array([0.2, 6.4, 3.0, 1.6]) >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) >>> inds = np.digitize(x, bins) >>> inds array([1, 4, 3, 2]) >>> for n in range(x.size): ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) ... 0.0 <= 0.2 < 1.0 4.0 <= 6.4 < 10.0 2.5 <= 3.0 < 4.0 1.0 <= 1.6 < 2.5 >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) >>> bins = np.array([0, 5, 10, 15, 20]) >>> np.digitize(x,bins,right=True) array([1, 2, 3, 4, 4]) >>> np.digitize(x,bins,right=False) array([1, 3, 3, 4, 5]) """) add_newdoc('numpy.core.multiarray', 'bincount', """ bincount(x, weights=None, minlength=0) Count number of occurrences of each value in array of non-negative ints. The number of bins (of size 1) is one larger than the largest value in `x`. If `minlength` is specified, there will be at least this number of bins in the output array (though it will be longer if necessary, depending on the contents of `x`). Each bin gives the number of occurrences of its index value in `x`. If `weights` is specified the input array is weighted by it, i.e. if a value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead of ``out[n] += 1``. Parameters ---------- x : array_like, 1 dimension, nonnegative ints Input array. weights : array_like, optional Weights, array of the same shape as `x`. minlength : int, optional A minimum number of bins for the output array. .. versionadded:: 1.6.0 Returns ------- out : ndarray of ints The result of binning the input array. The length of `out` is equal to ``np.amax(x)+1``. Raises ------ ValueError If the input is not 1-dimensional, or contains elements with negative values, or if `minlength` is negative. TypeError If the type of the input is float or complex. 
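    A brief sketch of the `minlength` parameter (illustrative; ``np`` is
    NumPy as in the examples that follow):

    >>> np.bincount(np.array([0, 1, 1]), minlength=5)
    array([1, 2, 0, 0, 0])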
See Also -------- histogram, digitize, unique Examples -------- >>> np.bincount(np.arange(5)) array([1, 1, 1, 1, 1]) >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) array([1, 3, 1, 1, 0, 0, 0, 1]) >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) >>> np.bincount(x).size == np.amax(x)+1 True The input array needs to be of integer dtype, otherwise a TypeError is raised: >>> np.bincount(np.arange(5, dtype=float)) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: array cannot be safely cast to required type A possible use of ``bincount`` is to perform sums over variable-size chunks of an array, using the ``weights`` keyword. >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights >>> x = np.array([0, 1, 1, 2, 2, 2]) >>> np.bincount(x, weights=w) array([ 0.3, 0.7, 1.1]) """) add_newdoc('numpy.core.multiarray', 'ravel_multi_index', """ ravel_multi_index(multi_index, dims, mode='raise', order='C') Converts a tuple of index arrays into an array of flat indices, applying boundary modes to the multi-index. Parameters ---------- multi_index : tuple of array_like A tuple of integer arrays, one array for each dimension. dims : tuple of ints The shape of array into which the indices from ``multi_index`` apply. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices are handled. Can specify either one mode or a tuple of modes, one mode per index. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range In 'clip' mode, a negative index which would normally wrap will clip to 0 instead. order : {'C', 'F'}, optional Determines whether the multi-index should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. Returns ------- raveled_indices : ndarray An array of indices into the flattened version of an array of dimensions ``dims``. See Also -------- unravel_index Notes ----- .. versionadded:: 1.6.0 Examples -------- >>> arr = np.array([[3,6,6],[4,5,1]]) >>> np.ravel_multi_index(arr, (7,6)) array([22, 41, 37]) >>> np.ravel_multi_index(arr, (7,6), order='F') array([31, 41, 13]) >>> np.ravel_multi_index(arr, (4,6), mode='clip') array([22, 23, 19]) >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')) array([12, 13, 13]) >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9)) 1621 """) add_newdoc('numpy.core.multiarray', 'unravel_index', """ unravel_index(indices, dims, order='C') Converts a flat index or array of flat indices into a tuple of coordinate arrays. Parameters ---------- indices : array_like An integer array whose elements are indices into the flattened version of an array of dimensions ``dims``. Before version 1.6.0, this function accepted just one index value. dims : tuple of ints The shape of the array to use for unraveling ``indices``. order : {'C', 'F'}, optional Determines whether the indices should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. .. versionadded:: 1.6.0 Returns ------- unraveled_coords : tuple of ndarray Each array in the tuple has the same shape as the ``indices`` array. See Also -------- ravel_multi_index Examples -------- >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1) """) add_newdoc('numpy.core.multiarray', 'add_docstring', """ add_docstring(obj, docstring) Add a docstring to a built-in obj if possible. 
If the obj already has a docstring raise a RuntimeError If this routine does not know how to add a docstring to the object raise a TypeError """) add_newdoc('numpy.core.umath', '_add_newdoc_ufunc', """ add_ufunc_docstring(ufunc, new_docstring) Replace the docstring for a ufunc with new_docstring. This method will only work if the current docstring for the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.) Parameters ---------- ufunc : numpy.ufunc A ufunc whose current doc is NULL. new_docstring : string The new docstring for the ufunc. Notes ----- This method allocates memory for new_docstring on the heap. Technically this creates a mempory leak, since this memory will not be reclaimed until the end of the program even if the ufunc itself is removed. However this will only be a problem if the user is repeatedly creating ufuncs with no documentation, adding documentation via add_newdoc_ufunc, and then throwing away the ufunc. """) add_newdoc('numpy.core.multiarray', 'packbits', """ packbits(myarray, axis=None) Packs the elements of a binary-valued array into bits in a uint8 array. The result is padded to full bytes by inserting zero bits at the end. Parameters ---------- myarray : array_like An array of integers or booleans whose elements should be packed to bits. axis : int, optional The dimension over which bit-packing is done. ``None`` implies packing the flattened array. Returns ------- packed : ndarray Array of type uint8 whose elements represent bits corresponding to the logical (0 or nonzero) value of the input elements. The shape of `packed` has the same number of dimensions as the input (unless `axis` is None, in which case the output is 1-D). See Also -------- unpackbits: Unpacks elements of a uint8 array into a binary-valued output array. Examples -------- >>> a = np.array([[[1,0,1], ... [0,1,0]], ... [[1,1,0], ... [0,0,1]]]) >>> b = np.packbits(a, axis=-1) >>> b array([[[160],[64]],[[192],[32]]], dtype=uint8) Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, and 32 = 0010 0000. """) add_newdoc('numpy.core.multiarray', 'unpackbits', """ unpackbits(myarray, axis=None) Unpacks elements of a uint8 array into a binary-valued output array. Each element of `myarray` represents a bit-field that should be unpacked into a binary-valued output array. The shape of the output array is either 1-D (if `axis` is None) or the same shape as the input array with unpacking done along the axis specified. Parameters ---------- myarray : ndarray, uint8 type Input array. axis : int, optional The dimension over which bit-unpacking is done. ``None`` implies unpacking the flattened array. Returns ------- unpacked : ndarray, uint8 type The elements are binary-valued (0 or 1). See Also -------- packbits : Packs the elements of a binary-valued array into bits in a uint8 array. Examples -------- >>> a = np.array([[2], [7], [23]], dtype=np.uint8) >>> a array([[ 2], [ 7], [23]], dtype=uint8) >>> b = np.unpackbits(a, axis=1) >>> b array([[0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) """) add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g', """ format_float_OSprintf_g(val, precision) Print a floating point scalar using the system's printf function, equivalent to: printf("%.*g", precision, val); for half/float/double, or replacing 'g' by 'Lg' for longdouble. This method is designed to help cross-validate the format_float_* methods. Parameters ---------- val : python float or numpy floating scalar Value to format. 
precision : non-negative integer, optional Precision given to printf. Returns ------- rep : string The string representation of the floating point value See Also -------- format_float_scientific format_float_positional """) ############################################################################## # # Documentation for ufunc attributes and methods # ############################################################################## ############################################################################## # # ufunc object # ############################################################################## add_newdoc('numpy.core', 'ufunc', """ Functions that operate element by element on whole arrays. To see the documentation for a specific ufunc, use `info`. For example, ``np.info(np.sin)``. Because ufuncs are written in C (for speed) and linked into Python with NumPy's ufunc facility, Python's help() function finds this page whenever help() is called on a ufunc. A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. Calling ufuncs: =============== op(*x[, out], where=True, **kwargs) Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. The broadcasting rules are: * Dimensions of length 1 may be prepended to either array. * Arrays may be repeated along dimensions of length 1. Parameters ---------- *x : array_like Input arrays. out : ndarray, None, or tuple of ndarray and None, optional Alternate array object(s) in which to put the result; if provided, it must have a shape that the inputs broadcast to. A tuple of arrays (possible only as a keyword argument) must have length equal to the number of outputs; use `None` for uninitialized outputs to be allocated by the ufunc. where : array_like, optional Values of True indicate to calculate the ufunc at that position, values of False indicate to leave the value in the output alone. Note that if an uninitialized return array is created via the default ``out=None``, then the elements where the values are False will remain uninitialized. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`. Returns ------- r : ndarray or tuple of ndarray `r` will have the shape that the arrays in `x` broadcast to; if `out` is provided, it will be returned. If not, `r` will be allocated and may contain uninitialized values. If the function has more than one output, then the result will be a tuple of arrays. """) ############################################################################## # # ufunc attributes # ############################################################################## add_newdoc('numpy.core', 'ufunc', ('identity', """ The identity value. Data attribute containing the identity element for the ufunc, if it has one. If it does not, the attribute value is None. Examples -------- >>> np.add.identity 0 >>> np.multiply.identity 1 >>> np.power.identity 1 >>> print(np.exp.identity) None """)) add_newdoc('numpy.core', 'ufunc', ('nargs', """ The number of arguments. Data attribute containing the number of arguments the ufunc takes, including optional ones. Notes ----- Typically this value will be one more than what you might expect because all ufuncs take the optional "out" argument. Examples -------- >>> np.add.nargs 3 >>> np.multiply.nargs 3 >>> np.power.nargs 3 >>> np.exp.nargs 2 """)) add_newdoc('numpy.core', 'ufunc', ('nin', """ The number of inputs. Data attribute containing the number of arguments the ufunc treats as input. 
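    A quick illustrative cross-check relating this attribute to `nargs` and
    `nout` (a sketch; ``np`` is NumPy, as in the examples below):

    >>> np.add.nin + np.add.nout == np.add.nargs
    True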
Examples -------- >>> np.add.nin 2 >>> np.multiply.nin 2 >>> np.power.nin 2 >>> np.exp.nin 1 """)) add_newdoc('numpy.core', 'ufunc', ('nout', """ The number of outputs. Data attribute containing the number of arguments the ufunc treats as output. Notes ----- Since all ufuncs can take output arguments, this will always be (at least) 1. Examples -------- >>> np.add.nout 1 >>> np.multiply.nout 1 >>> np.power.nout 1 >>> np.exp.nout 1 """)) add_newdoc('numpy.core', 'ufunc', ('ntypes', """ The number of types. The number of numerical NumPy types - of which there are 18 total - on which the ufunc can operate. See Also -------- numpy.ufunc.types Examples -------- >>> np.add.ntypes 18 >>> np.multiply.ntypes 18 >>> np.power.ntypes 17 >>> np.exp.ntypes 7 >>> np.remainder.ntypes 14 """)) add_newdoc('numpy.core', 'ufunc', ('types', """ Returns a list with types grouped input->output. Data attribute listing the data-type "Domain-Range" groupings the ufunc can deliver. The data-types are given using the character codes. See Also -------- numpy.ufunc.ntypes Examples -------- >>> np.add.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.multiply.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.power.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'OO->O'] >>> np.exp.types ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] >>> np.remainder.types ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O'] """)) add_newdoc('numpy.core', 'ufunc', ('signature', """ Definition of the core elements a generalized ufunc operates on. The signature determines how the dimensions of each input/output array are split into core and loop dimensions: 1. Each dimension in the signature is matched to a dimension of the corresponding passed-in array, starting from the end of the shape tuple. 2. Core dimensions assigned to the same label in the signature must have exactly matching sizes, no broadcasting is performed. 3. The core dimensions are removed from all inputs and the remaining dimensions are broadcast together, defining the loop dimensions. Notes ----- Generalized ufuncs are used internally in many linalg functions, and in the testing suite; the examples below are taken from these. For ufuncs that operate on scalars, the signature is `None`, which is equivalent to '()' for every argument. Examples -------- >>> np.core.umath_tests.matrix_multiply.signature '(m,n),(n,p)->(m,p)' >>> np.linalg._umath_linalg.det.signature '(m,m)->()' >>> np.add.signature is None True # equivalent to '(),()->()' """)) ############################################################################## # # ufunc methods # ############################################################################## add_newdoc('numpy.core', 'ufunc', ('reduce', """ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial) Reduces `a`'s dimension by one, by applying ufunc along one axis. Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. 
Then :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` = the result of iterating `j` over :math:`range(N_i)`, cumulatively applying ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`. For a one-dimensional array, reduce produces results equivalent to: :: r = op.identity # op = ufunc for i in range(len(A)): r = op(r, A[i]) return r For example, add.reduce() is equivalent to sum(). Parameters ---------- a : array_like The array to act on. axis : None or int or tuple of ints, optional Axis or axes along which a reduction is performed. The default (`axis` = 0) is perform a reduction over the first dimension of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.7.0 If this is `None`, a reduction is performed over all the axes. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. For operations which are either not commutative or not associative, doing a reduction over multiple axes is not well-defined. The ufuncs do not currently raise an exception in this case, but will likely do so in the future. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data-type of the output array if this is provided, or the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. .. versionadded:: 1.7.0 initial : scalar, optional The value with which to start the reduction. If the ufunc has no identity or the dtype is object, this defaults to None - otherwise it defaults to ufunc.identity. If ``None`` is given, the first element of the reduction is used, and an error is thrown if the reduction is empty. .. versionadded:: 1.15.0 Returns ------- r : ndarray The reduced array. If `out` was supplied, `r` is a reference to it. Examples -------- >>> np.multiply.reduce([2,3,5]) 30 A multi-dimensional array example: >>> X = np.arange(8).reshape((2,2,2)) >>> X array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.add.reduce(X, 0) array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X) # confirm: default axis value is 0 array([[ 4, 6], [ 8, 10]]) >>> np.add.reduce(X, 1) array([[ 2, 4], [10, 12]]) >>> np.add.reduce(X, 2) array([[ 1, 5], [ 9, 13]]) You can use the ``initial`` keyword argument to initialize the reduction with a different value. >>> np.add.reduce([10], initial=5) 15 >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initializer=10) array([14., 14.]) Allows reductions of empty arrays where they would normally fail, i.e. for ufuncs without an identity. >>> np.minimum.reduce([], initial=np.inf) inf >>> np.minimum.reduce([]) Traceback (most recent call last): ... ValueError: zero-size array to reduction operation minimum which has no identity """)) add_newdoc('numpy.core', 'ufunc', ('accumulate', """ accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. 
For a one-dimensional array, accumulate produces results equivalent to:: r = np.empty(len(A)) t = op.identity # op = the ufunc being applied to A's elements for i in range(len(A)): t = op(t, A[i]) r[i] = t return r For example, add.accumulate() is equivalent to np.cumsum(). For a multi-dimensional array, accumulate is applied along only one axis (axis zero by default; see Examples below) so repeated use is necessary if one wants to accumulate over multiple axes. Parameters ---------- array : array_like The array to act on. axis : int, optional The axis along which to apply the accumulation; default is zero. dtype : data-type code, optional The data-type used to represent the intermediate results. Defaults to the data-type of the output array if such is provided, or the the data-type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. Returns ------- r : ndarray The accumulated values. If `out` was supplied, `r` is a reference to `out`. Examples -------- 1-D array examples: >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) array([ 2, 6, 30]) 2-D array examples: >>> I = np.eye(2) >>> I array([[ 1., 0.], [ 0., 1.]]) Accumulate along axis 0 (rows), down columns: >>> np.add.accumulate(I, 0) array([[ 1., 0.], [ 1., 1.]]) >>> np.add.accumulate(I) # no axis specified = axis zero array([[ 1., 0.], [ 1., 1.]]) Accumulate along axis 1 (columns), through rows: >>> np.add.accumulate(I, 1) array([[ 1., 1.], [ 0., 1.]]) """)) add_newdoc('numpy.core', 'ufunc', ('reduceat', """ reduceat(a, indices, axis=0, dtype=None, out=None) Performs a (local) reduce with specified slices over a single axis. For i in ``range(len(indices))``, `reduceat` computes ``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th generalized "row" parallel to `axis` in the final result (i.e., in a 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if `axis = 1`, it becomes the i-th column). There are three exceptions to this: * when ``i = len(indices) - 1`` (so for the last index), ``indices[i+1] = a.shape[axis]``. * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is simply ``a[indices[i]]``. * if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. The shape of the output depends on the size of `indices`, and may be larger than `a` (this happens if ``len(indices) > a.shape[axis]``). Parameters ---------- a : array_like The array to act on. indices : array_like Paired indices, comma separated (not colon), specifying slices to reduce. axis : int, optional The axis along which to apply the reduceat. dtype : data-type code, optional The type used to represent the intermediate results. Defaults to the data type of the output array if this is provided, or the data type of the input array if no output array is provided. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. 
Returns ------- r : ndarray The reduced values. If `out` was supplied, `r` is a reference to `out`. Notes ----- A descriptive example: If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as ``ufunc.reduceat(a, indices)[::2]`` where `indices` is ``range(len(array) - 1)`` with a zero placed in every other element: ``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. Don't be fooled by this attribute's name: `reduceat(a)` is not necessarily smaller than `a`. Examples -------- To take the running sum of four successive values: >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) A 2-D example: >>> x = np.linspace(0, 15, 16).reshape(4,4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) :: # reduce such that the result has the following five rows: # [row1 + row2 + row3] # [row4] # [row2] # [row3] # [row1 + row2 + row3 + row4] >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) array([[ 12., 15., 18., 21.], [ 12., 13., 14., 15.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 24., 28., 32., 36.]]) :: # reduce such that result has the following two columns: # [col1 * col2 * col3, col4] >>> np.multiply.reduceat(x, [0, 3], 1) array([[ 0., 3.], [ 120., 7.], [ 720., 11.], [ 2184., 15.]]) """)) add_newdoc('numpy.core', 'ufunc', ('outer', """ outer(A, B, **kwargs) Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of ``op.outer(A, B)`` is an array of dimension M + N such that: .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) For `A` and `B` one-dimensional, this is equivalent to:: r = empty(len(A),len(B)) for i in range(len(A)): for j in range(len(B)): r[i,j] = op(A[i], B[j]) # op = ufunc in question Parameters ---------- A : array_like First array B : array_like Second array kwargs : any Arguments to pass on to the ufunc. Typically `dtype` or `out`. Returns ------- r : ndarray Output array See Also -------- numpy.outer Examples -------- >>> np.multiply.outer([1, 2, 3], [4, 5, 6]) array([[ 4, 5, 6], [ 8, 10, 12], [12, 15, 18]]) A multi-dimensional example: >>> A = np.array([[1, 2, 3], [4, 5, 6]]) >>> A.shape (2, 3) >>> B = np.array([[1, 2, 3, 4]]) >>> B.shape (1, 4) >>> C = np.multiply.outer(A, B) >>> C.shape; C (2, 3, 1, 4) array([[[[ 1, 2, 3, 4]], [[ 2, 4, 6, 8]], [[ 3, 6, 9, 12]]], [[[ 4, 8, 12, 16]], [[ 5, 10, 15, 20]], [[ 6, 12, 18, 24]]]]) """)) add_newdoc('numpy.core', 'ufunc', ('at', """ at(a, indices, b=None) Performs unbuffered in place operation on operand 'a' for elements specified by 'indices'. For addition ufunc, this method is equivalent to ``a[indices] += b``, except that results are accumulated for elements that are indexed more than once. For example, ``a[[0,0]] += 1`` will only increment the first element once because of buffering, whereas ``add.at(a, [0,0], 1)`` will increment the first element twice. .. versionadded:: 1.8.0 Parameters ---------- a : array_like The array to perform in place operation on. indices : array_like or tuple Array like index object or slice object for indexing into first operand. If first operand has multiple dimensions, indices can be a tuple of array like index objects or slice objects. b : array_like Second operand for ufuncs requiring two operands. Operand must be broadcastable over first operand after indexing or slicing. 
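    A minimal sketch contrasting the buffered fancy-index assignment
    described above with ``at`` (illustrative; output shown is
    representative):

    >>> a = np.zeros(3, dtype=int)
    >>> a[[0, 0]] += 1            # buffered: index 0 is incremented once
    >>> a
    array([1, 0, 0])
    >>> np.add.at(a, [0, 0], 1)   # unbuffered: index 0 is incremented twice
    >>> a
    array([3, 0, 0])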
Examples -------- Set items 0 and 1 to their negative values: >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> print(a) array([-1, -2, 3, 4]) Increment items 0 and 1, and increment item 2 twice: >>> a = np.array([1, 2, 3, 4]) >>> np.add.at(a, [0, 1, 2, 2], 1) >>> print(a) array([2, 3, 5, 4]) Add items 0 and 1 in first array to second array, and store results in first array: >>> a = np.array([1, 2, 3, 4]) >>> b = np.array([1, 2]) >>> np.add.at(a, [0, 1], b) >>> print(a) array([2, 4, 3, 4]) """)) ############################################################################## # # Documentation for dtype attributes and methods # ############################################################################## ############################################################################## # # dtype object # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', """ dtype(obj, align=False, copy=False) Create a data type object. A numpy array is homogeneous, and contains elements described by a dtype object. A dtype object can be constructed from different combinations of fundamental numeric types. Parameters ---------- obj Object to be converted to a data type object. align : bool, optional Add padding to the fields to match what a C compiler would output for a similar C-struct. Can be ``True`` only if `obj` is a dictionary or a comma-separated string. If a struct dtype is being created, this also sets a sticky alignment flag ``isalignedstruct``. copy : bool, optional Make a new copy of the data-type object. If ``False``, the result may just be a reference to a built-in data-type object. See also -------- result_type Examples -------- Using array-scalar type: >>> np.dtype(np.int16) dtype('int16') Structured type, one field name 'f1', containing int16: >>> np.dtype([('f1', np.int16)]) dtype([('f1', '<i2')]) Structured type, one field named 'f1', in itself containing a structured type with one field: >>> np.dtype([('f1', [('f1', np.int16)])]) dtype([('f1', [('f1', '<i2')])]) Structured type, two fields: the first field contains an unsigned int, the second an int32: >>> np.dtype([('f1', np.uint), ('f2', np.int32)]) dtype([('f1', '<u4'), ('f2', '<i4')]) Using array-protocol type strings: >>> np.dtype([('a','f8'),('b','S10')]) dtype([('a', '<f8'), ('b', '|S10')]) Using comma-separated field formats. The shape is (2,3): >>> np.dtype("i4, (2,3)f8") dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))]) Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void`` is a flexible type, here of size 10: >>> np.dtype([('hello',(int,3)),('world',np.void,10)]) dtype([('hello', '<i4', 3), ('world', '|V10')]) Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are the offsets in bytes: >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) dtype(('<i2', [('x', '|i1'), ('y', '|i1')])) Using dictionaries. Two fields named 'gender' and 'age': >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) dtype([('gender', '|S1'), ('age', '|u1')]) Offsets in bytes, here 0 and 25: >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) dtype([('surname', '|S25'), ('age', '|u1')]) """) ############################################################################## # # dtype attributes # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', """ The required alignment (bytes) of this data-type according to the compiler. 
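    A small, platform-dependent sketch (the values shown assume a typical
    64-bit platform; ``np`` is NumPy as elsewhere in these examples):

    >>> np.dtype(np.float64).alignment
    8
    >>> np.dtype([('a', np.int8), ('b', np.float64)], align=True).alignment
    8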
More information is available in the C-API section of the manual. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', """ A character indicating the byte-order of this data-type object. One of: === ============== '=' native '<' little-endian '>' big-endian '|' not applicable === ============== All built-in data-type objects have byteorder either '=' or '|'. Examples -------- >>> dt = np.dtype('i2') >>> dt.byteorder '=' >>> # endian is not relevant for 8 bit numbers >>> np.dtype('i1').byteorder '|' >>> # or ASCII strings >>> np.dtype('S2').byteorder '|' >>> # Even if specific code is given, and it is native >>> # '=' is the byteorder >>> import sys >>> sys_is_le = sys.byteorder == 'little' >>> native_code = sys_is_le and '<' or '>' >>> swapped_code = sys_is_le and '>' or '<' >>> dt = np.dtype(native_code + 'i2') >>> dt.byteorder '=' >>> # Swapped code shows up as itself >>> dt = np.dtype(swapped_code + 'i2') >>> dt.byteorder == swapped_code True """)) add_newdoc('numpy.core.multiarray', 'dtype', ('char', """A unique character code for each of the 21 different built-in types.""")) add_newdoc('numpy.core.multiarray', 'dtype', ('descr', """ PEP3118 interface description of the data-type. The format is that required by the 'descr' key in the PEP3118 `__array_interface__` attribute. Warning: This attribute exists specifically for PEP3118 compliance, and is not a datatype description compatible with `np.dtype`. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('fields', """ Dictionary of named fields defined for this data type, or ``None``. The dictionary is indexed by keys that are the names of the fields. Each entry in the dictionary is a tuple fully describing the field:: (dtype, offset[, title]) If present, the optional title can be any object (if it is a string or unicode then it will also be a key in the fields dictionary, otherwise it's meta-data). Notice also that the first two elements of the tuple can be passed directly as arguments to the ``ndarray.getfield`` and ``ndarray.setfield`` methods. See Also -------- ndarray.getfield, ndarray.setfield Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} """)) add_newdoc('numpy.core.multiarray', 'dtype', ('flags', """ Bit-flags describing how this data type is to be interpreted. Bit-masks are in `numpy.core.multiarray` as the constants `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`, `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation of these flags is in C-API documentation; they are largely useful for user-defined data-types. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', """ Boolean indicating whether this dtype contains any reference-counted objects in any fields or sub-dtypes. Recall that what is actually in the ndarray memory representing the Python object is the memory address of that object (a pointer). Special handling may be required, and this attribute is useful for distinguishing data types that may contain arbitrary Python objects and data-types that won't. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', """ Integer indicating how this dtype relates to the built-in dtypes. Read-only. 
= ======================================================================== 0 if this is a structured array type, with fields 1 if this is a dtype compiled into numpy (such as ints, floats etc) 2 if the dtype is for a user-defined numpy type A user-defined type uses the numpy C-API machinery to extend numpy to handle a new array type. See :ref:`user.user-defined-data-types` in the NumPy manual. = ======================================================================== Examples -------- >>> dt = np.dtype('i2') >>> dt.isbuiltin 1 >>> dt = np.dtype('f8') >>> dt.isbuiltin 1 >>> dt = np.dtype([('field1', 'f8')]) >>> dt.isbuiltin 0 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', """ Boolean indicating whether the byte order of this dtype is native to the platform. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct', """ Boolean indicating whether the dtype is a struct which maintains field alignment. This flag is sticky, so when combining multiple structs together, it is preserved and produces new dtypes which are also aligned. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', """ The element size of this data-type object. For 18 of the 21 types this number is fixed by the data-type. For the flexible data-types, this number can be anything. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('kind', """ A character code (one of 'biufcmMOSUV') identifying the general kind of data. = ====================== b boolean i signed integer u unsigned integer f floating-point c complex floating-point m timedelta M datetime O object S (byte-)string U Unicode V void = ====================== """)) add_newdoc('numpy.core.multiarray', 'dtype', ('name', """ A bit-width name for this data-type. Un-sized flexible data-type objects do not have this attribute. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('names', """ Ordered list of field names, or ``None`` if there are no fields. The names are ordered according to increasing byte offset. This can be used, for example, to walk through all of the named fields in offset order. Examples -------- >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> dt.names ('name', 'grades') """)) add_newdoc('numpy.core.multiarray', 'dtype', ('num', """ A unique number for each of the 21 different built-in types. These are roughly ordered from least-to-most precision. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('shape', """ Shape tuple of the sub-array if this data type describes a sub-array, and ``()`` otherwise. """)) add_newdoc('numpy.core.multiarray', 'dtype', ('ndim', """ Number of dimensions of the sub-array if this data type describes a sub-array, and ``0`` otherwise. .. versionadded:: 1.13.0 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('str', """The array-protocol typestring of this data-type object.""")) add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', """ Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and None otherwise. The *shape* is the fixed shape of the sub-array described by this data type, and *item_dtype* the data type of the array. If a field whose dtype object has this attribute is retrieved, then the extra dimensions implied by *shape* are tacked on to the end of the retrieved array. 
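    A minimal illustrative sketch (``np`` is NumPy, as in the other
    examples; the repr shown is representative):

    >>> dt = np.dtype((np.int32, (2, 3)))
    >>> dt.subdtype
    (dtype('int32'), (2, 3))
    >>> np.dtype(np.int32).subdtype is None
    True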
""")) add_newdoc('numpy.core.multiarray', 'dtype', ('type', """The type object used to instantiate a scalar of this data-type.""")) ############################################################################## # # dtype methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', """ newbyteorder(new_order='S') Return a new dtype with a different byte order. Changes are also made in all fields and sub-arrays of the data type. Parameters ---------- new_order : string, optional Byte order to force; a value from the byte order specifications below. The default value ('S') results in swapping the current byte order. `new_order` codes can be any of: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) The code does a case-insensitive check on the first letter of `new_order` for these alternatives. For example, any of '>' or 'B' or 'b' or 'brian' are valid to specify big-endian. Returns ------- new_dtype : dtype New dtype object with the given change to the byte order. Notes ----- Changes are also made in all fields and sub-arrays of the data type. Examples -------- >>> import sys >>> sys_is_le = sys.byteorder == 'little' >>> native_code = sys_is_le and '<' or '>' >>> swapped_code = sys_is_le and '>' or '<' >>> native_dt = np.dtype(native_code+'i2') >>> swapped_dt = np.dtype(swapped_code+'i2') >>> native_dt.newbyteorder('S') == swapped_dt True >>> native_dt.newbyteorder() == swapped_dt True >>> native_dt == swapped_dt.newbyteorder('S') True >>> native_dt == swapped_dt.newbyteorder('=') True >>> native_dt == swapped_dt.newbyteorder('N') True >>> native_dt == native_dt.newbyteorder('|') True >>> np.dtype('<i2') == native_dt.newbyteorder('<') True >>> np.dtype('<i2') == native_dt.newbyteorder('L') True >>> np.dtype('>i2') == native_dt.newbyteorder('>') True >>> np.dtype('>i2') == native_dt.newbyteorder('B') True """)) ############################################################################## # # Datetime-related Methods # ############################################################################## add_newdoc('numpy.core.multiarray', 'busdaycalendar', """ busdaycalendar(weekmask='1111100', holidays=None) A business day calendar object that efficiently stores information defining valid days for the busday family of functions. The default valid days are Monday through Friday ("business days"). A busdaycalendar object can be specified with any set of weekly valid days, plus an optional "holiday" dates that always will be invalid. Once a busdaycalendar object is created, the weekmask and holidays cannot be modified. .. versionadded:: 1.7.0 Parameters ---------- weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates, no matter which weekday they fall upon. Holiday dates may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. 
Returns ------- out : busdaycalendar A business day calendar object containing the specified weekmask and holidays values. See Also -------- is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. busday_count : Counts how many valid days are in a half-open date range. Attributes ---------- Note: once a busdaycalendar object is created, you cannot modify the weekmask or holidays. The attributes return copies of internal data. weekmask : (copy) seven-element array of bool holidays : (copy) sorted array of datetime64[D] Examples -------- >>> # Some important days in July ... bdd = np.busdaycalendar( ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) >>> # Default is Monday to Friday weekdays ... bdd.weekmask array([ True, True, True, True, True, False, False], dtype='bool') >>> # Any holidays already on the weekend are removed ... bdd.holidays array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') """) add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask', """A copy of the seven-element boolean mask indicating valid days.""")) add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays', """A copy of the holiday array indicating additional invalid days.""")) add_newdoc('numpy.core.multiarray', 'is_busday', """ is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None) Calculates which of the given dates are valid days, and which are not. .. versionadded:: 1.7.0 Parameters ---------- dates : array_like of datetime64[D] The array of dates to process. weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates. They may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. busdaycal : busdaycalendar, optional A `busdaycalendar` object which specifies the valid days. If this parameter is provided, neither weekmask nor holidays may be provided. out : array of bool, optional If provided, this array is filled with the result. Returns ------- out : array of bool An array with the same shape as ``dates``, containing True for each valid day, and False for each invalid day. See Also -------- busdaycalendar: An object that specifies a custom set of valid days. busday_offset : Applies an offset counted in valid days. busday_count : Counts how many valid days are in a half-open date range. Examples -------- >>> # The weekdays are Friday, Saturday, and Monday ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) array([False, False, True], dtype='bool') """) add_newdoc('numpy.core.multiarray', 'busday_offset', """ busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None) First adjusts the date to fall on a valid day according to the ``roll`` rule, then applies offsets to the given dates counted in valid days. .. versionadded:: 1.7.0 Parameters ---------- dates : array_like of datetime64[D] The array of dates to process. 
offsets : array_like of int The array of offsets, which is broadcast with ``dates``. roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional How to treat dates that do not fall on a valid day. The default is 'raise'. * 'raise' means to raise an exception for an invalid day. * 'nat' means to return a NaT (not-a-time) for an invalid day. * 'forward' and 'following' mean to take the first valid day later in time. * 'backward' and 'preceding' mean to take the first valid day earlier in time. * 'modifiedfollowing' means to take the first valid day later in time unless it is across a Month boundary, in which case to take the first valid day earlier in time. * 'modifiedpreceding' means to take the first valid day earlier in time unless it is across a Month boundary, in which case to take the first valid day later in time. weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates. They may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. busdaycal : busdaycalendar, optional A `busdaycalendar` object which specifies the valid days. If this parameter is provided, neither weekmask nor holidays may be provided. out : array of datetime64[D], optional If provided, this array is filled with the result. Returns ------- out : array of datetime64[D] An array with a shape from broadcasting ``dates`` and ``offsets`` together, containing the dates with offsets applied. See Also -------- busdaycalendar: An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_count : Counts how many valid days are in a half-open date range. Examples -------- >>> # First business day in October 2011 (not accounting for holidays) ... np.busday_offset('2011-10', 0, roll='forward') numpy.datetime64('2011-10-03','D') >>> # Last business day in February 2012 (not accounting for holidays) ... np.busday_offset('2012-03', -1, roll='forward') numpy.datetime64('2012-02-29','D') >>> # Third Wednesday in January 2011 ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') numpy.datetime64('2011-01-19','D') >>> # 2012 Mother's Day in Canada and the U.S. ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') numpy.datetime64('2012-05-13','D') >>> # First business day on or after a date ... np.busday_offset('2011-03-20', 0, roll='forward') numpy.datetime64('2011-03-21','D') >>> np.busday_offset('2011-03-22', 0, roll='forward') numpy.datetime64('2011-03-22','D') >>> # First business day after a date ... np.busday_offset('2011-03-20', 1, roll='backward') numpy.datetime64('2011-03-21','D') >>> np.busday_offset('2011-03-22', 1, roll='backward') numpy.datetime64('2011-03-23','D') """) add_newdoc('numpy.core.multiarray', 'busday_count', """ busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None) Counts the number of valid days between `begindates` and `enddates`, not including the day of `enddates`. 
If ``enddates`` specifies a date value that is earlier than the corresponding ``begindates`` date value, the count will be negative. .. versionadded:: 1.7.0 Parameters ---------- begindates : array_like of datetime64[D] The array of the first dates for counting. enddates : array_like of datetime64[D] The array of the end dates for counting, which are excluded from the count themselves. weekmask : str or array_like of bool, optional A seven-element array indicating which of Monday through Sunday are valid days. May be specified as a length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for weekdays, optionally separated by white space. Valid abbreviations are: Mon Tue Wed Thu Fri Sat Sun holidays : array_like of datetime64[D], optional An array of dates to consider as invalid dates. They may be specified in any order, and NaT (not-a-time) dates are ignored. This list is saved in a normalized form that is suited for fast calculations of valid days. busdaycal : busdaycalendar, optional A `busdaycalendar` object which specifies the valid days. If this parameter is provided, neither weekmask nor holidays may be provided. out : array of int, optional If provided, this array is filled with the result. Returns ------- out : array of int An array with a shape from broadcasting ``begindates`` and ``enddates`` together, containing the number of valid days between the begin and end dates. See Also -------- busdaycalendar: An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. Examples -------- >>> # Number of weekdays in January 2011 ... np.busday_count('2011-01', '2011-02') 21 >>> # Number of weekdays in 2011 ... np.busday_count('2011', '2012') 260 >>> # Number of Saturdays in 2011 ... np.busday_count('2011', '2012', weekmask='Sat') 53 """) add_newdoc('numpy.core.multiarray', 'normalize_axis_index', """ normalize_axis_index(axis, ndim, msg_prefix=None) Normalizes an axis index, `axis`, such that is a valid positive index into the shape of array with `ndim` dimensions. Raises an AxisError with an appropriate message if this is not possible. Used internally by all axis-checking logic. .. versionadded:: 1.13.0 Parameters ---------- axis : int The un-normalized index of the axis. Can be negative ndim : int The number of dimensions of the array that `axis` should be normalized against msg_prefix : str A prefix to put before the message, typically the name of the argument Returns ------- normalized_axis : int The normalized axis index, such that `0 <= normalized_axis < ndim` Raises ------ AxisError If the axis index is invalid, when `-ndim <= axis < ndim` is false. Examples -------- >>> normalize_axis_index(0, ndim=3) 0 >>> normalize_axis_index(1, ndim=3) 1 >>> normalize_axis_index(-1, ndim=3) 2 >>> normalize_axis_index(3, ndim=3) Traceback (most recent call last): ... AxisError: axis 3 is out of bounds for array of dimension 3 >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') Traceback (most recent call last): ... AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 """) add_newdoc('numpy.core.multiarray', 'datetime_as_string', """ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind') Convert an array of datetimes into an array of strings. Parameters ---------- arr : array_like of datetime64 The array of UTC timestamps to format. 
unit : str One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`. timezone : {'naive', 'UTC', 'local'} or tzinfo Timezone information to use when displaying the datetime. If 'UTC', end with a Z to indicate UTC time. If 'local', convert to the local timezone first, and suffix with a +-#### timezone offset. If a tzinfo object, then do as with 'local', but use the specified timezone. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'} Casting to allow when changing between datetime units. Returns ------- str_arr : ndarray An array of strings the same shape as `arr`. Examples -------- >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', '2002-10-27T07:30'], dtype='datetime64[m]') Setting the timezone to UTC shows the same information, but with a Z suffix >>> np.datetime_as_string(d, timezone='UTC') array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', '2002-10-27T07:30Z'], dtype='<U35') Note that we picked datetimes that cross a DST boundary. Passing in a ``pytz`` timezone object will print the appropriate offset >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39') Passing in a unit will change the precision >>> np.datetime_as_string(d, unit='h') array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], dtype='<U32') >>> np.datetime_as_string(d, unit='s') array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', '2002-10-27T07:30:00'], dtype='<U38') 'casting' can be used to specify whether precision can be changed >>> np.datetime_as_string(d, unit='h', casting='safe') TypeError: Cannot create a datetime string as units 'h' from a NumPy datetime with units 'm' according to the rule 'safe' """) add_newdoc('numpy.core.multiarray', 'datetime_data', """ datetime_data(dtype, /) Get information about the step size of a date or time type. The returned tuple can be passed as the second argument of `datetime64` and `timedelta64`. Parameters ---------- dtype : dtype The dtype object, which must be a `datetime64` or `timedelta64` type. Returns ------- unit : str The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype is based. count : int The number of base units in a step. Examples -------- >>> dt_25s = np.dtype('timedelta64[25s]') >>> np.datetime_data(dt_25s) ('s', 25) >>> np.array(10, dt_25s).astype('timedelta64[s]') array(250, dtype='timedelta64[s]') The result can be used to construct a datetime that uses the same units as a timedelta:: >>> np.datetime64('2010', np.datetime_data(dt_25s)) numpy.datetime64('2010-01-01T00:00:00','25s') """) ############################################################################## # # nd_grid instances # ############################################################################## add_newdoc('numpy.lib.index_tricks', 'mgrid', """ `nd_grid` instance which returns a dense multi-dimensional "meshgrid". An instance of `numpy.lib.index_tricks.nd_grid` which returns an dense (or fleshed out) mesh-grid when indexed, so that each returned argument has the same shape. The dimensions and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 
5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. Returns ---------- mesh-grid `ndarrays` all of the same dimensions See Also -------- numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects ogrid : like mgrid but returns open (not fleshed out) mesh grids r_ : array concatenator Examples -------- >>> np.mgrid[0:5,0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]], [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]]) >>> np.mgrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) """) add_newdoc('numpy.lib.index_tricks', 'ogrid', """ `nd_grid` instance which returns an open multi-dimensional "meshgrid". An instance of `numpy.lib.index_tricks.nd_grid` which returns an open (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension of each returned array is greater than 1. The dimension and number of the output arrays are equal to the number of indexing dimensions. If the step length is not a complex number, then the stop is not inclusive. However, if the step length is a **complex number** (e.g. 5j), then the integer part of its magnitude is interpreted as specifying the number of points to create between the start and stop values, where the stop value **is inclusive**. Returns ---------- mesh-grid `ndarrays` with only one dimension :math:`\\neq 1` See Also -------- np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids r_ : array concatenator Examples -------- >>> from numpy import ogrid >>> ogrid[-1:1:5j] array([-1. , -0.5, 0. , 0.5, 1. ]) >>> ogrid[0:5,0:5] [array([[0], [1], [2], [3], [4]]), array([[0, 1, 2, 3, 4]])] """) ############################################################################## # # Documentation for `generic` attributes and methods # ############################################################################## add_newdoc('numpy.core.numerictypes', 'generic', """ Base class for numpy scalar types. Class from which most (all?) numpy scalar types are derived. For consistency, exposes the same API as `ndarray`, despite many consequent attributes being either "get-only," or completely irrelevant. This is the class from which it is strongly suggested users should derive custom scalar types. """) # Attributes add_newdoc('numpy.core.numerictypes', 'generic', ('T', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('base', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('data', """Pointer to start of data.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', """Get array data-descriptor.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('flags', """The integer value of flags.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('flat', """A 1-D view of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('imag', """The imaginary part of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', """The length of one element in bytes.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', """The length of the scalar in bytes.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', """The number of array dimensions.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('real', """The real part of the scalar.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('shape', """Tuple of array dimensions.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('size', """The number of elements in the gentype.""")) add_newdoc('numpy.core.numerictypes', 'generic', ('strides', """Tuple of bytes steps in each dimension.""")) # Methods add_newdoc('numpy.core.numerictypes', 'generic', ('all', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('any', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('astype', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('choose', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('clip', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('compress', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('copy', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('dump', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('fill', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('item', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('max', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('mean', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('min', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', """ newbyteorder(new_order='S') Return a new `dtype` with a different byte order. Changes are also made in all fields and sub-arrays of the data type. 
The `new_order` code can be any from the following: * 'S' - swap dtype from current to opposite endian * {'<', 'L'} - little endian * {'>', 'B'} - big endian * {'=', 'N'} - native order * {'|', 'I'} - ignore (no change to byte order) Parameters ---------- new_order : str, optional Byte order to force; a value from the byte order specifications above. The default value ('S') results in swapping the current byte order. The code does a case-insensitive check on the first letter of `new_order` for the alternatives above. For example, any of 'B' or 'b' or 'biggish' are valid to specify big-endian. Returns ------- new_dtype : dtype New `dtype` object with the given change to the byte order. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('prod', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('put', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('resize', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('round', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('sort', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('std', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('sum', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('take', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. 
""")) add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('trace', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('var', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) add_newdoc('numpy.core.numerictypes', 'generic', ('view', """ Not implemented (virtual attribute) Class generic exists solely to derive numpy scalars from, and possesses, albeit unimplemented, all the attributes of the ndarray class so as to provide a uniform API. See Also -------- The corresponding attribute of the derived class of interest. """)) ############################################################################## # # Documentation for other scalar classes # ############################################################################## add_newdoc('numpy.core.numerictypes', 'bool_', """NumPy's Boolean type. Character code: ``?``. Alias: bool8""") add_newdoc('numpy.core.numerictypes', 'complex64', """ Complex number type composed of two 32 bit floats. Character code: 'F'. """) add_newdoc('numpy.core.numerictypes', 'complex128', """ Complex number type composed of two 64 bit floats. Character code: 'D'. Python complex compatible. """) add_newdoc('numpy.core.numerictypes', 'complex256', """ Complex number type composed of two 128-bit floats. Character code: 'G'. """) add_newdoc('numpy.core.numerictypes', 'float32', """ 32-bit floating-point number. Character code 'f'. C float compatible. """) add_newdoc('numpy.core.numerictypes', 'float64', """ 64-bit floating-point number. Character code 'd'. Python float compatible. 
""") add_newdoc('numpy.core.numerictypes', 'float96', """ """) add_newdoc('numpy.core.numerictypes', 'float128', """ 128-bit floating-point number. Character code: 'g'. C long float compatible. """) add_newdoc('numpy.core.numerictypes', 'int8', """8-bit integer. Character code ``b``. C char compatible.""") add_newdoc('numpy.core.numerictypes', 'int16', """16-bit integer. Character code ``h``. C short compatible.""") add_newdoc('numpy.core.numerictypes', 'int32', """32-bit integer. Character code 'i'. C int compatible.""") add_newdoc('numpy.core.numerictypes', 'int64', """64-bit integer. Character code 'l'. Python int compatible.""") add_newdoc('numpy.core.numerictypes', 'object_', """Any Python object. Character code: 'O'.""")
mit
Maikflow/django_test
lib/python2.7/site-packages/Django-1.7.1-py2.7.egg/django/core/files/images.py
66
2171
""" Utility functions for handling images. Requires Pillow (or PIL), as you might imagine. """ import zlib from django.core.files import File class ImageFile(File): """ A mixin for use alongside django.core.files.base.File, which provides additional features for dealing with images. """ def _get_width(self): return self._get_image_dimensions()[0] width = property(_get_width) def _get_height(self): return self._get_image_dimensions()[1] height = property(_get_height) def _get_image_dimensions(self): if not hasattr(self, '_dimensions_cache'): close = self.closed self.open() self._dimensions_cache = get_image_dimensions(self, close=close) return self._dimensions_cache def get_image_dimensions(file_or_path, close=False): """ Returns the (width, height) of an image, given an open file or a path. Set 'close' to True to close the file at the end if it is initially in an open state. """ from django.utils.image import ImageFile as PILImageFile p = PILImageFile.Parser() if hasattr(file_or_path, 'read'): file = file_or_path file_pos = file.tell() file.seek(0) else: file = open(file_or_path, 'rb') close = True try: # Most of the time PIL only needs a small chunk to parse the image and # get the dimensions, but with some TIFF files PIL needs to parse the # whole file. chunk_size = 1024 while 1: data = file.read(chunk_size) if not data: break try: p.feed(data) except zlib.error as e: # ignore zlib complaining on truncated stream, just feed more # data to parser (ticket #19457). if e.args[0].startswith("Error -5"): pass else: raise if p.image: return p.image.size chunk_size *= 2 return None finally: if close: file.close() else: file.seek(file_pos)
gpl-2.0
ryfeus/lambda-packs
Sklearn_scipy_numpy/source/click/_winconsole.py
195
7790
# -*- coding: utf-8 -*- # This module is based on the excellent work by Adam Bartoš who # provided a lot of what went into the implementation here in # the discussion to issue1602 in the Python bug tracker. # # There are some general differences in regards to how this works # compared to the original patches as we do not need to patch # the entire interpreter but just work in our little world of # echo and prmopt. import io import os import sys import zlib import time import ctypes import msvcrt from click._compat import _NonClosingTextIOWrapper, text_type, PY2 from ctypes import byref, POINTER, c_int, c_char, c_char_p, \ c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE try: from ctypes import pythonapi PyObject_GetBuffer = pythonapi.PyObject_GetBuffer PyBuffer_Release = pythonapi.PyBuffer_Release except ImportError: pythonapi = None from ctypes.wintypes import LPWSTR, LPCWSTR c_ssize_p = POINTER(c_ssize_t) kernel32 = windll.kernel32 GetStdHandle = kernel32.GetStdHandle ReadConsoleW = kernel32.ReadConsoleW WriteConsoleW = kernel32.WriteConsoleW GetLastError = kernel32.GetLastError GetCommandLineW = WINFUNCTYPE(LPWSTR)( ('GetCommandLineW', windll.kernel32)) CommandLineToArgvW = WINFUNCTYPE( POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( ('CommandLineToArgvW', windll.shell32)) STDIN_HANDLE = GetStdHandle(-10) STDOUT_HANDLE = GetStdHandle(-11) STDERR_HANDLE = GetStdHandle(-12) PyBUF_SIMPLE = 0 PyBUF_WRITABLE = 1 ERROR_SUCCESS = 0 ERROR_NOT_ENOUGH_MEMORY = 8 ERROR_OPERATION_ABORTED = 995 STDIN_FILENO = 0 STDOUT_FILENO = 1 STDERR_FILENO = 2 EOF = b'\x1a' MAX_BYTES_WRITTEN = 32767 class Py_buffer(ctypes.Structure): _fields_ = [ ('buf', c_void_p), ('obj', py_object), ('len', c_ssize_t), ('itemsize', c_ssize_t), ('readonly', c_int), ('ndim', c_int), ('format', c_char_p), ('shape', c_ssize_p), ('strides', c_ssize_p), ('suboffsets', c_ssize_p), ('internal', c_void_p) ] if PY2: _fields_.insert(-1, ('smalltable', c_ssize_t * 2)) # On PyPy we cannot get buffers so our ability to operate here is # serverly limited. 
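# `get_buffer` (defined just below when the CPython C-API is reachable) uses
# the buffer protocol through ctypes: PyObject_GetBuffer fills a Py_buffer
# struct for the given object, the raw pointer is then exposed as a ctypes
# char array of length buf.len, and PyBuffer_Release is invoked once that
# wrapper has been built. When `pythonapi` is missing, get_buffer stays None
# and _get_windows_console_stream simply declines to wrap the stream.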
if pythonapi is None: get_buffer = None else: def get_buffer(obj, writable=False): buf = Py_buffer() flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE PyObject_GetBuffer(py_object(obj), byref(buf), flags) try: buffer_type = c_char * buf.len return buffer_type.from_address(buf.buf) finally: PyBuffer_Release(byref(buf)) class _WindowsConsoleRawIOBase(io.RawIOBase): def __init__(self, handle): self.handle = handle def isatty(self): io.RawIOBase.isatty(self) return True class _WindowsConsoleReader(_WindowsConsoleRawIOBase): def readable(self): return True def readinto(self, b): bytes_to_be_read = len(b) if not bytes_to_be_read: return 0 elif bytes_to_be_read % 2: raise ValueError('cannot read odd number of bytes from ' 'UTF-16-LE encoded console') buffer = get_buffer(b, writable=True) code_units_to_be_read = bytes_to_be_read // 2 code_units_read = c_ulong() rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read, byref(code_units_read), None) if GetLastError() == ERROR_OPERATION_ABORTED: # wait for KeyboardInterrupt time.sleep(0.1) if not rv: raise OSError('Windows error: %s' % GetLastError()) if buffer[0] == EOF: return 0 return 2 * code_units_read.value class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): def writable(self): return True @staticmethod def _get_error_message(errno): if errno == ERROR_SUCCESS: return 'ERROR_SUCCESS' elif errno == ERROR_NOT_ENOUGH_MEMORY: return 'ERROR_NOT_ENOUGH_MEMORY' return 'Windows error %s' % errno def write(self, b): bytes_to_be_written = len(b) buf = get_buffer(b) code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2 code_units_written = c_ulong() WriteConsoleW(self.handle, buf, code_units_to_be_written, byref(code_units_written), None) bytes_written = 2 * code_units_written.value if bytes_written == 0 and bytes_to_be_written > 0: raise OSError(self._get_error_message(GetLastError())) return bytes_written class ConsoleStream(object): def __init__(self, text_stream, byte_stream): self._text_stream = text_stream self.buffer = byte_stream @property def name(self): return self.buffer.name def write(self, x): if isinstance(x, text_type): return self._text_stream.write(x) try: self.flush() except Exception: pass return self.buffer.write(x) def writelines(self, lines): for line in lines: self.write(line) def __getattr__(self, name): return getattr(self._text_stream, name) def isatty(self): return self.buffer.isatty() def __repr__(self): return '<ConsoleStream name=%r encoding=%r>' % ( self.name, self.encoding, ) def _get_text_stdin(buffer_stream): text_stream = _NonClosingTextIOWrapper( io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), 'utf-16-le', 'strict', line_buffering=True) return ConsoleStream(text_stream, buffer_stream) def _get_text_stdout(buffer_stream): text_stream = _NonClosingTextIOWrapper( _WindowsConsoleWriter(STDOUT_HANDLE), 'utf-16-le', 'strict', line_buffering=True) return ConsoleStream(text_stream, buffer_stream) def _get_text_stderr(buffer_stream): text_stream = _NonClosingTextIOWrapper( _WindowsConsoleWriter(STDERR_HANDLE), 'utf-16-le', 'strict', line_buffering=True) return ConsoleStream(text_stream, buffer_stream) if PY2: def _hash_py_argv(): return zlib.crc32('\x00'.join(sys.argv[1:])) _initial_argv_hash = _hash_py_argv() def _get_windows_argv(): argc = c_int(0) argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) argv = [argv_unicode[i] for i in range(0, argc.value)] if not hasattr(sys, 'frozen'): argv = argv[1:] while len(argv) > 0: arg = argv[0] if not arg.startswith('-') or arg == '-': 
break argv = argv[1:] if arg.startswith(('-c', '-m')): break return argv[1:] _stream_factories = { 0: _get_text_stdin, 1: _get_text_stdout, 2: _get_text_stderr, } def _get_windows_console_stream(f, encoding, errors): if get_buffer is not None and \ encoding in ('utf-16-le', None) \ and errors in ('strict', None) and \ hasattr(f, 'isatty') and f.isatty(): func = _stream_factories.get(f.fileno()) if func is not None: if not PY2: f = getattr(f, 'buffer') if f is None: return None else: # If we are on Python 2 we need to set the stream that we # deal with to binary mode as otherwise the exercise if a # bit moot. The same problems apply as for # get_binary_stdin and friends from _compat. msvcrt.setmode(f.fileno(), os.O_BINARY) return func(f)
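click only reaches for these wrappers internally (via click._compat) when the standard streams really are interactive Windows consoles. A Windows-only sketch of that wiring, using the module's private helper and therefore illustrative rather than supported API:

import sys
from click._winconsole import _get_windows_console_stream

# Returns None unless `sys.stdout` is a console with a compatible encoding;
# otherwise text goes through WriteConsoleW while bytes still reach the
# underlying binary buffer.
stream = _get_windows_console_stream(sys.stdout, encoding=None, errors=None)
if stream is not None:
    stream.write(u'text is routed through the UTF-16-LE console writer\n')
    stream.buffer.write(b'bytes are written to the buffer directly\n')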
mit
Alex-van-der-Peet/bitcoin
qa/rpc-tests/test_framework/blocktools.py
93
2057
# blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#

from mininode import *
from script import CScript, CScriptOp

# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
    block = CBlock()
    if nTime is None:
        import time
        block.nTime = int(time.time()+600)
    else:
        block.nTime = nTime
    block.hashPrevBlock = hashprev
    block.nBits = 0x207fffff  # Will break after a difficulty adjustment...
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block

def serialize_script_num(value):
    r = bytearray(0)
    if value == 0:
        return r
    neg = value < 0
    absvalue = -value if neg else value
    while (absvalue):
        r.append(chr(absvalue & 0xff))
        absvalue >>= 8
    if r[-1] & 0x80:
        r.append(0x80 if neg else 0)
    elif neg:
        r[-1] |= 0x80
    return r

counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0):
    global counter
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
                ser_string(serialize_script_num(counter+heightAdjust)), 0xffffffff))
    counter += 1
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = 50*100000000
    halvings = int((counter+heightAdjust)/150)  # regtest
    coinbaseoutput.nValue >>= halvings
    coinbaseoutput.scriptPubKey = ""
    coinbase.vout = [ coinbaseoutput ]
    coinbase.calc_sha256()
    return coinbase

# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
    tx = CTransaction()
    assert(n < len(prevtx.vout))
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
    tx.vout.append(CTxOut(value, ""))
    tx.calc_sha256()
    return tx
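A short construction sketch, not part of the original file, showing how these helpers are usually chained in the regression tests. It assumes the surrounding test framework is importable (in particular that mininode's CBlock provides solve() to grind the regtest proof of work), and the values are illustrative.

from blocktools import create_block, create_coinbase, create_transaction

# A first block on top of an all-zero previous hash.
coinbase0 = create_coinbase()
tip = create_block(hashprev=0, coinbase=coinbase0)
tip.solve()

# A follow-up block that spends output 0 of the first coinbase.
spend = create_transaction(coinbase0, 0, sig="", value=50 * 100000000)
block2 = create_block(tip.sha256, create_coinbase(), nTime=tip.nTime + 1)
block2.vtx.append(spend)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()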
mit
google-research/meta-dataset
meta_dataset/trainer.py
1
71223
# coding=utf-8 # Copyright 2022 The Meta-Dataset Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python2, python3 """Interface for a learner that uses BenchmarkReaderDataSource to get data.""" # TODO(lamblinp): Update variable names to be more consistent # - target, class_idx, label # - support, query # TODO(lamblinp): Simplify the logic around performing evaluation on the # `TRAIN_SPLIT` by, for instance, recording which data is episodic, and which # split it is coming from (independently from how it is used). from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import functools import os import re from absl import logging import gin.tf from meta_dataset import distribute_utils from meta_dataset import learners from meta_dataset.data import dataset_spec as dataset_spec_lib from meta_dataset.data import learning_spec from meta_dataset.data import pipeline from meta_dataset.data import providers from meta_dataset.data import read_episodes from meta_dataset.learners import experimental as experimental_learners from meta_dataset.models import functional_backbones import numpy as np import six from six.moves import range from six.moves import zip import tensorflow.compat.v1 as tf # Enable tf.data optimizations, which are applied to the input data pipeline. # It may be helpful to disable them when investigating regressions due to # changes in tf.data (see b/121130181 for instance), but they seem to be helpful # (or at least not detrimental) in general. ENABLE_DATA_OPTIMIZATIONS = True DATASETS_WITH_EXAMPLE_SPLITS = () TF_DATA_OPTIONS = tf.data.Options() if not ENABLE_DATA_OPTIMIZATIONS: # The Options object can be used to control which static or dynamic # optimizations to apply. TF_DATA_OPTIONS.experimental_optimization.apply_default_optimizations = False # Objective labels for hyperparameter optimization. ACC_MEAN_FORMAT_STRING = '%s_acc/mean' ACC_CI95_FORMAT_STRING = '%s_acc/ci95' # TODO(eringrant): Use `learning_spec.Split.TRAIN`, `learning_spec.Split.VALID`, # and `learning_spec.Split.TEST` instead of string constants, and replace all # remaining string redefinitions. TRAIN_SPLIT = 'train' VALID_SPLIT = 'valid' TEST_SPLIT = 'test' FLAGS = tf.flags.FLAGS class UnexpectedSplitError(ValueError): def __init__(self, unexpected_split, expected_splits=(TRAIN_SPLIT, TEST_SPLIT, VALID_SPLIT)): super(UnexpectedSplitError, self).__init__('Split must be one of {}, but received `{}`. '.format( expected_splits, unexpected_split)) @gin.configurable('benchmark') def get_datasets_and_restrictions(train_datasets='', eval_datasets='', use_dumped_episodes=False, restrict_classes=None, restrict_num_per_class=None): """Gets the list of dataset names and possible restrictions on their classes. Args: train_datasets: A string of comma-separated dataset names for training. eval_datasets: A string of comma-separated dataset names for evaluation. 
use_dumped_episodes: bool, if True `eval_datasets` are prefixed with `dumped` to trigger evaluation on dumped episodes instead of on the fly sampling. restrict_classes: If provided, a dict that maps dataset names to a dict that specifies for each of `TRAIN_SPLIT`, `VALID_SPLIT` and `TEST_SPLIT` the number of classes to restrict to. This can lead to some classes of a particular split of a particular dataset never participating in episode creation. restrict_num_per_class: If provided, a dict that maps dataset names to a dict that specifies for each of `meta_dataset.trainer.TRAIN_SPLIT`, `meta_dataset.trainer.VALID_SPLIT` and `meta_dataset.trainer.TEST_SPLIT` the number of examples per class to restrict to. For datasets / splits that are not specified, no restriction is applied. Returns: Two lists of dataset names and two possibly empty dictionaries. """ if restrict_classes is None: restrict_classes = {} if restrict_num_per_class is None: restrict_num_per_class = {} train_datasets = [d.strip() for d in train_datasets.split(',')] eval_datasets = [d.strip() for d in eval_datasets.split(',')] if use_dumped_episodes: eval_datasets = ['dumped_%s' % ds for ds in eval_datasets] return train_datasets, eval_datasets, restrict_classes, restrict_num_per_class def apply_dataset_options(dataset): """Apply the module-wide set of dataset options to dataset. In particular, this is used to enable or disable tf.data optimizations. This applies to the whole pipeline, so we can just set it at the end. Args: dataset: a tf.data.Dataset object. Returns: A tf.data.Dataset object with options applied. """ return dataset.with_options(TF_DATA_OPTIONS) def compute_class_proportions(unique_class_ids, shots, dataset_spec): """Computes the proportion of the total number of examples appearing as shots. Args: unique_class_ids: A 1D int Tensor of unique class IDs. shots: A 1D Tensor of the number of shots for each class in `unique_class_ids`. dataset_spec: A DatasetSpecification that contains informations about the class labels in `unique_class_ids`. Returns: A 1D Tensor with the proportion of examples appearing as shots per class in `unique_class_ids`, normalized by the total number of examples for each class in the dataset according to `dataset_spec`. """ # Get the total number of examples of each class in the dataset. num_dataset_classes = len(dataset_spec.images_per_class) num_images_per_class = [ dataset_spec.get_total_images_per_class(class_id) for class_id in range(num_dataset_classes) ] # Make sure that `unique_class_ids` are valid indices of # `num_images_per_class`. This is important since `tf.gather` will fail # silently and return zeros otherwise. num_classes = tf.shape(num_images_per_class)[0] check_valid_inds_op = tf.assert_less(unique_class_ids, num_classes) with tf.control_dependencies([check_valid_inds_op]): # Get the total number of examples of each class that is in the episode. num_images_per_class = tf.gather(num_images_per_class, unique_class_ids) # [?, ] # Get the proportions of examples of each class that appear in the episode. class_props = tf.truediv(shots, num_images_per_class) return class_props def get_split_enum(split): """Returns the Enum value corresponding to the given split. Args: split: A string, one of TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT. Raises: UnexpectedSplitError: split not TRAIN_SPLIT, VALID_SPLIT, or TEST_SPLIT. """ # Get the int representing the chosen split. 
if split == TRAIN_SPLIT: split_enum = learning_spec.Split.TRAIN elif split == VALID_SPLIT: split_enum = learning_spec.Split.VALID elif split == TEST_SPLIT: split_enum = learning_spec.Split.TEST else: raise UnexpectedSplitError(split) return split_enum def restore_or_log_informative_error(saver, sess, checkpoint_to_restore): """Attempt to restore from `checkpoint_to_restore` in `sess` using `saver`.""" try: saver.restore(sess, checkpoint_to_restore) except tf.errors.NotFoundError as e: logging.error('Tried to restore from checkpoint %s but failed.', checkpoint_to_restore) raise e else: logging.info('Restored from checkpoint %s.', checkpoint_to_restore) # TODO(eringrant): Split the current `Trainer` class into `Trainer` and # `Evaluator` classes to partition the constructor arguments into meaningful # groups. # TODO(eringrant): Refactor the current `Trainer` class to more transparently # deal with operations per split, since the present logic surrounding the # `eval_finegrainedness_split` is confusing. # TODO(eringrant): Better organize `Trainer` Gin configurations, which are # currently set in many configuration files. @gin.configurable class Trainer(object): """A Trainer for training a Learner on data provided by ReaderDataSource.""" def __init__( self, num_updates, batch_size, num_eval_episodes, checkpoint_every, validate_every, log_every, train_learner_class, eval_learner_class, is_training, checkpoint_to_restore, learning_rate, decay_learning_rate, decay_every, decay_rate, normalized_gradient_descent, experiment_name, pretrained_source, train_dataset_list, eval_dataset_list, restrict_classes, restrict_num_per_class, checkpoint_dir, summary_dir, records_root_dir, eval_finegrainedness, eval_finegrainedness_split, eval_imbalance_dataset, omit_from_saving_and_reloading, eval_split, train_episode_config, eval_episode_config, data_config, distribute, enable_tf_optimizations): # pyformat: disable """Initializes a Trainer. Args: num_updates: An integer, the number of training updates. batch_size: An integer, the size of batches for non-episodic models. num_eval_episodes: An integer, the number of episodes for evaluation. checkpoint_every: An integer, the number of episodes between consecutive checkpoints. validate_every: An integer, the number of episodes between consecutive validations. log_every: An integer, the number of episodes between consecutive logging. train_learner_class: A Learner to be used for meta-training. eval_learner_class: A Learner to be used for meta-validation or meta-testing. is_training: Bool, whether or not to train or just evaluate. checkpoint_to_restore: A string, the path to a checkpoint from which to restore variables. learning_rate: A float, the meta-learning learning rate. decay_learning_rate: A boolean, whether to decay the learning rate. decay_every: An integer, the learning rate is decayed for every multiple of this value. decay_rate: A float, the decay to apply to the learning rate. normalized_gradient_descent: A boolean, whether to use normalized gradient descent in addition to ADAM; improves stability for crosstransformers. experiment_name: A string, a name for the experiment. pretrained_source: A string, the pretraining setup to use. train_dataset_list: A list of names of datasets to train on. This can be any subset of the supported datasets. eval_dataset_list: A list of names of datasets to evaluate on either for validation during train or for final test evaluation, depending on the nature of the experiment, as dictated by `is_training'. 
      restrict_classes: A dict that maps dataset names to a dict that
        specifies for each of TRAIN_SPLIT, VALID_SPLIT and TEST_SPLIT the
        number of classes to restrict to. This can lead to some classes of a
        particular split of a particular dataset never participating in
        episode creation.
      restrict_num_per_class: A dict that maps dataset names to a dict that
        specifies for each of TRAIN_SPLIT, VALID_SPLIT and TEST_SPLIT the
        number of examples per class to restrict to. For datasets / splits
        that are not mentioned, no restriction is applied. If
        restrict_num_per_class is the empty dict, no restriction is applied to
        any split of any dataset.
      checkpoint_dir: A string, the path to the checkpoint directory, or None
        if no checkpointing should occur.
      summary_dir: A string, the path to the summary directory, or None if no
        summaries should be saved.
      records_root_dir: A string, the path to the dataset records directory.
      eval_finegrainedness: Whether to perform binary ImageNet evaluation for
        assessing the performance on fine- vs coarse-grained tasks.
      eval_finegrainedness_split: The subgraph of ImageNet to perform the
        aforementioned analysis on. Notably, if this is TRAIN_SPLIT, we need
        to ensure that the training data is used episodically, even if the
        given model is the baseline model, which usually uses batches for
        training.
      eval_imbalance_dataset: A dataset on which to perform evaluation for
        assessing how class imbalance affects performance in binary episodes.
        By default it is empty and no imbalance analysis is performed.
      omit_from_saving_and_reloading: A list of strings that specifies
        substrings of variable names that should not be reloaded.
      eval_split: One of the constants TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT or
        None, according to the split whose results we want to use for the
        analysis.
      train_episode_config: An instance of EpisodeDescriptionConfig (in
        data/config.py). This is a config for setting the ways and shots of
        training episodes or the parameters for sampling them, if variable.
      eval_episode_config: An instance of EpisodeDescriptionConfig. Analogous
        to train_episode_config but used for eval episodes (validation or
        testing).
      data_config: A DataConfig, the data configuration.
      distribute: (Experimental) use tf.distribute to distribute computation
        across multiple GPUs using a MirroredStrategy. This has been tested
        with CrossTransformers, and may work with other episodic learners. It
        will split Episodes into multiple EpisodePieces before passing them to
        the learners, where each EpisodePiece contains a portion of the
        original episode's query and support-set images. Batch learners are
        not currently supported.
      enable_tf_optimizations: Enable TensorFlow optimizations. It can add a
        few minutes to the first calls to session.run(), but decreases memory
        usage.

    Raises:
      RuntimeError: If requested to meta-learn the initialization of the
        linear layer weights but they are unexpectedly omitted from
        saving/restoring.
      UnexpectedSplitError: If split configuration is not as expected.
""" # pyformat: enable self.num_updates = num_updates self.batch_size = batch_size self.num_eval_episodes = num_eval_episodes self.checkpoint_every = checkpoint_every self.validate_every = validate_every self.log_every = log_every self.checkpoint_to_restore = checkpoint_to_restore self.learning_rate = learning_rate self.decay_learning_rate = decay_learning_rate self.decay_every = decay_every self.decay_rate = decay_rate self.experiment_name = experiment_name self.pretrained_source = pretrained_source self.train_learner_class = train_learner_class self.eval_learner_class = eval_learner_class self.is_training = is_training self.train_dataset_list = train_dataset_list self.eval_dataset_list = eval_dataset_list self.normalized_gradient_descent = normalized_gradient_descent self.enable_tf_optimizations = enable_tf_optimizations # Currently we are supporting single dataset when we read from fixed # datasets like VTAB or dumped episodes. # Check whether we evaluate on VTAB if (len(self.eval_dataset_list) == 1 and self.eval_dataset_list[0].startswith('vtab')): self._fixed_eval = 'vtab' elif (len(self.eval_dataset_list) == 1 and self.eval_dataset_list[0].startswith('dumped')): self._fixed_eval = 'dumped' else: self._fixed_eval = None self.restrict_classes = restrict_classes self.restrict_num_per_class = restrict_num_per_class self.checkpoint_dir = checkpoint_dir self.summary_dir = summary_dir self.records_root_dir = records_root_dir self.eval_finegrainedness = eval_finegrainedness self.eval_finegrainedness_split = eval_finegrainedness_split self.eval_imbalance_dataset = eval_imbalance_dataset self.omit_from_saving_and_reloading = omit_from_saving_and_reloading self.data_initializeable_iterators = [] if eval_finegrainedness: # The fine- vs coarse- grained evaluation may potentially be performed on # the training graph as it exhibits greater variety in this aspect. self.eval_split = eval_finegrainedness_split elif eval_split: if eval_split not in (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT): raise UnexpectedSplitError(eval_split) self.eval_split = eval_split elif is_training: self.eval_split = VALID_SPLIT else: self.eval_split = TEST_SPLIT if eval_finegrainedness or eval_imbalance_dataset: # We restrict this analysis to the binary classification setting. logging.info( 'Forcing the number of %s classes to be 2, since ' 'the finegrainedness analysis is applied on binary ' 'classification tasks only.', eval_finegrainedness_split) if eval_finegrainedness and eval_finegrainedness_split == TRAIN_SPLIT: train_episode_config.num_ways = 2 else: eval_episode_config.num_ways = 2 self.num_classes_train = train_episode_config.num_ways self.num_classes_eval = eval_episode_config.num_ways self.num_support_train = train_episode_config.num_support self.num_query_train = train_episode_config.num_query self.num_support_eval = eval_episode_config.num_support self.num_query_eval = eval_episode_config.num_query self.train_episode_config = train_episode_config self.eval_episode_config = eval_episode_config self.data_config = data_config # TODO(eringrant): Adapt these image-specific expectations to feature # inputs. self.image_shape = [data_config.image_height] * 2 + [3] # Create the benchmark specification. self.benchmark_spec = self.get_benchmark_specification() # Which splits to support depends on whether we are in the meta-training # phase or not. If we are, we need the train split, and the valid one for # early-stopping. If not, we only need the test split. 
self.required_splits = [TRAIN_SPLIT] if self.is_training else [] self.required_splits += [self.eval_split] # Get the training, validation and testing specifications. # Each is either an EpisodeSpecification or a BatchSpecification. self.split_episode_or_batch_specs = dict( zip(self.required_splits, map(self.get_batch_or_episodic_specification, self.required_splits))) # Get the next data (episode or batch) for the different splits. self.next_data = dict( zip(self.required_splits, map(self.build_data, self.required_splits))) self.distribute = distribute if self.distribute: self.strategy = tf.distribute.MirroredStrategy() else: self.strategy = None # Create the global step to pass to the learners. global_step = tf.train.get_or_create_global_step() if len(self.required_splits) > 1: if issubclass(self.train_learner_class, experimental_learners.ExperimentalLearner): assert issubclass( self.eval_learner_class, experimental_learners.ExperimentalLearner ), ('If the `Learner` for the train split is an `ExperimentalLearner`,' ' the `Learner` for the evaluation split must be as well, since ' 'otherwise parameters cannot be shared.') else: assert not issubclass( self.eval_learner_class, experimental_learners.ExperimentalLearner ), ('If the `Learner` for the evaluation split is not an ' '`ExperimentalLearner`, the `Learner` for the train split must not' ' be either, since otherwise parameters cannot be shared.') # Initialize the learners. self.learners = {} for split in self.required_splits: if split == TRAIN_SPLIT: # The learner for the training split should only be in training mode if # the evaluation split is not the training split. learner_is_training = self.eval_split != TRAIN_SPLIT learner_class = self.train_learner_class tied_learner = None else: learner_is_training = False learner_class = self.eval_learner_class # Share parameters between the training and evaluation `Learner`s. tied_learner = ( self.learners[TRAIN_SPLIT] if TRAIN_SPLIT in self.required_splits else None) learner = self.create_learner( is_training=learner_is_training, learner_class=learner_class, split=get_split_enum(split), tied_learner=tied_learner) if (isinstance(learner, learners.MAMLLearner) and not learner.zero_fc_layer and not learner.proto_maml_fc_layer_init): if 'linear_classifier' in FLAGS.omit_from_saving_and_reloading: raise ValueError('The linear layer is requested to be meta-learned ' 'since both `MAMLLearner.zero_fc_layer` and ' '`MAMLLearner.proto_maml_fc_layer_init` are False, ' 'but the `linear_classifier` tags is found in ' 'FLAGS.omit_from_saving_and_reloading so they will ' 'not be properly restored. Please exclude these ' 'weights from omit_from_saving_and_reloading for ' 'this setting to work as expected.') self.learners[split] = learner # Build the data-dependent functions (run_fn returns prediction, # un-regularized loss, accuracy, and episode statistics), the iterators # producing data (data_fn), and regularizer, for each learner. run_fn, data_fn, regularizer_fn = zip( *[self.build_learner(split) for split in self.required_splits]) self.run_fns = dict(zip(self.required_splits, run_fn)) self.data_fns = dict(zip(self.required_splits, data_fn)) self.regularizer_fns = dict(zip(self.required_splits, regularizer_fn)) # Get an optimizer and the operation for meta-training. 
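    # When decay_learning_rate is True, the staircase schedule created below
    # amounts to: learning_rate * decay_rate ** (global_step // decay_every).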
self.train_op = None if self.is_training: learning_rate = self.learning_rate if self.decay_learning_rate: learning_rate = tf.train.exponential_decay( self.learning_rate, global_step, decay_steps=self.decay_every, decay_rate=self.decay_rate, staircase=True) tf.summary.scalar('learning_rate', learning_rate) if self.distribute: self.optimizer = tf.keras.optimizers.Adam(learning_rate) else: self.optimizer = tf.train.AdamOptimizer(learning_rate) self.run_fns[TRAIN_SPLIT] = self.get_run_fn_with_train_op( self.run_fns[TRAIN_SPLIT], self.regularizer_fns[TRAIN_SPLIT], global_step) self.predictions = {} self.losses = {} self.accuracies = {} self.episode_info = {} for split in self.required_splits: if self.distribute: with self.strategy.scope(): output = self.strategy.experimental_run(self.run_fns[split], self.data_fns[split]) if self.strategy.num_replicas_in_sync > 1: output['predictions'] = tf.concat( output['predictions'].values, axis=0) output['loss'] = tf.concat(output['loss'].values, axis=0) output['accuracy'] = tf.concat(output['accuracy'].values, axis=0) if split == TRAIN_SPLIT and self.is_training: output['train_op'] = tf.group(output['train_op'].values) # The computed episode_info should be identical for all replicas. episode_info = {} for key, val in output['episode_info'].items(): if val is not None: # This control_dependencies is required or the call to # session.run() gets deadlocked. with tf.control_dependencies(val.values): episode_info[key] = tf.identity(val.values[0]) else: episode_info[key] = None output['episode_info'] = episode_info else: data_tensors = tf.data.make_one_shot_iterator( self.data_fns[split]()).get_next() output = self.run_fns[split](data_tensors) loss = tf.reduce_mean(output['loss']) loss += self.regularizer_fns[split]() self.losses[split] = loss self.accuracies[split] = tf.reduce_mean(output['accuracy']) self.predictions[split] = output['predictions'] self.episode_info[split] = output['episode_info'] if split == TRAIN_SPLIT and self.is_training: self.train_op = output['train_op'] if self.checkpoint_dir is not None: if not tf.io.gfile.exists(self.checkpoint_dir): tf.io.gfile.makedirs(self.checkpoint_dir) # Meaningless values so that logging works even if called before evaluation. self.valid_acc = np.nan self.valid_ci = np.nan self.initialize_session() self.initialize_saver() self.create_summary_writer() def build_learner(self, split): """Return predictions, losses and accuracies for the learner on split. Args: split: A `learning_spec.Split` that identifies the data split for which the learner is to be built. Returns: run_fn: a function which, when called on data, will run the network forward pass in split mode `split` and return: predictions: A `tf.Tensor`; the predictions of the learner on `split`. losses: A `tf.Tensor`; the losses of the learner on `split`. accuracies: A `tf.Tensor`; the accuracies of the learner on `split`. episode_info: A map of string to `tf.Tensor`, which has statistics about the input data. data_fn: a function which returns a `tf.Dataset` which can be used to provide input to run_fn regularizer: a `tf.Tensor` which computes a data-independent regularizer (e.g. weight decay) that is to be applied at every iteration. """ learner = self.learners[split] # Build the learner and its variables outside the name scope. 
learner.build() with tf.name_scope(split): data_src = self.next_data[split] if self.distribute: with self.strategy.scope(): # We need to split both support and query sets across GPUs, and # tf.data doesn't make this straightforward, as there are few # functions for splitting up datasets. We use unbatch to accomplish # this, but unbatch splits the first dimension and creates one # example for every possible index along that dimension. # # Therefore, the strategy is to first compute the chunk boundaries # for the support and query sets (chunk_bounds), pad the last # chunks to match the earlier chunks along the first axis, # and then stack all chunks along a new first axis. # unbatch() then correctly splits the episode across gpus. We then # trim the padding from later chunks (trim_extra). The result is a # single dataset with chunks for each GPU interleaved with one # another. We use shard() to split this single dataset into one # dataset per gpu. def chunk_bounds(x, num_gpu, idx): num_per_gpu = tf.cast( tf.ceil( tf.cast(tf.shape(x)[0], tf.float32) / tf.cast(num_gpu, tf.float32)), tf.int32) lb = tf.minimum(idx * num_per_gpu, tf.shape(x)[0]) ub = tf.minimum((idx + 1) * num_per_gpu, tf.shape(x)[0]) return lb, ub, tf.minimum((idx + 1) * num_per_gpu - ub, num_per_gpu) def chunk_array(arr): """Deterministically chunk the arrays across devices.""" num_gpu = self.strategy.num_replicas_in_sync chunks = [] num = [] for idx in range(num_gpu): lb, ub, num_extra = chunk_bounds(arr, num_gpu, idx) pad = tf.tile(arr[0:1], [num_extra] + [1] * (len(arr.shape) - 1)) * 0 - 1 chunks.append(tf.concat([arr[lb:ub], pad], axis=0)) num.append(ub - lb) return tf.stack(chunks), tf.stack(num) def chunk_episode(episode): way_tiled = episode.way + tf.zeros( [self.strategy.num_replicas_in_sync], dtype=tf.int32) return ((way_tiled, chunk_array(episode.support_images)[0], chunk_array(episode.support_labels)[0]) + chunk_array(episode.support_class_ids) + (chunk_array(episode.query_images)[0], chunk_array(episode.query_labels)[0]) + chunk_array(episode.query_class_ids)) def trim_extra(way, support_images, support_labels, support_class_ids, support_num, query_images, query_labels, query_class_ids, query_num): return providers.EpisodePiece(support_images[:support_num], query_images[:query_num], support_labels[:support_num], query_labels[:query_num], support_class_ids[:support_num], query_class_ids[:query_num], way) chunked_data = data_src.map(chunk_episode).unbatch().map(trim_extra) def input_fn(input_context): return chunked_data.shard(input_context.num_input_pipelines, input_context.input_pipeline_id) data = self.strategy.make_input_fn_iterator(input_fn) # Return a function that computes the regularizers, to defer their # computation till after the computational graph is instantiated # (otherwise the graph will be empty and no regularizers found). 
regularizer = self.learners[split].compute_regularizer self.data_initializeable_iterators.append(data) else: data = lambda: data_src regularizer = self.learners[split].compute_regularizer def run(data_local): """Run the forward pass of the model.""" predictions_dist = self.learners[split].forward_pass(data_local) loss_dist = self.learners[split].compute_loss( predictions=predictions_dist, onehot_labels=data_local.onehot_labels) accuracy_dist = self.learners[split].compute_accuracy( predictions=predictions_dist, onehot_labels=data_local.onehot_labels) episode_info = self.get_episode_info(data_local) return { 'predictions': predictions_dist, 'loss': loss_dist, 'accuracy': accuracy_dist, 'episode_info': episode_info, } return run, data, regularizer def get_episode_info(self, data): """Sets the Tensors for the info about the learner's next episode.""" res = {} # The batch trainer receives episodes only for the valid and test splits. # Therefore for the train split there is no defined way and shots. if isinstance(data, providers.Batch): (way_, shots_, class_props_, class_ids_, query_targets_) = [None] * 5 else: way_ = data.way shots_ = data.support_shots class_ids_ = data.unique_class_ids class_props_ = None if self.eval_imbalance_dataset: class_props_ = compute_class_proportions( class_ids_, shots_, self.eval_imbalance_dataset_spec) query_targets_ = data.query_labels res['way'] = way_ res['shots'] = shots_ res['class_props'] = class_props_ res['class_ids'] = class_ids_ res['query_targets'] = query_targets_ return res def create_summary_writer(self): """Create summaries and writer.""" # Add summaries for the losses / accuracies of the different learners. standard_summaries = [] for split in self.required_splits: with tf.name_scope(split): loss_summary = tf.summary.scalar('loss', self.losses[split]) acc_summary = tf.summary.scalar('acc', tf.reduce_mean(self.accuracies[split])) standard_summaries.append(loss_summary) standard_summaries.append(acc_summary) # Add summaries for the way / shot / logits / targets of the learner. evaluation_summaries = self.add_eval_summaries() # All summaries. self.standard_summaries = tf.summary.merge(standard_summaries) self.evaluation_summaries = tf.summary.merge(evaluation_summaries) # Get a writer. self.summary_writer = None if self.summary_dir is not None: self.summary_writer = tf.summary.FileWriter(self.summary_dir) if not tf.io.gfile.exists(self.summary_dir): tf.io.gfile.makedirs(self.summary_dir) def create_learner(self, is_training, learner_class, split, tied_learner=None): """Instantiates `learner_class`, tying weights to `tied_learner`.""" if issubclass(learner_class, learners.BatchLearner): logit_dim = self._get_logit_dim( split, is_batch_learner=True, is_training=is_training) elif issubclass(learner_class, learners.EpisodicLearner): logit_dim = self._get_logit_dim( split, is_batch_learner=False, is_training=is_training) else: raise ValueError('The specified `learner_class` should be a subclass of ' '`learners.BatchLearner` or `learners.EpisodicLearner`, ' 'but received {}.'.format(learner_class)) if (issubclass(learner_class, experimental_learners.ExperimentalLearner) and tied_learner is not None): return learner_class( is_training=is_training, logit_dim=logit_dim, input_shape=self.image_shape, embedding_fn=tied_learner.embedding_fn, ) else: return learner_class( is_training=is_training, logit_dim=logit_dim, input_shape=self.image_shape, ) def get_benchmark_specification(self, records_root_dir=None): """Returns a BenchmarkSpecification. 
    Args:
      records_root_dir: Optional. If provided, a list or string that sets the
        directory in which a child directory will be searched for each dataset
        to locate that dataset's records and dataset specification. If it's a
        string, that path will be used for all datasets. If it's a list, its
        length must be the same as the number of datasets, in order to specify
        a different such directory for each. If None, self.records_root_dir
        will be used for all datasets.

    Raises:
      RuntimeError: Incorrect file_pattern detected in a dataset specification.
    """
    (data_spec_list, has_dag_ontology, has_bilevel_ontology,
     splits_to_contribute) = [], [], [], []
    seen_datasets = set()
    eval_dataset_list = self.eval_dataset_list
    # No need to read specs when they are not available and not needed.
    if self._fixed_eval:
      eval_dataset_list = []
    if self.is_training:
      benchmark_datasets = self.train_dataset_list + eval_dataset_list
    else:
      benchmark_datasets = eval_dataset_list
    if isinstance(records_root_dir, list):
      if len(records_root_dir) != len(benchmark_datasets):
        raise ValueError(
            'The given records_root_dir is a list whose length is not the '
            'same as the number of benchmark datasets. Found datasets {} '
            '(for the {} phase) but len(records_root_dir) is {}. Expected '
            'their lengths to match or records_root_dir to be a '
            'string.'.format(benchmark_datasets,
                             'training' if self.is_training else 'evaluation',
                             len(records_root_dir)))
      records_roots_for_datasets = records_root_dir
    elif isinstance(records_root_dir, six.text_type):
      records_roots_for_datasets = [records_root_dir] * len(benchmark_datasets)
    elif records_root_dir is None:
      records_roots_for_datasets = [self.records_root_dir
                                   ] * len(benchmark_datasets)

    for dataset_name, dataset_records_root in zip(benchmark_datasets,
                                                  records_roots_for_datasets):
      # Might be seeing a dataset for a second time if it belongs to both the
      # train and eval dataset lists.
      if dataset_name in seen_datasets:
        continue
      dataset_records_path = os.path.join(dataset_records_root, dataset_name)
      data_spec = dataset_spec_lib.load_dataset_spec(dataset_records_path)

      # Only ImageNet has a DAG ontology.
      has_dag = (dataset_name.startswith('ilsvrc_2012'))
      # Only Omniglot has a bi-level ontology.
      is_bilevel = (dataset_name == 'omniglot')

      # The meta-splits that this dataset will contribute data to.
      if not self.is_training:
        # If we're meta-testing, all datasets contribute only to meta-test.
        splits = {self.eval_split}
      else:
        splits = set()
        if dataset_name in self.train_dataset_list:
          splits.add(TRAIN_SPLIT)
        if dataset_name in self.eval_dataset_list:
          splits.add(VALID_SPLIT)

      # By default, all classes of each split will eventually be used for
      # episode creation. But it might be that for some datasets, it is
      # requested to restrict the available number of classes of some splits.
      restricted_classes_per_split = {}
      if dataset_name in self.restrict_classes:
        classes_per_split = self.restrict_classes[dataset_name]
        for split, num_classes in classes_per_split.items():
          # The option to restrict classes is not supported in conjunction
          # with non-uniform (bilevel or hierarchical) class sampling.
episode_descr_config = ( self.train_episode_config if split == TRAIN_SPLIT else self.eval_episode_config) if has_dag and not episode_descr_config.ignore_dag_ontology: raise ValueError('Restrictions on the class set of a dataset with ' 'a DAG ontology are not supported when ' 'ignore_dag_ontology is False.') if is_bilevel and not episode_descr_config.ignore_bilevel_ontology: raise ValueError('Restrictions on the class set of a dataset with ' 'a bilevel ontology are not supported when ' 'ignore_bilevel_ontology is False.') restricted_classes_per_split[get_split_enum(split)] = num_classes # Initialize the DatasetSpecificaton to account for this restriction. data_spec.initialize(restricted_classes_per_split) # Log the applied restrictions. logging.info('Restrictions for dataset %s:', dataset_name) for split in list(splits): num_classes = data_spec.get_classes(get_split_enum(split)) logging.info('\t split %s is restricted to %d classes', split, num_classes) # Add this dataset to the benchmark. logging.info('Adding dataset %s', data_spec.name) data_spec_list.append(data_spec) has_dag_ontology.append(has_dag) has_bilevel_ontology.append(is_bilevel) splits_to_contribute.append(splits) # Book-keeping. seen_datasets.add(dataset_name) if self.eval_imbalance_dataset: self.eval_imbalance_dataset_spec = data_spec assert len(data_spec_list) == 1, ('Imbalance analysis is only ' 'supported on one dataset at a time.') benchmark_spec = dataset_spec_lib.BenchmarkSpecification( 'benchmark', self.image_shape, data_spec_list, has_dag_ontology, has_bilevel_ontology, splits_to_contribute) # Logging of which datasets will be used for the different meta-splits. splits_to_datasets = collections.defaultdict(list) for dataset_spec, splits_to_contribute in zip(data_spec_list, splits_to_contribute): for split in splits_to_contribute: splits_to_datasets[split].append(dataset_spec.name) for split, datasets in splits_to_datasets.items(): logging.info('Episodes for split %s will be created from %s', split, datasets) return benchmark_spec def initialize_session(self): """Initializes a tf.Session.""" if self.enable_tf_optimizations: self.sess = tf.Session() else: session_config = tf.ConfigProto() rewrite_options = session_config.graph_options.rewrite_options rewrite_options.disable_model_pruning = True rewrite_options.constant_folding = rewrite_options.OFF rewrite_options.arithmetic_optimization = rewrite_options.OFF rewrite_options.remapping = rewrite_options.OFF rewrite_options.shape_optimization = rewrite_options.OFF rewrite_options.dependency_optimization = rewrite_options.OFF rewrite_options.function_optimization = rewrite_options.OFF rewrite_options.layout_optimizer = rewrite_options.OFF rewrite_options.loop_optimization = rewrite_options.OFF rewrite_options.memory_optimization = rewrite_options.NO_MEM_OPT self.sess = tf.Session(config=session_config) # Restore or initialize the variables. self.sess.run(tf.global_variables_initializer()) self.sess.run(tf.local_variables_initializer()) for it in self.data_initializeable_iterators: self.sess.run(it.initialize()) def initialize_saver(self): """Initializes a tf.train.Saver and possibly restores parameters.""" # TODO(eringrant): Implement saving and restoring for # `ExperimentalLearner`s. # We omit from saving and restoring any variables that contains as a # substring anything in the list `self.omit_from_saving_and_reloading. # For example, those that track iterator state. 
logging.info( 'Omitting from saving / restoring any variable that ' 'contains any of the following substrings: %s', self.omit_from_saving_and_reloading) def is_not_requested_to_omit(variable_name): return all([ substring not in variable_name for substring in self.omit_from_saving_and_reloading ]) # TODO(doersch): the replica_ variables are created by the keras # distributed optimizer, and are copies of the optimizer (e.g. Adam) # variables. There's probably a smarter way to avoid saving them. var_list = list([ var for var in tf.global_variables() if is_not_requested_to_omit(var.name) and 'replica_' not in var.name ]) if var_list: self.saver = tf.train.Saver(var_list=var_list, max_to_keep=1200) else: self.saver = None logging.info('Variables not being saved since no variables left after ' 'filtering.') if self.checkpoint_to_restore: if not self.saver: raise ValueError( 'Checkpoint not restored, since there is no Saver created. This is ' 'likely due to no parameters being available. If you intend to run ' 'parameterless training, set `checkpoint_to_restore` to None.') if self.is_training: # To handle pre-emption, we continue from the latest checkpoint if # checkpoints already exist in the checkpoint directory. latest_checkpoint = None if self.checkpoint_dir is not None: latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir) if latest_checkpoint is not None: if not self.saver: raise ValueError( 'Checkpoint not restored, since there is no Saver created. This ' 'is likely due to no parameters being available. ') restore_or_log_informative_error(self.saver, self.sess, latest_checkpoint) elif self.checkpoint_to_restore: logging.info('No training checkpoints found.') # For training episodic models from a checkpoint, we restore the # backbone weights but omit other (e.g., optimizer) parameters. backbone_vars_to_reload = [ var for var in tf.global_variables() if functional_backbones.is_backbone_variable( var.name, only_if=is_not_requested_to_omit) ] backbone_saver = tf.train.Saver( var_list=backbone_vars_to_reload, max_to_keep=1) restore_or_log_informative_error(backbone_saver, self.sess, self.checkpoint_to_restore) logging.info( 'Restored only vars %s from provided `checkpoint_to_restore`: %s', [var.name for var in backbone_vars_to_reload], self.checkpoint_to_restore) else: logging.info( 'No checkpoints found; training from random initialization.') elif self.checkpoint_to_restore is not None: # For evaluation, we restore more than the backbone (embedding function) # variables from the provided checkpoint, so we use `self.saver`. 
restore_or_log_informative_error(self.saver, self.sess, self.checkpoint_to_restore) else: logging.info( 'No checkpoints found; evaluating with a random initialization.') def get_batch_or_episodic_specification(self, split): if split == TRAIN_SPLIT: return self._create_train_specification() else: return self._create_eval_specification(split) def _create_train_specification(self): """Returns a `BatchSpecification` or `EpisodeSpecification` for training.""" if (issubclass(self.train_learner_class, learners.EpisodicLearner) or self.eval_split == TRAIN_SPLIT): return learning_spec.EpisodeSpecification(learning_spec.Split.TRAIN, self.num_classes_train, self.num_support_train, self.num_query_train) elif issubclass(self.train_learner_class, learners.BatchLearner): return learning_spec.BatchSpecification(learning_spec.Split.TRAIN, self.batch_size) else: raise ValueError('The specified `learner_class` should be a subclass of ' '`learners.BatchLearner` or `learners.EpisodicLearner`, ' 'but received {}.'.format(self.train_learner_class)) def _create_eval_specification(self, split=TEST_SPLIT): """Create an `EpisodeSpecification` for episodic evaluation. Args: split: The split from which to generate the `EpisodeSpecification`. Returns: An `EpisodeSpecification`. Raises: ValueError: Invalid `split`. """ if split not in (VALID_SPLIT, TEST_SPLIT): raise UnexpectedSplitError( split, expected_splits=(VALID_SPLIT, TEST_SPLIT)) split_enum = get_split_enum(split) return learning_spec.EpisodeSpecification(split_enum, self.num_classes_eval, self.num_support_eval, self.num_query_eval) def _restrict_dataset_list_for_split(self, split, splits_to_contribute, dataset_list): """Returns the restricted dataset_list for the given split. Args: split: A string, either TRAIN_SPLIT, VALID_SPLIT or TEST_SPLIT. splits_to_contribute: A list whose length is the number of datasets in the benchmark. Each element is a set of strings corresponding to the splits that the respective dataset will contribute to. dataset_list: A list which has one element per selected dataset (same length as splits_to_contribute), e.g. this can be one of the lists dataset_spec_list, has_dag_ontology, has_bilevel_ontology of the BenchmarkSpecification. """ updated_list = [] for dataset_num, dataset_splits in enumerate(splits_to_contribute): if split in dataset_splits: updated_list.append(dataset_list[dataset_num]) return updated_list def get_num_to_take(self, dataset_name, split): """Return the number of examples to restrict to for a dataset/split pair.""" num_to_take = -1 # By default, no restriction. 
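    # Hypothetical illustration of the expected structure: with
    # restrict_num_per_class == {'omniglot': {'train': 10}}, this returns 10
    # for ('omniglot', 'train') and -1 (no restriction) for any other
    # dataset / split pair.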
if dataset_name in self.restrict_num_per_class: dataset_restrict_num_per_class = self.restrict_num_per_class[dataset_name] if split in dataset_restrict_num_per_class: num_to_take = dataset_restrict_num_per_class[split] return num_to_take def build_data(self, split): """Builds a `tf.Dataset` of episodes or batches from `split`.""" learner_class = ( self.train_learner_class if split == TRAIN_SPLIT else self.eval_learner_class) if (issubclass(learner_class, learners.BatchLearner) and split != self.eval_split): return self._build_batch(split) elif (issubclass(learner_class, learners.EpisodicLearner) or split == self.eval_split): if self._fixed_eval == 'vtab': return self._build_vtab_episode() elif self._fixed_eval == 'dumped': return self._build_dumped_episode(split) else: return self._build_episode(split) else: raise ValueError('The `Learner` for `split` should be a subclass of ' '`learners.BatchLearner` or `learners.EpisodicLearner`, ' 'but received {}.'.format(learner_class)) def _build_vtab_episode(self): """Build a `tf.Dataset` of vtab episodes.""" # There is only one dataset. # ['vtab_cifar'] -> 'cifar' dataset_name = re.search('^vtab_(.+)', self.eval_dataset_list[0]).group(1) # Replace back comma. dataset_name = dataset_name.replace(':', ',') return_vals = read_episodes.read_vtab_as_episode( dataset_name, self.data_config.image_height, query_size_limit=self.data_config.vtab_query_size_limit) support_ds, query_ds, n_eval, n_classes = return_vals self.vtab_test_classes = n_classes logging.info('Using VTAB episode for eval. Dataset: %s, n_eval: %d', dataset_name, n_eval) episodes = tf.data.Dataset.zip((support_ds.repeat(), query_ds.repeat())) self.num_eval_episodes = n_eval def create_episode_struct(support_data, query_data): return providers.Episode( support_images=support_data['image'], query_images=query_data['image'], support_labels=support_data['label'], query_labels=query_data['label'], support_class_ids=support_data['label'], query_class_ids=query_data['label']) return episodes.map(create_episode_struct) def _build_dumped_episode(self, split): """Builds a `tf.Dataset` of episodes through reading dumped episodes.""" dataset_name = re.search('^dumped_(.+)', self.eval_dataset_list[0]).group(1) folder_path = os.path.join(self.data_config.eval_dumped_episodes_dir, 'valid' if split == VALID_SPLIT else 'test', dataset_name) dumped_episode_ds, n_eval = read_episodes.read_episodes_from_records( folder_path) # If we request less than the available number of episodes, we use that # number instead. n_eval = min(self.num_eval_episodes, n_eval) logging.info('Using dumped episode for eval. Dataset: %s, n_eval: %d', dataset_name, n_eval) self.num_eval_episodes = n_eval map_fn = functools.partial( pipeline.process_dumped_episode, image_size=self.data_config.image_height) dataset = dumped_episode_ds.map(map_fn) # Overlap episode processing and training. data_pipeline = dataset.prefetch(1) data_pipeline = apply_dataset_options(data_pipeline) def create_episode_struct(support_images, support_labels, support_class_ids, query_images, query_labels, query_class_ids): return providers.Episode( support_images=support_images, query_images=query_images, support_labels=support_labels, query_labels=query_labels, support_class_ids=support_class_ids, query_class_ids=query_class_ids) return data_pipeline.map(create_episode_struct) def _build_episode(self, split): """Builds a `tf.Dataset` containing Episodes for "split". Args: split: A string, either TRAIN_SPLIT, VALID_SPLIT, or TEST_SPLIT. 
Returns: An `tf.Dataset` with Episodes. Raises: UnexpectedSplitError: If split not as expected for this episode build. """ shuffle_buffer_size = self.data_config.shuffle_buffer_size read_buffer_size_bytes = self.data_config.read_buffer_size_bytes num_prefetch = self.data_config.num_prefetch (_, image_shape, dataset_spec_list, has_dag_ontology, has_bilevel_ontology, splits_to_contribute) = self.benchmark_spec # Choose only the datasets that are chosen to contribute to the given split. dataset_spec_list = self._restrict_dataset_list_for_split( split, splits_to_contribute, dataset_spec_list) has_dag_ontology = self._restrict_dataset_list_for_split( split, splits_to_contribute, has_dag_ontology) has_bilevel_ontology = self._restrict_dataset_list_for_split( split, splits_to_contribute, has_bilevel_ontology) episode_spec = self.split_episode_or_batch_specs[split] dataset_split = episode_spec[0] # TODO(lamblinp): Support non-square shapes if necessary. For now, all # images are resized to square, even if it changes the aspect ratio. image_size = image_shape[0] if image_shape[1] != image_size: raise ValueError( 'Expected a square image shape, not {}'.format(image_shape)) if split == TRAIN_SPLIT: episode_descr_config = self.train_episode_config elif split in (VALID_SPLIT, TEST_SPLIT): episode_descr_config = self.eval_episode_config else: raise UnexpectedSplitError(split) # Decide how many examples per class to restrict to for each dataset for the # given split (by default there is no restriction). num_per_class = [] # A list whose length is the number of datasets. for dataset_spec in dataset_spec_list: num_per_class.append(self.get_num_to_take(dataset_spec.name, split)) if split == TRAIN_SPLIT: # The learner for the training split should only be in training mode if # the evaluation split is not the training split. gin_scope_name = ('train' if self.eval_split != TRAIN_SPLIT else 'evaluation') else: gin_scope_name = 'evaluation' ignore_hierarchy_prob = episode_descr_config.ignore_hierarchy_probability simclr_episode_fraction = episode_descr_config.simclr_episode_fraction if simclr_episode_fraction > 0: assert not self.enable_tf_optimizations, ( 'Must set enable_tf_optimizations=False or SimCLR will fail; see ' 'https://github.com/tensorflow/tensorflow/issues/22145') # TODO(lamblinp): pass specs directly to the pipeline builder. 
# TODO(lamblinp): move the special case directly in make_..._pipeline if len(dataset_spec_list) == 1: use_dag_ontology = has_dag_ontology[0] if self.eval_finegrainedness or self.eval_imbalance_dataset: use_dag_ontology = False with gin.config_scope(gin_scope_name): data_pipeline = pipeline.make_one_source_episode_pipeline( dataset_spec_list[0], use_dag_ontology=use_dag_ontology, use_bilevel_ontology=has_bilevel_ontology[0], split=dataset_split, episode_descr_config=episode_descr_config, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_size, num_to_take=num_per_class[0], simclr_episode_fraction=simclr_episode_fraction, ignore_hierarchy_probability=ignore_hierarchy_prob) else: if ignore_hierarchy_prob > 0.0: raise ValueError( 'ignore_hierarchy_probability not supported with multisource pipelines' ) with gin.config_scope(gin_scope_name): data_pipeline = pipeline.make_multisource_episode_pipeline( dataset_spec_list, use_dag_ontology_list=has_dag_ontology, use_bilevel_ontology_list=has_bilevel_ontology, split=dataset_split, episode_descr_config=episode_descr_config, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_size, num_to_take=num_per_class, simclr_episode_fraction=simclr_episode_fraction) data_pipeline = apply_dataset_options(data_pipeline) def create_episode_struct(support_images, support_labels, support_class_ids, query_images, query_labels, query_class_ids): return providers.Episode( support_images=support_images, query_images=query_images, support_labels=support_labels, query_labels=query_labels, support_class_ids=support_class_ids, query_class_ids=query_class_ids) return data_pipeline.map(lambda x, y: x).map(create_episode_struct) def _build_batch(self, split): """Builds a `tf.Dataset` of Batch objects containing data for "split". Args: split: A string, either TRAIN_SPLIT, VALID_SPLIT, or TEST_SPLIT. Returns: A `tf.Dataset` containing Batches """ shuffle_buffer_size = self.data_config.shuffle_buffer_size read_buffer_size_bytes = self.data_config.read_buffer_size_bytes num_prefetch = self.data_config.num_prefetch (_, image_shape, dataset_spec_list, _, _, splits_to_contribute) = self.benchmark_spec # Choose only the datasets that are chosen to contribute to the given split. dataset_spec_list = self._restrict_dataset_list_for_split( split, splits_to_contribute, dataset_spec_list) # Decide how many examples per class to restrict to for each dataset for the # given split (by default there is no restriction). num_per_class = [] # A list whose length is the number of datasets. for dataset_spec in dataset_spec_list: num_per_class.append(self.get_num_to_take(dataset_spec.name, split)) dataset_split, batch_size = self.split_episode_or_batch_specs[split] for dataset_spec in dataset_spec_list: if dataset_spec.name in DATASETS_WITH_EXAMPLE_SPLITS: raise ValueError( 'Batch pipeline is used only at meta-train time, and does not ' 'handle datasets with example splits, which should only be used ' 'at meta-test (evaluation) time.') # TODO(lamblinp): pass specs directly to the pipeline builder. 
# TODO(lamblinp): move the special case directly in make_..._pipeline if len(dataset_spec_list) == 1: data_pipeline = pipeline.make_one_source_batch_pipeline( dataset_spec_list[0], split=dataset_split, batch_size=batch_size, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_shape[0], num_to_take=num_per_class[0]) else: data_pipeline = pipeline.make_multisource_batch_pipeline( dataset_spec_list, split=dataset_split, batch_size=batch_size, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_shape[0], num_to_take=num_per_class) data_pipeline = apply_dataset_options(data_pipeline) def create_batch_structure(data, dataset_index): (images, class_ids) = data # The number of available classes for each dataset all_n_classes = [ len(dataset_spec.get_classes(get_split_enum(split))) for dataset_spec in dataset_spec_list ] if len(dataset_spec_list) == 1: n_classes = all_n_classes[0] elif gin.query_parameter('BatchSplitReaderGetReader.add_dataset_offset'): # The total number of classes is the sum for all datasets n_classes = sum(all_n_classes) else: # The number of classes is the one of the current dataset n_classes = tf.convert_to_tensor(all_n_classes)[dataset_index] return providers.Batch( images=images, labels=class_ids, n_classes=n_classes) return data_pipeline.map(create_batch_structure) def get_run_fn_with_train_op(self, run_fn, regularizer_fn, global_step): """Returns the operation that performs a training update.""" def run_fn_with_train_op(data): """Run and train the model.""" res = run_fn(data) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) loss = distribute_utils.aggregate(res['loss']) # note: every worker computes the same loss. This is because the # reduce_mean needs to be computed globally. loss = tf.reduce_mean(loss) loss += regularizer_fn() replica_ctx = tf.distribute.get_replica_context() if replica_ctx: loss /= replica_ctx.num_replicas_in_sync # TODO(doersch): there's probably a better way to do EMA updates. EMAs # are MirroredVariables, which means the assign needs to happen on every # replica. I think what's happening is that the update from replica 1 # is getting run on every replica for a different copy of the mirrored # variable. Only running the assign ops from the first replica doesn't # work. with tf.control_dependencies([tf.group(update_ops)]): if self.normalized_gradient_descent: opt_vars = tf.trainable_variables() if self.distribute: grads = self.optimizer.get_gradients(loss, opt_vars) grads_and_vars = list(zip(grads, opt_vars)) else: grads_and_vars = self.optimizer.compute_gradients(loss, opt_vars) global_norm = 0 # We reverse the order of grads_and_vars because they're computed in # reverse order; this way, the network can begin aggregating the # global norm before the backwards pass is complete. 
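          # In effect, each gradient ends up divided by the global L2 norm
          # sqrt(sum_i ||g_i||^2) (floored for numerical stability) before
          # the optimizer applies it.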
for g, v in grads_and_vars[::-1]: if replica_ctx: g = replica_ctx.all_reduce('sum', g) sumsq = tf.reduce_sum(tf.square(g)) global_norm += sumsq nrm = tf.sqrt(tf.maximum(global_norm, 1e-5)) grads_and_vars2 = [] for g, v in grads_and_vars: grads_and_vars2.append((g / nrm, v)) train_op = self.optimizer.apply_gradients(grads_and_vars2) with tf.control_dependencies([train_op]): train_op = tf.assign(global_step, global_step + 1) else: train_op = self.optimizer.minimize( loss, global_step=global_step, var_list=tf.trainable_variables()) res['train_op'] = train_op return res return run_fn_with_train_op def get_updated_global_step(self): with tf.control_dependencies([self.train_op]): global_step = tf.identity(tf.train.get_global_step()) return global_step def train(self): """The training loop.""" global_step = self.sess.run(tf.train.get_global_step()) logging.info('Starting training from global_step: %d', global_step) updated_global_step = self.get_updated_global_step() should_save = self.checkpoint_dir is not None if should_save and global_step == 0: # Save the initialization weights. save_path = self.saver.save( self.sess, os.path.join(self.checkpoint_dir, 'model_0.ckpt')) logging.info('Model initialization saved: %s', save_path) # Compute the initial validation performance before starting the training, # unless train() has already been called on this object. if np.isnan([self.valid_acc, self.valid_ci]).any(): self.maybe_evaluate(global_step) while global_step < self.num_updates: # Perform the next update. (_, train_loss, train_acc, global_step) = self.sess.run([ self.train_op, self.losses[TRAIN_SPLIT], self.accuracies[TRAIN_SPLIT], updated_global_step ]) train_acc = np.mean(train_acc) # Maybe validate, depending on the global step's value. self.maybe_evaluate(global_step) # Log training progress. if not global_step % self.log_every: message = ( 'Update %d. Train loss: %f, Train accuracy: %f, ' 'Valid accuracy %f +/- %f.\n' % (global_step, train_loss, train_acc, self.valid_acc, self.valid_ci)) logging.info(message) # Update summaries. if self.summary_writer: summaries = self.sess.run(self.standard_summaries) self.summary_writer.add_summary(summaries, global_step) if should_save and global_step % self.checkpoint_every == 0: save_path = self.saver.save( self.sess, os.path.join(self.checkpoint_dir, 'model_%d.ckpt' % global_step)) logging.info('Model checkpoint saved: %s', save_path) def maybe_evaluate(self, global_step): """Maybe perform evaluation, depending on the value of global_step.""" if not global_step % self.validate_every: # Get the validation accuracy and confidence interval. (valid_acc, valid_ci, valid_acc_summary, valid_ci_summary) = self.evaluate( VALID_SPLIT, step=global_step) # Validation summaries are updated every time validation happens which is # every validate_every steps instead of log_every steps. if self.summary_writer: self.summary_writer.add_summary(valid_acc_summary, global_step) self.summary_writer.add_summary(valid_ci_summary, global_step) self.valid_acc = valid_acc self.valid_ci = valid_ci # TODO(evcu) Improve this so that if the eval_only loads a global_step, it is # used at logging instead of value 0. 
def evaluate(self, split, step=0): """Returns performance metrics across num_eval_trials episodes / batches.""" num_eval_trials = self.num_eval_episodes logging.info('Performing evaluation of the %s split using %d episodes...', split, num_eval_trials) accuracies = [] total_samples = 0 for eval_trial_num in range(num_eval_trials): # Following is used to normalize accuracies. acc, summaries = self.sess.run( [self.accuracies[split], self.evaluation_summaries]) # Write complete summaries during evaluation, but not training. # Otherwise, validation summaries become too big. if not self.is_training and self.summary_writer: self.summary_writer.add_summary(summaries, eval_trial_num) if self._fixed_eval == 'vtab': accuracies.append(np.sum(acc)) total_samples += np.size(acc) continue accuracies.append(np.mean(acc)) total_samples += 1 logging.info('Finished evaluation.') mean_acc = np.sum(accuracies) / total_samples ci_acc = np.std(accuracies) * 1.96 / np.sqrt(len(accuracies)) # confidence # VTAB evaluation has 1 episode. if self._fixed_eval == 'vtab': ci_acc = 0 if not self.is_training: # Logging during training is handled by self.train() instead. logging.info('Accuracy on the meta-%s split: %f, +/- %f.\n', split, mean_acc, ci_acc) with tf.name_scope('trainer_metrics'): with tf.name_scope(split): mean_acc_summary = tf.Summary() mean_acc_summary.value.add(tag='mean acc', simple_value=mean_acc) ci_acc_summary = tf.Summary() ci_acc_summary.value.add(tag='acc CI', simple_value=ci_acc) return mean_acc, ci_acc, mean_acc_summary, ci_acc_summary def add_eval_summaries(self): """Returns summaries of way / shot / classes/ logits / targets.""" evaluation_summaries = [ tf.summary.scalar('global_step', tf.train.get_global_step()) ] for split in self.required_splits: evaluation_summaries.extend(self._add_eval_summaries_split(split)) return evaluation_summaries def _add_eval_summaries_split(self, split): """Returns split's summaries of way / shot / classes / logits / targets.""" split_eval_summaries = [] episode_info = copy.copy(self.episode_info[split]) episode_info['query_logits'] = self.predictions[split] summary_labels = [ 'way', 'shots', 'class_ids', 'query_logits', 'query_targets' ] if self.eval_imbalance_dataset: summary_labels += ['class_props'] for label in summary_labels: if episode_info[label] is not None: if episode_info[label].shape: summary_fn = tf.summary.tensor_summary else: summary_fn = tf.summary.scalar summary = summary_fn('%s_%s' % (split, label), episode_info[label]) split_eval_summaries.append(summary) return split_eval_summaries def _get_logit_dim(self, split, is_batch_learner, is_training): """Returns the total number of logits needed. Args: split: string, one of TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT. is_batch_learner: bool, if True the logit count is obtained from dataset spec. If False, `max_ways` is used for episodic dataset. is_training: bool, used to decide number of logits. Returns: int, total number of logits needed. """ if self._fixed_eval == 'vtab': return self.vtab_test_classes if is_batch_learner: # Get the total number of classes in this split, across all datasets # contributing to this split. 
total_classes = 0 for dataset_spec, dataset_splits in zip( self.benchmark_spec.dataset_spec_list, self.benchmark_spec.splits_to_contribute): if any( get_split_enum(ds_split) == split for ds_split in dataset_splits): total_classes += len(dataset_spec.get_classes(split)) else: total_classes = ( self.train_episode_config.max_ways if is_training else self.eval_episode_config.max_ways) return total_classes
apache-2.0
ryfeus/lambda-packs
Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
26
50558
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow estimators for Linear and DNN joined training models (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import six from tensorflow.contrib import layers from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.python.feature_column import feature_column as fc_core from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import nn from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.summary import summary from tensorflow.python.training import sync_replicas_optimizer from tensorflow.python.training import training_util # The default learning rates are a historical artifact of the initial # implementation, but seem a reasonable choice. _DNN_LEARNING_RATE = 0.05 _LINEAR_LEARNING_RATE = 0.2 _FIX_GLOBAL_STEP_INCREMENT_DATE = "2017-04-15" _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS = ( "Please set fix_global_step_increment_bug=True and update training steps " "in your pipeline. See pydoc for details.") def _as_iterable(preds, output): for pred in preds: yield pred[output] def _get_feature_dict(features): if isinstance(features, dict): return features return {"": features} def _get_optimizer(optimizer): if callable(optimizer): return optimizer() else: return optimizer def _check_no_sync_replicas_optimizer(optimizer): if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer): raise ValueError( "SyncReplicasOptimizer is not supported in DNNLinearCombined model. " "If you want to use this optimizer, please use either DNN or Linear " "model.") def _linear_learning_rate(num_linear_feature_columns): """Returns the default learning rate of the linear model. The calculation is a historical artifact of this initial implementation, but has proven a reasonable choice. 
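  For example (a worked instance of the formula below), with 100 linear
  feature columns this returns min(0.2, 1 / sqrt(100)) = 0.1.
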
Args: num_linear_feature_columns: The number of feature columns of the linear model. Returns: A float. """ default_learning_rate = 1. / math.sqrt(num_linear_feature_columns) return min(_LINEAR_LEARNING_RATE, default_learning_rate) def _add_hidden_layer_summary(value, tag): summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value)) summary.histogram("%s/activation" % tag, value) def _add_layer_summary(value, tag): summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value)) summary.histogram("%s/activation" % tag, value) def _get_embedding_variable(column, collection_key, input_layer_scope): return ops.get_collection(collection_key, input_layer_scope + "/" + column.name) def _extract_embedding_lr_multipliers(embedding_lr_multipliers, collection_key, input_layer_scope): """Converts embedding lr multipliers to variable based gradient multiplier.""" if not embedding_lr_multipliers: return None gradient_multipliers = {} for column, lr_mult in embedding_lr_multipliers.items(): if not isinstance(column, feature_column_lib._EmbeddingColumn): # pylint: disable=protected-access raise ValueError( "learning rate multipler can only be defined for embedding columns. " "It is defined for {}".format(column)) embedding = _get_embedding_variable( column, collection_key, input_layer_scope) if not embedding: raise ValueError("Couldn't find a variable for column {}".format(column)) for v in embedding: gradient_multipliers[v] = lr_mult return gradient_multipliers def _dnn_linear_combined_model_fn(features, labels, mode, params, config=None): """Deep Neural Net and Linear combined model_fn. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. The following hyperparameters are expected: * head: A `Head` instance. * linear_feature_columns: An iterable containing all the feature columns used by the Linear model. * linear_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the Linear model. Defaults to the Ftrl optimizer. * joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires all columns are sparse and have the 'sum' combiner. * dnn_feature_columns: An iterable containing all the feature columns used by the DNN model. * dnn_optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training the DNN model. Defaults to the Adagrad optimizer. * dnn_hidden_units: List of hidden units per DNN layer. * dnn_activation_fn: Activation function applied to each DNN layer. If `None`, will use `tf.nn.relu`. * dnn_dropout: When not `None`, the probability we will drop out a given DNN coordinate. * gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. * embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. * input_layer_partitioner: Optional. Partitioner for input layer. config: `RunConfig` object to configure the runtime settings. 
Returns: `ModelFnOps` Raises: ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time, or `input_layer_partitioner` is missing. """ head = params["head"] linear_feature_columns = params.get("linear_feature_columns") linear_optimizer = params.get("linear_optimizer") or "Ftrl" joint_linear_weights = params.get("joint_linear_weights") dnn_feature_columns = params.get("dnn_feature_columns") dnn_optimizer = params.get("dnn_optimizer") or "Adagrad" dnn_hidden_units = params.get("dnn_hidden_units") dnn_activation_fn = params.get("dnn_activation_fn") or nn.relu dnn_dropout = params.get("dnn_dropout") gradient_clip_norm = params.get("gradient_clip_norm") num_ps_replicas = config.num_ps_replicas if config else 0 input_layer_partitioner = params.get("input_layer_partitioner") or ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20)) embedding_lr_multipliers = params.get("embedding_lr_multipliers", {}) fix_global_step_increment_bug = params.get( "fix_global_step_increment_bug", True) if not linear_feature_columns and not dnn_feature_columns: raise ValueError( "Either linear_feature_columns or dnn_feature_columns must be defined.") features = _get_feature_dict(features) linear_optimizer = _get_optimizer(linear_optimizer) _check_no_sync_replicas_optimizer(linear_optimizer) dnn_optimizer = _get_optimizer(dnn_optimizer) _check_no_sync_replicas_optimizer(dnn_optimizer) # Build DNN Logits. dnn_parent_scope = "dnn" if not dnn_feature_columns: dnn_logits = None else: if not dnn_hidden_units: raise ValueError( "dnn_hidden_units must be defined when dnn_feature_columns is " "specified.") dnn_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas)) with variable_scope.variable_scope( dnn_parent_scope, values=tuple(six.itervalues(features)), partitioner=dnn_partitioner): with variable_scope.variable_scope( "input_from_feature_columns", values=tuple(six.itervalues(features)), partitioner=input_layer_partitioner) as dnn_input_scope: if all([ isinstance(fc, feature_column_lib._FeatureColumn) # pylint: disable=protected-access for fc in dnn_feature_columns ]): net = layers.input_from_feature_columns( columns_to_tensors=features, feature_columns=dnn_feature_columns, weight_collections=[dnn_parent_scope], scope=dnn_input_scope) else: net = fc_core.input_layer( features=features, feature_columns=dnn_feature_columns, weight_collections=[dnn_parent_scope]) for layer_id, num_hidden_units in enumerate(dnn_hidden_units): with variable_scope.variable_scope( "hiddenlayer_%d" % layer_id, values=(net,)) as dnn_hidden_layer_scope: net = layers.fully_connected( net, num_hidden_units, activation_fn=dnn_activation_fn, variables_collections=[dnn_parent_scope], scope=dnn_hidden_layer_scope) if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN: net = layers.dropout( net, keep_prob=(1.0 - dnn_dropout)) # TODO(b/31209633): Consider adding summary before dropout. _add_layer_summary(net, dnn_hidden_layer_scope.name) with variable_scope.variable_scope( "logits", values=(net,)) as dnn_logits_scope: dnn_logits = layers.fully_connected( net, head.logits_dimension, activation_fn=None, variables_collections=[dnn_parent_scope], scope=dnn_logits_scope) _add_layer_summary(dnn_logits, dnn_logits_scope.name) # Build Linear logits. 
linear_parent_scope = "linear" if not linear_feature_columns: linear_logits = None else: linear_partitioner = partitioned_variables.min_max_variable_partitioner( max_partitions=num_ps_replicas, min_slice_size=64 << 20) with variable_scope.variable_scope( linear_parent_scope, values=tuple(six.itervalues(features)), partitioner=linear_partitioner) as scope: if all([isinstance(fc, feature_column_lib._FeatureColumn) # pylint: disable=protected-access for fc in linear_feature_columns]): if joint_linear_weights: linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=linear_feature_columns, num_outputs=head.logits_dimension, weight_collections=[linear_parent_scope], scope=scope) else: linear_logits, _, _ = layers.weighted_sum_from_feature_columns( columns_to_tensors=features, feature_columns=linear_feature_columns, num_outputs=head.logits_dimension, weight_collections=[linear_parent_scope], scope=scope) else: linear_logits = fc_core.linear_model( features=features, feature_columns=linear_feature_columns, units=head.logits_dimension, weight_collections=[linear_parent_scope]) _add_layer_summary(linear_logits, scope.name) # Combine logits and build full model. if dnn_logits is not None and linear_logits is not None: logits = dnn_logits + linear_logits elif dnn_logits is not None: logits = dnn_logits else: logits = linear_logits def _make_training_op(training_loss): """Training op for the DNN linear combined model.""" train_ops = [] global_step = training_util.get_global_step() if dnn_logits is not None: train_ops.append( optimizers.optimize_loss( loss=training_loss, global_step=global_step, learning_rate=_DNN_LEARNING_RATE, optimizer=dnn_optimizer, gradient_multipliers=_extract_embedding_lr_multipliers( # pylint: disable=protected-access embedding_lr_multipliers, dnn_parent_scope, dnn_input_scope.name), clip_gradients=gradient_clip_norm, variables=ops.get_collection(dnn_parent_scope), name=dnn_parent_scope, # Empty summaries, because head already logs "loss" summary. summaries=[], increment_global_step=not fix_global_step_increment_bug)) if linear_logits is not None: train_ops.append( optimizers.optimize_loss( loss=training_loss, global_step=global_step, learning_rate=_linear_learning_rate(len(linear_feature_columns)), optimizer=linear_optimizer, clip_gradients=gradient_clip_norm, variables=ops.get_collection(linear_parent_scope), name=linear_parent_scope, # Empty summaries, because head already logs "loss" summary. summaries=[], increment_global_step=not fix_global_step_increment_bug)) train_op = control_flow_ops.group(*train_ops) if fix_global_step_increment_bug: with ops.control_dependencies([train_op]): with ops.colocate_with(global_step): return state_ops.assign_add(global_step, 1).op return train_op return head.create_model_fn_ops( features=features, mode=mode, labels=labels, train_op_fn=_make_training_op, logits=logits) class DNNLinearCombinedEstimator(estimator.Estimator): """An estimator for TensorFlow Linear and DNN joined training models. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. 
for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ @deprecated_arg_values( _FIX_GLOBAL_STEP_INCREMENT_DATE, _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS, fix_global_step_increment_bug=False) def __init__(self, # _joint_linear_weights pylint: disable=invalid-name head, model_dir=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=None, dnn_dropout=None, gradient_clip_norm=None, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None, fix_global_step_increment_bug=False, input_layer_partitioner=None): """Initializes a DNNLinearCombinedEstimator instance. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Args: head: A _Head object. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True will use a single (possibly partitioned) variable to store all weights for the linear model. More efficient if there are many columns, however requires all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set should be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. fix_global_step_increment_bug: If `False`, the estimator needs two fit steps to optimize both linear and dnn parts. If `True`, this bug is fixed. New users must set this to `True`, but the default value is `False` for backwards compatibility. input_layer_partitioner: Optional. Partitioner for input layer. 
Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ linear_feature_columns = tuple(linear_feature_columns or []) dnn_feature_columns = tuple(dnn_feature_columns or []) if not linear_feature_columns + dnn_feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") super(DNNLinearCombinedEstimator, self).__init__( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer, "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "fix_global_step_increment_bug": fix_global_step_increment_bug, "input_layer_partitioner": input_layer_partitioner }, feature_engineering_fn=feature_engineering_fn) class DNNLinearCombinedClassifier(estimator.Estimator): """A classifier for TensorFlow Linear and DNN joined training models. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Example: ```python sparse_feature_a = sparse_column_with_hash_bucket(...) sparse_feature_b = sparse_column_with_hash_bucket(...) sparse_feature_a_x_sparse_feature_b = crossed_column(...) sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a, ...) sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b, ...) estimator = DNNLinearCombinedClassifier( # common settings n_classes=n_classes, weight_column_name=weight_column_name, # wide settings linear_feature_columns=[sparse_feature_a_x_sparse_feature_b], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.AdagradOptimizer(...)) # Input builders def input_fn_train: # returns x, y (where y represents label's class index). ... def input_fn_eval: # returns x, y (where y represents label's class index). ... def input_fn_predict: # returns x, None. ... estimator.fit(input_fn=input_fn_train) estimator.evaluate(input_fn=input_fn_eval) # predict_classes returns class indices. estimator.predict_classes(input_fn=input_fn_predict) ``` If the user specifies `label_keys` in constructor, labels must be strings from the `label_keys` vocabulary. Example: ```python label_keys = ['label0', 'label1', 'label2'] estimator = DNNLinearCombinedClassifier( n_classes=n_classes, linear_feature_columns=[sparse_feature_a_x_sparse_feature_b], dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], dnn_hidden_units=[1000, 500, 100], label_keys=label_keys) def input_fn_train: # returns x, y (where y is one of label_keys). pass estimator.fit(input_fn=input_fn_train) def input_fn_eval: # returns x, y (where y is one of label_keys). pass estimator.evaluate(input_fn=input_fn_eval) def input_fn_predict: # returns x, None # predict_classes returns one of label_keys. 
estimator.predict_classes(input_fn=input_fn_predict) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. * for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. """ @deprecated_arg_values( _FIX_GLOBAL_STEP_INCREMENT_DATE, _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS, fix_global_step_increment_bug=False) def __init__(self, # _joint_linear_weights pylint: disable=invalid-name model_dir=None, n_classes=2, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None, input_layer_min_slice_size=None, label_keys=None, fix_global_step_increment_bug=False): """Constructs a DNNLinearCombinedClassifier instance. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. Note that class labels are integers representing the class index (i.e. values from 0 to n_classes-1). For arbitrary label values (e.g. string labels), convert to class indices first. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. enable_centered_bias: A bool. 
If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. input_layer_min_slice_size: Optional. The min slice size of input layer partitions. If not provided, will use the default of 64M. label_keys: Optional list of strings with size `[n_classes]` defining the label vocabulary. Only supported for `n_classes` > 2. fix_global_step_increment_bug: If `False`, the estimator needs two fit steps to optimize both linear and dnn parts. If `True`, this bug is fixed. New users must set this to `True`, but it the default value is `False` for backwards compatibility. Raises: ValueError: If `n_classes` < 2. ValueError: If both `linear_feature_columns` and `dnn_features_columns` are empty at the same time. """ head = head_lib.multi_class_head( n_classes=n_classes, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias, label_keys=label_keys) linear_feature_columns = tuple(linear_feature_columns or []) dnn_feature_columns = tuple(dnn_feature_columns or []) self._feature_columns = linear_feature_columns + dnn_feature_columns if not self._feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") # TODO(b/35922130): Replace with `input_layer_partitioner` arg. input_layer_partitioner = None if input_layer_min_slice_size is not None: input_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=config.num_ps_replicas if config else 0, min_slice_size=input_layer_min_slice_size)) super(DNNLinearCombinedClassifier, self).__init__( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer, "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "input_layer_partitioner": input_layer_partitioner, "fix_global_step_increment_bug": fix_global_step_increment_bug, }, feature_engineering_fn=feature_engineering_fn) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) @deprecated_arg_values( "2017-03-01", "Please switch to predict_classes, or set `outputs` argument.", outputs=None) def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. By default, returns predicted classes. But this default will be dropped soon. Users should either pass `outputs`, or call `predict_classes` method. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. outputs: list of `str`, name of the output to predict. If `None`, returns classes. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. 
Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted classes with shape [batch_size] (or an iterable of predicted classes if as_iterable is True). Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1). If `outputs` is set, returns a dict of predictions. """ if not outputs: return self.predict_classes( x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable) return super(DNNLinearCombinedClassifier, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs, as_iterable=as_iterable) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_classes(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted classes for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted classes with shape [batch_size] (or an iterable of predicted classes if as_iterable is True). Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1). """ key = prediction_key.PredictionKey.CLASSES preds = super(DNNLinearCombinedClassifier, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key].reshape(-1) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_proba( self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns prediction probabilities for given features. Args: x: features. input_fn: Input function. If set, x and y must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted probabilities with shape [batch_size, n_classes] (or an iterable of predicted probabilities if as_iterable is True). 
""" key = prediction_key.PredictionKey.PROBABILITIES preds = super(DNNLinearCombinedClassifier, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=key) return preds[key] @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.") def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BasEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return super(DNNLinearCombinedClassifier, self).export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=(signature_fn or export.classification_signature_fn_with_prob), prediction_key=prediction_key.PredictionKey.PROBABILITIES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) class DNNLinearCombinedRegressor(estimator.Estimator): """A regressor for TensorFlow Linear and DNN joined training models. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Example: ```python sparse_feature_a = sparse_column_with_hash_bucket(...) sparse_feature_b = sparse_column_with_hash_bucket(...) sparse_feature_a_x_sparse_feature_b = crossed_column(...) sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a, ...) sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b, ...) estimator = DNNLinearCombinedRegressor( # common settings weight_column_name=weight_column_name, # wide settings linear_feature_columns=[sparse_feature_a_x_sparse_feature_b], linear_optimizer=tf.train.FtrlOptimizer(...), # deep settings dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb], dnn_hidden_units=[1000, 500, 100], dnn_optimizer=tf.train.ProximalAdagradOptimizer(...)) # To apply L1 and L2 regularization, you can set optimizers as follows: tf.train.ProximalAdagradOptimizer( learning_rate=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.001) # It is same for FtrlOptimizer. # Input builders def input_fn_train: # returns x, y ... def input_fn_eval: # returns x, y ... def input_fn_predict: # returns x, None ... estimator.train(input_fn_train) estimator.evaluate(input_fn_eval) estimator.predict(input_fn_predict) ``` Input of `fit`, `train`, and `evaluate` should have following features, otherwise there will be a `KeyError`: if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. for each `column` in `dnn_feature_columns` + `linear_feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn, a feature with `key=column.name` whose `value` is a `Tensor`. 
""" @deprecated_arg_values( _FIX_GLOBAL_STEP_INCREMENT_DATE, _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS, fix_global_step_increment_bug=False) def __init__(self, # _joint_linear_weights pylint: disable=invalid-name model_dir=None, weight_column_name=None, linear_feature_columns=None, linear_optimizer=None, _joint_linear_weights=False, dnn_feature_columns=None, dnn_optimizer=None, dnn_hidden_units=None, dnn_activation_fn=nn.relu, dnn_dropout=None, gradient_clip_norm=None, enable_centered_bias=False, label_dimension=1, config=None, feature_engineering_fn=None, embedding_lr_multipliers=None, input_layer_min_slice_size=None, fix_global_step_increment_bug=False): """Initializes a DNNLinearCombinedRegressor instance. Note: New users must set `fix_global_step_increment_bug=True` when creating an estimator. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. linear_feature_columns: An iterable containing all the feature columns used by linear part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to the linear part of the model. If `None`, will use a FTRL optimizer. _joint_linear_weights: If True a single (possibly partitioned) variable will be used to store the linear model weights. It's faster, but requires that all columns are sparse and have the 'sum' combiner. dnn_feature_columns: An iterable containing all the feature columns used by deep part of the model. All items in the set must be instances of classes derived from `FeatureColumn`. dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to the deep part of the model. If `None`, will use an Adagrad optimizer. dnn_hidden_units: List of hidden units per layer. All layers are fully connected. dnn_activation_fn: Activation function applied to each layer. If None, will use `tf.nn.relu`. dnn_dropout: When not None, the probability we will drop out a given coordinate. gradient_clip_norm: A float > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See tf.clip_by_global_norm for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. label_dimension: Number of regression targets per example. This is the size of the last dimension of the labels and logits `Tensor` objects (typically, these have shape `[batch_size, label_dimension]`). config: RunConfig object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. input_layer_min_slice_size: Optional. The min slice size of input layer partitions. If not provided, will use the default of 64M. fix_global_step_increment_bug: If `False`, the estimator needs two fit steps to optimize both linear and dnn parts. If `True`, this bug is fixed. 
New users must set this to `True`, but it the default value is `False` for backwards compatibility. Raises: ValueError: If both linear_feature_columns and dnn_features_columns are empty at the same time. """ linear_feature_columns = linear_feature_columns or [] dnn_feature_columns = dnn_feature_columns or [] self._feature_columns = linear_feature_columns + dnn_feature_columns if not self._feature_columns: raise ValueError("Either linear_feature_columns or dnn_feature_columns " "must be defined.") # TODO(b/35922130): Replace with `input_layer_partitioner` arg. input_layer_partitioner = None if input_layer_min_slice_size is not None: input_layer_partitioner = ( partitioned_variables.min_max_variable_partitioner( max_partitions=config.num_ps_replicas if config else 0, min_slice_size=input_layer_min_slice_size)) head = head_lib.regression_head( weight_column_name=weight_column_name, label_dimension=label_dimension, enable_centered_bias=enable_centered_bias) super(DNNLinearCombinedRegressor, self).__init__( model_fn=_dnn_linear_combined_model_fn, model_dir=model_dir, config=config, params={ "head": head, "linear_feature_columns": linear_feature_columns, "linear_optimizer": linear_optimizer, "joint_linear_weights": _joint_linear_weights, "dnn_feature_columns": dnn_feature_columns, "dnn_optimizer": dnn_optimizer, "dnn_hidden_units": dnn_hidden_units, "dnn_activation_fn": dnn_activation_fn, "dnn_dropout": dnn_dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "input_layer_partitioner": input_layer_partitioner, "fix_global_step_increment_bug": fix_global_step_increment_bug, }, feature_engineering_fn=feature_engineering_fn) def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None, hooks=None): """See evaluable.Evaluable.""" # TODO(zakaria): remove once deprecation is finished (b/31229024) custom_metrics = {} if metrics: for key, metric in six.iteritems(metrics): if (not isinstance(metric, metric_spec.MetricSpec) and not isinstance(key, tuple)): custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric else: custom_metrics[key] = metric return super(DNNLinearCombinedRegressor, self).evaluate( x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, steps=steps, metrics=custom_metrics, name=name, checkpoint_path=checkpoint_path, hooks=hooks) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) @deprecated_arg_values( "2017-03-01", "Please switch to predict_scores, or set `outputs` argument.", outputs=None) def predict(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. By default, returns predicted scores. But this default will be dropped soon. Users should either pass `outputs`, or call `predict_scores` method. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. outputs: list of `str`, name of the output to predict. If `None`, returns scores. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted scores (or an iterable of predicted scores if as_iterable is True). 
If `label_dimension == 1`, the shape of the output is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`. If `outputs` is set, returns a dict of predictions. """ if not outputs: return self.predict_scores( x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable) return super(DNNLinearCombinedRegressor, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs, as_iterable=as_iterable) @deprecated_arg_values( estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS, as_iterable=False) def predict_scores(self, x=None, input_fn=None, batch_size=None, as_iterable=True): """Returns predicted scores for given features. Args: x: features. input_fn: Input function. If set, x must be None. batch_size: Override default batch size. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: Numpy array of predicted scores (or an iterable of predicted scores if as_iterable is True). If `label_dimension == 1`, the shape of the output is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`. """ key = prediction_key.PredictionKey.SCORES preds = super(DNNLinearCombinedRegressor, self).predict( x=x, input_fn=input_fn, batch_size=batch_size, outputs=[key], as_iterable=as_iterable) if as_iterable: return (pred[key] for pred in preds) return preds[key] @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.") def export(self, export_dir, input_fn=None, input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, default_batch_size=1, exports_to_keep=None): """See BaseEstimator.export.""" def default_input_fn(unused_estimator, examples): return layers.parse_feature_columns_from_examples( examples, self._feature_columns) return super(DNNLinearCombinedRegressor, self).export( export_dir=export_dir, input_fn=input_fn or default_input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, signature_fn=signature_fn or export.regression_signature_fn, prediction_key=prediction_key.PredictionKey.SCORES, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) # Aliases # TODO(zakaria): Remove these aliases, See b/34751732 _DNNLinearCombinedEstimator = DNNLinearCombinedEstimator
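A minimal usage sketch for the estimator module above, for orientation only: the feature names, hidden-unit sizes, and `input_fn` below are invented placeholders, not part of the original file. It exercises the constructor arguments documented above, including `fix_global_step_increment_bug=True`, which the docstrings say new users must set.

```python
# Hypothetical example -- feature names and values are made up for illustration.
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import (
    DNNLinearCombinedRegressor)

# One column for the linear part, one for the DNN part.
age = tf.contrib.layers.real_valued_column("age")
income = tf.contrib.layers.real_valued_column("income")

estimator = DNNLinearCombinedRegressor(
    linear_feature_columns=[age],
    dnn_feature_columns=[income],
    dnn_hidden_units=[16, 8],
    fix_global_step_increment_bug=True)  # per the docstring, new users must set this


def input_fn():
    # Tiny in-graph batch of two examples.
    features = {"age": tf.constant([[25.0], [40.0]]),
                "income": tf.constant([[1000.0], [2000.0]])}
    labels = tf.constant([[0.5], [0.9]])
    return features, labels


estimator.fit(input_fn=input_fn, steps=10)
estimator.evaluate(input_fn=input_fn, steps=1)
```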
mit
ryfeus/lambda-packs
Keras_tensorflow/source/tensorflow/core/framework/function_pb2.py
7
11710
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: tensorflow/core/framework/function.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2 from tensorflow.core.framework import node_def_pb2 as tensorflow_dot_core_dot_framework_dot_node__def__pb2 from tensorflow.core.framework import op_def_pb2 as tensorflow_dot_core_dot_framework_dot_op__def__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow/core/framework/function.proto', package='tensorflow', syntax='proto3', serialized_pb=_b('\n(tensorflow/core/framework/function.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\x1a(tensorflow/core/framework/node_def.proto\x1a&tensorflow/core/framework/op_def.proto\"j\n\x12\x46unctionDefLibrary\x12)\n\x08\x66unction\x18\x01 \x03(\x0b\x32\x17.tensorflow.FunctionDef\x12)\n\x08gradient\x18\x02 \x03(\x0b\x32\x17.tensorflow.GradientDef\"\xaa\x02\n\x0b\x46unctionDef\x12$\n\tsignature\x18\x01 \x01(\x0b\x32\x11.tensorflow.OpDef\x12/\n\x04\x61ttr\x18\x05 \x03(\x0b\x32!.tensorflow.FunctionDef.AttrEntry\x12%\n\x08node_def\x18\x03 \x03(\x0b\x32\x13.tensorflow.NodeDef\x12-\n\x03ret\x18\x04 \x03(\x0b\x32 .tensorflow.FunctionDef.RetEntry\x1a\x42\n\tAttrEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorflow.AttrValue:\x02\x38\x01\x1a*\n\x08RetEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x0bGradientDef\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x15\n\rgradient_func\x18\x02 \x01(\tB/\n\x18org.tensorflow.frameworkB\x0e\x46unctionProtosP\x01\xf8\x01\x01\x62\x06proto3') , dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_node__def__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_op__def__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _FUNCTIONDEFLIBRARY = _descriptor.Descriptor( name='FunctionDefLibrary', full_name='tensorflow.FunctionDefLibrary', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='function', full_name='tensorflow.FunctionDefLibrary.function', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='gradient', full_name='tensorflow.FunctionDefLibrary.gradient', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=182, serialized_end=288, ) _FUNCTIONDEF_ATTRENTRY = _descriptor.Descriptor( name='AttrEntry', full_name='tensorflow.FunctionDef.AttrEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', 
full_name='tensorflow.FunctionDef.AttrEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='tensorflow.FunctionDef.AttrEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=479, serialized_end=545, ) _FUNCTIONDEF_RETENTRY = _descriptor.Descriptor( name='RetEntry', full_name='tensorflow.FunctionDef.RetEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='tensorflow.FunctionDef.RetEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='tensorflow.FunctionDef.RetEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=547, serialized_end=589, ) _FUNCTIONDEF = _descriptor.Descriptor( name='FunctionDef', full_name='tensorflow.FunctionDef', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='signature', full_name='tensorflow.FunctionDef.signature', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='attr', full_name='tensorflow.FunctionDef.attr', index=1, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='node_def', full_name='tensorflow.FunctionDef.node_def', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='ret', full_name='tensorflow.FunctionDef.ret', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_FUNCTIONDEF_ATTRENTRY, _FUNCTIONDEF_RETENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=291, serialized_end=589, ) _GRADIENTDEF = _descriptor.Descriptor( name='GradientDef', full_name='tensorflow.GradientDef', 
filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='function_name', full_name='tensorflow.GradientDef.function_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='gradient_func', full_name='tensorflow.GradientDef.gradient_func', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=591, serialized_end=650, ) _FUNCTIONDEFLIBRARY.fields_by_name['function'].message_type = _FUNCTIONDEF _FUNCTIONDEFLIBRARY.fields_by_name['gradient'].message_type = _GRADIENTDEF _FUNCTIONDEF_ATTRENTRY.fields_by_name['value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE _FUNCTIONDEF_ATTRENTRY.containing_type = _FUNCTIONDEF _FUNCTIONDEF_RETENTRY.containing_type = _FUNCTIONDEF _FUNCTIONDEF.fields_by_name['signature'].message_type = tensorflow_dot_core_dot_framework_dot_op__def__pb2._OPDEF _FUNCTIONDEF.fields_by_name['attr'].message_type = _FUNCTIONDEF_ATTRENTRY _FUNCTIONDEF.fields_by_name['node_def'].message_type = tensorflow_dot_core_dot_framework_dot_node__def__pb2._NODEDEF _FUNCTIONDEF.fields_by_name['ret'].message_type = _FUNCTIONDEF_RETENTRY DESCRIPTOR.message_types_by_name['FunctionDefLibrary'] = _FUNCTIONDEFLIBRARY DESCRIPTOR.message_types_by_name['FunctionDef'] = _FUNCTIONDEF DESCRIPTOR.message_types_by_name['GradientDef'] = _GRADIENTDEF FunctionDefLibrary = _reflection.GeneratedProtocolMessageType('FunctionDefLibrary', (_message.Message,), dict( DESCRIPTOR = _FUNCTIONDEFLIBRARY, __module__ = 'tensorflow.core.framework.function_pb2' # @@protoc_insertion_point(class_scope:tensorflow.FunctionDefLibrary) )) _sym_db.RegisterMessage(FunctionDefLibrary) FunctionDef = _reflection.GeneratedProtocolMessageType('FunctionDef', (_message.Message,), dict( AttrEntry = _reflection.GeneratedProtocolMessageType('AttrEntry', (_message.Message,), dict( DESCRIPTOR = _FUNCTIONDEF_ATTRENTRY, __module__ = 'tensorflow.core.framework.function_pb2' # @@protoc_insertion_point(class_scope:tensorflow.FunctionDef.AttrEntry) )) , RetEntry = _reflection.GeneratedProtocolMessageType('RetEntry', (_message.Message,), dict( DESCRIPTOR = _FUNCTIONDEF_RETENTRY, __module__ = 'tensorflow.core.framework.function_pb2' # @@protoc_insertion_point(class_scope:tensorflow.FunctionDef.RetEntry) )) , DESCRIPTOR = _FUNCTIONDEF, __module__ = 'tensorflow.core.framework.function_pb2' # @@protoc_insertion_point(class_scope:tensorflow.FunctionDef) )) _sym_db.RegisterMessage(FunctionDef) _sym_db.RegisterMessage(FunctionDef.AttrEntry) _sym_db.RegisterMessage(FunctionDef.RetEntry) GradientDef = _reflection.GeneratedProtocolMessageType('GradientDef', (_message.Message,), dict( DESCRIPTOR = _GRADIENTDEF, __module__ = 'tensorflow.core.framework.function_pb2' # @@protoc_insertion_point(class_scope:tensorflow.GradientDef) )) _sym_db.RegisterMessage(GradientDef) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\016FunctionProtosP\001\370\001\001')) 
_FUNCTIONDEF_ATTRENTRY.has_options = True _FUNCTIONDEF_ATTRENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _FUNCTIONDEF_RETENTRY.has_options = True _FUNCTIONDEF_RETENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) # @@protoc_insertion_point(module_scope)
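A short sketch of how a generated module like this one is typically consumed through the standard protobuf message API; the function name and the `ret` entry below are invented for illustration.

```python
# Illustrative only -- "MyFunc" and the "out" entry are made-up values.
from tensorflow.core.framework import function_pb2

library = function_pb2.FunctionDefLibrary()
fdef = library.function.add()        # repeated FunctionDef field
fdef.signature.name = "MyFunc"       # `signature` is an OpDef submessage
fdef.ret["out"] = "out:0"            # `ret` behaves as a map<string, string>

data = library.SerializeToString()
restored = function_pb2.FunctionDefLibrary.FromString(data)
assert restored.function[0].signature.name == "MyFunc"
```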
mit
ryfeus/lambda-packs
LightGBM_sklearn_scipy_numpy/source/setuptools/pep425tags.py
35
10882
# This file originally from pip:
# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/pep425tags.py
"""Generate and work with PEP 425 Compatibility Tags."""
from __future__ import absolute_import

import distutils.util
import platform
import re
import sys
import sysconfig
import warnings
from collections import OrderedDict

from . import glibc

_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')


def get_config_var(var):
    try:
        return sysconfig.get_config_var(var)
    except IOError as e:
        # Issue #1074
        warnings.warn("{}".format(e), RuntimeWarning)
        return None


def get_abbr_impl():
    """Return abbreviated implementation name."""
    if hasattr(sys, 'pypy_version_info'):
        pyimpl = 'pp'
    elif sys.platform.startswith('java'):
        pyimpl = 'jy'
    elif sys.platform == 'cli':
        pyimpl = 'ip'
    else:
        pyimpl = 'cp'
    return pyimpl


def get_impl_ver():
    """Return implementation version."""
    impl_ver = get_config_var("py_version_nodot")
    if not impl_ver or get_abbr_impl() == 'pp':
        impl_ver = ''.join(map(str, get_impl_version_info()))
    return impl_ver


def get_impl_version_info():
    """Return sys.version_info-like tuple for use in decrementing the minor
    version."""
    if get_abbr_impl() == 'pp':
        # as per https://github.com/pypa/pip/issues/2882
        return (sys.version_info[0], sys.pypy_version_info.major,
                sys.pypy_version_info.minor)
    else:
        return sys.version_info[0], sys.version_info[1]


def get_impl_tag():
    """
    Returns the Tag for this specific implementation.
    """
    return "{}{}".format(get_abbr_impl(), get_impl_ver())


def get_flag(var, fallback, expected=True, warn=True):
    """Use a fallback method for determining SOABI flags if the needed config
    var is unset or unavailable."""
    val = get_config_var(var)
    if val is None:
        if warn:
            warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
                          "be incorrect".format(var), RuntimeWarning, 2)
        return fallback()
    return val == expected


def get_abi_tag():
    """Return the ABI tag based on SOABI (if available) or emulate SOABI
    (CPython 2, PyPy)."""
    soabi = get_config_var('SOABI')
    impl = get_abbr_impl()
    if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'):
        d = ''
        m = ''
        u = ''
        if get_flag('Py_DEBUG',
                    lambda: hasattr(sys, 'gettotalrefcount'),
                    warn=(impl == 'cp')):
            d = 'd'
        if get_flag('WITH_PYMALLOC',
                    lambda: impl == 'cp',
                    warn=(impl == 'cp')):
            m = 'm'
        if get_flag('Py_UNICODE_SIZE',
                    lambda: sys.maxunicode == 0x10ffff,
                    expected=4,
                    warn=(impl == 'cp' and
                          sys.version_info < (3, 3))) \
                and sys.version_info < (3, 3):
            u = 'u'
        abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
    elif soabi and soabi.startswith('cpython-'):
        abi = 'cp' + soabi.split('-')[1]
    elif soabi:
        abi = soabi.replace('.', '_').replace('-', '_')
    else:
        abi = None
    return abi


def _is_running_32bit():
    return sys.maxsize == 2147483647


def get_platform():
    """Return our platform name, e.g. 'win32' or 'linux_x86_64'."""
    if sys.platform == 'darwin':
        # distutils.util.get_platform() returns the release based on the value
        # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
        # be significantly older than the user's current machine.
        release, _, machine = platform.mac_ver()
        split_ver = release.split('.')

        if machine == "x86_64" and _is_running_32bit():
            machine = "i386"
        elif machine == "ppc64" and _is_running_32bit():
            machine = "ppc"

        return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine)

    # XXX remove distutils dependency
    result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
    if result == "linux_x86_64" and _is_running_32bit():
        # 32 bit Python program (running on a 64 bit Linux): pip should only
        # install and run 32 bit compiled extensions in that case.
        result = "linux_i686"

    return result


def is_manylinux1_compatible():
    # Only Linux, and only x86-64 / i686
    if get_platform() not in {"linux_x86_64", "linux_i686"}:
        return False

    # Check for presence of _manylinux module
    try:
        import _manylinux
        return bool(_manylinux.manylinux1_compatible)
    except (ImportError, AttributeError):
        # Fall through to heuristic check below
        pass

    # Check glibc version. CentOS 5 uses glibc 2.5.
    return glibc.have_compatible_glibc(2, 5)


def get_darwin_arches(major, minor, machine):
    """Return a list of supported arches (including group arches) for
    the given major, minor and machine architecture of a macOS machine.
    """
    arches = []

    def _supports_arch(major, minor, arch):
        # Looking at the application support for macOS versions in the chart
        # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears
        # our timeline looks roughly like:
        #
        # 10.0 - Introduces ppc support.
        # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64
        #        and x86_64 support is CLI only, and cannot be used for GUI
        #        applications.
        # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications.
        # 10.6 - Drops support for ppc64
        # 10.7 - Drops support for ppc
        #
        # Given that we do not know if we're installing a CLI or a GUI
        # application, we must be conservative and assume it might be a GUI
        # application and behave as if ppc64 and x86_64 support did not occur
        # until 10.5.
        #
        # Note: The above information is taken from the "Application support"
        #       column in the chart not the "Processor support" since I believe
        #       that we care about what instruction sets an application can use
        #       not which processors the OS supports.
        if arch == 'ppc':
            return (major, minor) <= (10, 5)
        if arch == 'ppc64':
            return (major, minor) == (10, 5)
        if arch == 'i386':
            return (major, minor) >= (10, 4)
        if arch == 'x86_64':
            return (major, minor) >= (10, 5)
        if arch in groups:
            for garch in groups[arch]:
                if _supports_arch(major, minor, garch):
                    return True
        return False

    groups = OrderedDict([
        ("fat", ("i386", "ppc")),
        ("intel", ("x86_64", "i386")),
        ("fat64", ("x86_64", "ppc64")),
        ("fat32", ("x86_64", "i386", "ppc")),
    ])

    if _supports_arch(major, minor, machine):
        arches.append(machine)

    for garch in groups:
        if machine in groups[garch] and _supports_arch(major, minor, garch):
            arches.append(garch)

    arches.append('universal')

    return arches


def get_supported(versions=None, noarch=False, platform=None,
                  impl=None, abi=None):
    """Return a list of supported tags for each version specified in
    `versions`.

    :param versions: a list of string versions, of the form ["33", "32"],
        or None. The first version will be assumed to support our ABI.
    :param platform: specify the exact platform you want valid
        tags for, or None. If None, use the local system platform.
    :param impl: specify the exact implementation you want valid
        tags for, or None. If None, use the local interpreter impl.
    :param abi: specify the exact abi you want valid
        tags for, or None. If None, use the local interpreter abi.
    """
    supported = []

    # Versions must be given with respect to the preference
    if versions is None:
        versions = []
        version_info = get_impl_version_info()
        major = version_info[:-1]
        # Support all previous minor Python versions.
        for minor in range(version_info[-1], -1, -1):
            versions.append(''.join(map(str, major + (minor,))))

    impl = impl or get_abbr_impl()

    abis = []

    abi = abi or get_abi_tag()
    if abi:
        abis[0:0] = [abi]

    abi3s = set()
    import imp
    for suffix in imp.get_suffixes():
        if suffix[0].startswith('.abi'):
            abi3s.add(suffix[0].split('.', 2)[1])

    abis.extend(sorted(list(abi3s)))

    abis.append('none')

    if not noarch:
        arch = platform or get_platform()
        if arch.startswith('macosx'):
            # support macosx-10.6-intel on macosx-10.9-x86_64
            match = _osx_arch_pat.match(arch)
            if match:
                name, major, minor, actual_arch = match.groups()
                tpl = '{}_{}_%i_%s'.format(name, major)
                arches = []
                for m in reversed(range(int(minor) + 1)):
                    for a in get_darwin_arches(int(major), m, actual_arch):
                        arches.append(tpl % (m, a))
            else:
                # arch pattern didn't match (?!)
                arches = [arch]
        elif platform is None and is_manylinux1_compatible():
            arches = [arch.replace('linux', 'manylinux1'), arch]
        else:
            arches = [arch]

        # Current version, current API (built specifically for our Python):
        for abi in abis:
            for arch in arches:
                supported.append(('%s%s' % (impl, versions[0]), abi, arch))

        # abi3 modules compatible with older version of Python
        for version in versions[1:]:
            # abi3 was introduced in Python 3.2
            if version in {'31', '30'}:
                break
            for abi in abi3s:   # empty set if not Python 3
                for arch in arches:
                    supported.append(("%s%s" % (impl, version), abi, arch))

        # Has binaries, does not use the Python API:
        for arch in arches:
            supported.append(('py%s' % (versions[0][0]), 'none', arch))

    # No abi / arch, but requires our implementation:
    supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))

    # Tagged specifically as being cross-version compatible
    # (with just the major version specified)
    supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))

    # No abi / arch, generic Python
    for i, version in enumerate(versions):
        supported.append(('py%s' % (version,), 'none', 'any'))
        if i == 0:
            supported.append(('py%s' % (version[0]), 'none', 'any'))

    return supported


implementation_tag = get_impl_tag()
mit
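A minimal, hypothetical usage sketch for the pep425tags helpers in the record above. The import path is an assumption (it depends on where the module is vendored; the upstream location named in the file header is pip._internal.pep425tags), and the tag values in the comments are illustrative examples only.

# Hypothetical import path -- adjust to wherever this copy of the module lives.
from pip._internal import pep425tags

# Implementation tag, e.g. 'cp36' on CPython 3.6.
print(pep425tags.get_impl_tag())

# ABI tag, e.g. 'cp36m', or None if it cannot be determined.
print(pep425tags.get_abi_tag())

# Platform tag, e.g. 'linux_x86_64' or 'macosx_10_13_x86_64'.
print(pep425tags.get_platform())

# All (python tag, abi tag, platform tag) triples this interpreter accepts,
# ordered roughly from most to least specific, ending with generic
# ('pyXY', 'none', 'any') entries.
for tag in pep425tags.get_supported():
    print(tag)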
mafiya69/rollcall
rollcall/tests/test_main/test_update_json_file.py
3
1312
import unittest
import os
from rollcall.tests import helper
from datetime import date
from rollcall.main import add, update_json_file, fileExists
from rollcall.func_json import gen_dict, dict_to_json
from rollcall.exce import UnknownTag, SubjectError


class Testupdate_json_file(unittest.TestCase):
    """Tests the update_json_file function in the main module."""

    def setUp(self):
        self.dire = os.path.dirname(__file__)
        self.dummy = os.path.join(self.dire, 'dummy.json')
        helper.newFile(self.dummy)
        self.new = os.path.join(self.dire, 'new.json')
        self.sub = os.path.join(self.dire, 'sub.json')
        json = gen_dict(date.today(), [0, 1, 2, 3, 4, 5, 6])
        json = dict_to_json(json)
        add(str(json), self.sub)

    def test_update_json_file_file_exists(self):
        self.assertTrue(fileExists(self.sub))
        self.assertTrue(update_json_file('f', self.sub))

    def test_update_json_file_file_does_not_exist(self):
        self.assertFalse(fileExists(self.new))
        self.assertRaises(SubjectError, update_json_file, 'f', self.new)

    def test_update_json_file_unknown_tag(self):
        self.assertRaises(UnknownTag, update_json_file,
                          'blahblah', self.dummy)

    def tearDown(self):
        os.remove(self.dummy)
        os.remove(self.sub)
mit
aldur/RedditWallpaperChooser
setup.py
1
2221
#!/usr/bin/env python
# encoding: utf-8

"""
Python setup file.
"""

import setuptools
import os
import os.path

from RedditWallpaperChooser import __version__

__author__ = 'aldur'

_readme = "README.md"
_requirements = "requirements.txt"
_requirements_extra = "requirements_extra.txt"


def readme():
    """Open and return the _readme contents."""
    if not os.path.isfile(_readme):
        return ""
    with open(_readme) as f:
        return f.read()


def requirements():
    """Open and return the _requirements contents."""
    if not os.path.isfile(_requirements):
        return []
    with open(_requirements) as f:
        return [line.rstrip() for line in f]


def requirements_extras():
    """Open and return the other requirement files' content."""
    extra = dict()
    if not os.path.isfile(_requirements_extra):
        return extra
    with open(_requirements_extra) as f:
        extra.update({
            'extras': [line.rstrip() for line in f]
        })
    return extra


base_url = 'https://github.com/aldur/RedditWallpaperChooser'

setuptools.setup(
    name='RedditWallpaperChooser',
    description='Automatically download trending wallpapers from subreddits of your choice.',
    long_description=readme(),
    version=__version__,
    url=base_url,
    download_url='{}/releases/'.format(base_url),
    license='MIT',
    author='aldur',
    author_email='adrianodl@hotmail.it',
    packages=[
        "RedditWallpaperChooser",
    ],
    install_requires=requirements(),
    extras_require=requirements_extras(),
    scripts=[os.path.join('bin', f) for f in os.listdir('bin')],
    zip_safe=False,
    include_package_data=True,
    platforms=['any'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Topic :: Desktop Environment',
        'Topic :: Multimedia',
        'Topic :: Utilities',
    ],
    keywords="Reddit subreddit wallpaper desktop"
)
mit
AndreasMadsen/tensorflow
tensorflow/contrib/integrate/__init__.py
8
1930
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration and ODE solvers for TensorFlow.

## Example: Lorenz attractor

We can use `odeint` to solve the
[Lorenz system](https://en.wikipedia.org/wiki/Lorenz_system) of ordinary
differential equations, a prototypical example of chaotic dynamics:

```python
rho = 28.0
sigma = 10.0
beta = 8.0/3.0

def lorenz_equation(state, t):
  x, y, z = tf.unstack(state)
  dx = sigma * (y - x)
  dy = x * (rho - z) - y
  dz = x * y - beta * z
  return tf.stack([dx, dy, dz])

init_state = tf.constant([0, 2, 20], dtype=tf.float64)
t = np.linspace(0, 50, num=5000)
tensor_state, tensor_info = tf.contrib.integrate.odeint(
    lorenz_equation, init_state, t, full_output=True)

sess = tf.Session()
state, info = sess.run([tensor_state, tensor_info])

x, y, z = state.T
plt.plot(x, z)
```

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/lorenz_attractor.png" alt>
</div>

## Ops

@@odeint
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import
from tensorflow.contrib.integrate.python.ops.odes import *
from tensorflow.python.util.all_util import make_all

__all__ = make_all(__name__)
apache-2.0
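A smaller, hedged companion to the Lorenz example in the tf.contrib.integrate docstring above: exponential decay dy/dt = -y has the closed-form solution exp(-t), which makes the output of odeint easy to sanity-check. Only odeint is taken from this package; everything else is standard TensorFlow 1.x and NumPy, and the variable names are my own.

import numpy as np
import tensorflow as tf

# dy/dt = -y with y(0) = 1 has the closed-form solution y(t) = exp(-t).
y0 = tf.constant(1.0, dtype=tf.float64)
t = np.linspace(0.0, 5.0, num=50)

# odeint integrates the ODE defined by the callable func(y, t) over times t.
y = tf.contrib.integrate.odeint(lambda y_, t_: -y_, y0, t)

with tf.Session() as sess:
    y_values = sess.run(y)

# The final value should be close to exp(-5) ~= 0.0067.
print(y_values[-1], np.exp(-5.0))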
jnerin/ansible
lib/ansible/modules/monitoring/zabbix/zabbix_proxy.py
58
12457
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Alen Komic
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: zabbix_proxy
short_description: Create/delete/get/update Zabbix proxies
description:
    - This module allows you to create, modify, get and delete Zabbix proxy entries.
version_added: "2.5"
author:
    - "Alen Komic"
requirements:
    - "python >= 2.6"
    - "zabbix-api >= 0.5.3"
options:
    proxy_name:
        description:
            - Name of the proxy in Zabbix.
        required: true
    description:
        description:
            - Description of the proxy.
        required: false
    status:
        description:
            - Type of proxy. (5 - active, 6 - passive)
        required: false
        choices: ['active', 'passive']
        default: "active"
    tls_connect:
        description:
            - Connections to proxy.
        required: false
        choices: ['no_encryption','PSK','certificate']
        default: 'no_encryption'
    tls_accept:
        description:
            - Connections from proxy.
        required: false
        choices: ['no_encryption','PSK','certificate']
        default: 'no_encryption'
    tls_issuer:
        description:
            - Certificate issuer.
        required: false
    tls_subject:
        description:
            - Certificate subject.
        required: false
    tls_psk_identity:
        description:
            - PSK identity. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
        required: false
    tls_psk:
        description:
            - The preshared key, at least 32 hex digits. Required if either I(tls_connect) or I(tls_accept) has PSK enabled.
        required: false
    state:
        description:
            - State of the proxy.
            - On C(present), it will create the proxy if it does not exist, or update it if the associated data is different.
            - On C(absent), it will remove the proxy if it exists.
        required: false
        choices: ['present', 'absent']
        default: "present"
    interface:
        description:
            - Dictionary with params for the interface when proxy is in passive mode.
            - 'Available values are: dns, ip, main, port, type and useip.'
            - Please review the interface documentation for more information on the supported properties
            - U(https://www.zabbix.com/documentation/3.2/manual/api/reference/proxy/object#proxy_interface)
        required: false
        default: {}

extends_documentation_fragment:
    - zabbix
'''

EXAMPLES = '''
- name: Create a new proxy or update an existing proxy's info
  local_action:
    module: zabbix_proxy
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    proxy_name: ExampleProxy
    description: ExampleProxy
    status: active
    state: present
    interface:
        type: 0
        main: 1
        useip: 1
        ip: 10.xx.xx.xx
        dns: ""
        port: 10050
'''

RETURN = ''' # '''

from ansible.module_utils.basic import AnsibleModule

try:
    from zabbix_api import ZabbixAPI
    HAS_ZABBIX_API = True
except ImportError:
    HAS_ZABBIX_API = False


class Proxy(object):
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
        self.existing_data = None

    def proxy_exists(self, proxy_name):
        result = self._zapi.proxy.get({
            'output': 'extend', 'selectInterface': 'extend',
            'filter': {'host': proxy_name}})

        if len(result) > 0 and 'proxyid' in result[0]:
            self.existing_data = result[0]
            return result[0]['proxyid']
        else:
            return result

    def add_proxy(self, data):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)

            parameters = {}
            for item in data:
                if data[item]:
                    parameters[item] = data[item]

            proxy_ids_list = self._zapi.proxy.create(parameters)
            self._module.exit_json(changed=True,
                                   result="Successfully added proxy %s (%s)" %
                                          (data['host'], data['status']))
            if len(proxy_ids_list) >= 1:
                return proxy_ids_list['proxyids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create proxy %s: %s" %
                                       (data['host'], e))

    def delete_proxy(self, proxy_id, proxy_name):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)

            self._zapi.proxy.delete([proxy_id])
            self._module.exit_json(changed=True,
                                   result="Successfully deleted"
                                          + " proxy %s" % proxy_name)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete proxy %s: %s" %
                                       (proxy_name, str(e)))

    def compile_interface_params(self, new_interface):
        old_interface = {}
        if 'interface' in self.existing_data and \
                len(self.existing_data['interface']) > 0:
            old_interface = self.existing_data['interface']

        final_interface = old_interface.copy()
        final_interface.update(new_interface)
        final_interface = dict((k, str(v)) for k, v in final_interface.items())

        if final_interface != old_interface:
            return final_interface
        else:
            return {}

    def update_proxy(self, proxy_id, data):
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)

            parameters = {'proxyid': proxy_id}

            for item in data:
                if data[item] and item in self.existing_data and \
                        self.existing_data[item] != data[item]:
                    parameters[item] = data[item]

            if 'interface' in parameters:
                parameters.pop('interface')

            if 'interface' in data and data['status'] == '6':
                new_interface = self.compile_interface_params(data['interface'])
                if len(new_interface) > 0:
                    parameters['interface'] = new_interface

            if len(parameters) > 1:
                self._zapi.proxy.update(parameters)
                self._module.exit_json(
                    changed=True,
                    result="Successfully updated proxy %s (%s)" %
                           (data['host'], proxy_id)
                )
            else:
                self._module.exit_json(changed=False)
        except Exception as e:
            self._module.fail_json(msg="Failed to update proxy %s: %s" %
                                       (data['host'], e))


def main():
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            proxy_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False,
                                     default=None, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            status=dict(default="active", choices=['active', 'passive']),
            state=dict(default="present", choices=['present', 'absent']),
            description=dict(type='str', required=False),
            tls_connect=dict(default='no_encryption',
                             choices=['no_encryption', 'PSK', 'certificate']),
            tls_accept=dict(default='no_encryption',
                            choices=['no_encryption', 'PSK', 'certificate']),
            tls_issuer=dict(type='str', required=False, default=None),
            tls_subject=dict(type='str', required=False, default=None),
            tls_psk_identity=dict(type='str', required=False, default=None),
            tls_psk=dict(type='str', required=False, default=None),
            timeout=dict(type='int', default=10),
            interface=dict(type='dict', required=False, default={})
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module" +
                             " (check docs or install with:" +
                             " pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    proxy_name = module.params['proxy_name']
    description = module.params['description']
    status = module.params['status']
    tls_connect = module.params['tls_connect']
    tls_accept = module.params['tls_accept']
    tls_issuer = module.params['tls_issuer']
    tls_subject = module.params['tls_subject']
    tls_psk_identity = module.params['tls_psk_identity']
    tls_psk = module.params['tls_psk']
    state = module.params['state']
    timeout = module.params['timeout']
    interface = module.params['interface']

    # convert status to the Zabbix API values (5 - active, 6 - passive)
    status = 6 if status == "passive" else 5

    if tls_connect == 'certificate':
        tls_connect = 4
    elif tls_connect == 'PSK':
        tls_connect = 2
    else:
        tls_connect = 1

    if tls_accept == 'certificate':
        tls_accept = 4
    elif tls_accept == 'PSK':
        tls_accept = 2
    else:
        tls_accept = 1

    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout,
                        user=http_login_user,
                        passwd=http_login_password,
                        validate_certs=validate_certs)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    proxy = Proxy(module, zbx)

    # check if proxy already exists
    proxy_id = proxy.proxy_exists(proxy_name)

    if proxy_id:
        if state == "absent":
            # remove proxy
            proxy.delete_proxy(proxy_id, proxy_name)
        else:
            proxy.update_proxy(proxy_id, {
                'host': proxy_name,
                'description': description,
                'status': str(status),
                'tls_connect': str(tls_connect),
                'tls_accept': str(tls_accept),
                'tls_issuer': tls_issuer,
                'tls_subject': tls_subject,
                'tls_psk_identity': tls_psk_identity,
                'tls_psk': tls_psk,
                'interface': interface
            })
    else:
        if state == "absent":
            # the proxy is already deleted.
            module.exit_json(changed=False)

        proxy_id = proxy.add_proxy(data={
            'host': proxy_name,
            'description': description,
            'status': str(status),
            'tls_connect': str(tls_connect),
            'tls_accept': str(tls_accept),
            'tls_issuer': tls_issuer,
            'tls_subject': tls_subject,
            'tls_psk_identity': tls_psk_identity,
            'tls_psk': tls_psk,
            'interface': interface
        })


if __name__ == '__main__':
    main()
gpl-3.0