Dataset columns (name, dtype, observed range):

  body                    string, length 26 to 98.2k
  body_hash               int64, -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring               string, length 1 to 16.8k
  path                    string, length 5 to 230
  name                    string, length 1 to 96
  repository_name         string, length 7 to 89
  lang                    string, 1 distinct value
  body_without_docstring  string, length 20 to 98.2k
def __init__(self, zvar, key):
    """Create a Hyperslice

    @param zvar: zVariable that this slices
    @type zvar: :py:class:`pycdf.Var`
    @param key: Python multi-dimensional slice as passed to
                __getitem__
    @type key: tuple of slice and/or int
    @raise IndexError: if slice is out of range, mismatches dimensions, or
                       otherwise unparsable.
    @raise ValueError: if slice has invalid values
    """
    self.zvar = zvar
    self.rv = self.zvar.rv()
    self.dims = zvar._n_dims() + 1
    self.dimsizes = [len(zvar)] + zvar._dim_sizes()
    self.starts = [0] * self.dims
    self.counts = numpy.empty((self.dims,), dtype=numpy.int32)
    self.counts.fill(1)
    self.intervals = [1] * self.dims
    # plain bool: numpy.bool is only an alias and is removed in newer NumPy
    self.degen = numpy.zeros(self.dims, dtype=bool)
    self.rev = numpy.zeros(self.dims, dtype=bool)
    if not hasattr(key, '__len__'):  # single index passed, wrap in a tuple
        key = (key,)
    if not self.rv:  # NRV variable, always work on the single (0th) record
        key = (0,) + key
    key = self.expand_ellipsis(key, self.dims)
    if self.rv:  # special cases for RV variables
        if len(key) == 1:  # get all data for these record(s)
            key = self.expand_ellipsis(key + (Ellipsis,), self.dims)
        elif len(key) == self.dims - 1:  # get same slice from every record
            key = (slice(None, None, None),) + key
    if len(key) == self.dims:
        self.expanded_key = key
        for i in range(self.dims):
            idx = key[i]
            if hasattr(idx, 'start'):  # slice object
                (self.starts[i], self.counts[i],
                 self.intervals[i], self.rev[i]) = \
                    self.convert_range(idx.start, idx.stop,
                                       idx.step, self.dimsizes[i])
            else:  # plain integer index, dimension is degenerate
                if idx < 0:
                    idx += self.dimsizes[i]
                if idx != 0 and (idx >= self.dimsizes[i] or idx < 0):
                    raise IndexError('list index out of range')
                self.starts[i] = idx
                self.degen[i] = True
    else:
        raise IndexError('Slice does not match dimensions for zVar ' +
                         str(zvar._name))
    self.column = zvar.cdf_file.col_major()
-3,304,340,210,359,956,500
Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values
pycdf/__init__.py
__init__
cpiker/condaCDF
python
def expected_dims(self, data=None):
    """Calculate size of non-degenerate dimensions

    Figures out size, in each dimension, of expected input data

    @return: size of each dimension for this slice, excluding degenerate
    @rtype: list of int
    """
    return [self.counts[i] for i in range(self.dims) if not self.degen[i]]
-5,050,537,631,013,452,000
Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int
pycdf/__init__.py
expected_dims
cpiker/condaCDF
python
def expand(self, data):
    """Expands the record dimension of this slice to hold a set of data

    If the length of data (outermost dimension) is larger than the record
    count (counts[0]) for this slice, expand the slice to hold all the data.
    This requires that the record dimension of the slice not be degenerate,
    and also that it not have been completely specified when the hyperslice
    was created (i.e. record dimension either ellipsis or no specified
    stop.)

    Does *not* expand any other dimension, since that's Very Hard in CDF.

    @param data: the data which are intended to be stored in this slice
    @type data: list
    """
    rec_slice = self.expanded_key[0]
    if not self.rv or isinstance(data, str_classes) or self.degen[0] \
            or not hasattr(rec_slice, 'stop'):
        return
    if len(data) < self.counts[0]:
        if rec_slice.stop is None and rec_slice.step in (None, 1):
            self.counts[0] = len(data)
    elif len(data) > self.counts[0]:
        if rec_slice.step in (None, 1):
            self.counts[0] = len(data)
3,023,531,243,950,046,000
Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list
pycdf/__init__.py
expand
cpiker/condaCDF
python
def create_array(self):
    """Creates a numpy array to hold the data from this slice

    Returns
    =======
    out : numpy.array
        array sized, typed, and dimensioned to hold data from
        this slice
    """
    counts = self.counts
    degen = self.degen
    if self.column:
        counts = self.reorder(counts)
        degen = self.reorder(degen)
    array = numpy.empty(
        [counts[i] for i in range(len(counts)) if not degen[i]],
        self.zvar._np_type(), order='C')
    return numpy.require(array, requirements=('C', 'A', 'W'))
-6,894,357,864,719,320,000
Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice
pycdf/__init__.py
create_array
cpiker/condaCDF
python
def convert_input_array(self, buffer):
    """Converts a buffer of raw data from this slice

    EPOCH(16) variables always need to be converted.
    CHAR needs to be converted to Unicode if py3k

    Parameters
    ==========
    buffer : numpy.array
        data as read from the CDF file

    Returns
    =======
    out : numpy.array
        converted data
    """
    result = self._flip_array(buffer)
    # Convert to native Python/datetime types unless raw access requested
    cdftype = self.zvar.type()
    if not self.zvar._raw:
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) \
                and str != bytes:
            dt = numpy.dtype('U{0}'.format(result.dtype.itemsize))
            result = numpy.require(numpy.char.array(result).decode(),
                                   dtype=dt)
        elif cdftype == const.CDF_EPOCH.value:
            result = lib.v_epoch_to_datetime(result)
        elif cdftype == const.CDF_EPOCH16.value:
            result = lib.v_epoch16_to_datetime(result)
        elif cdftype == const.CDF_TIME_TT2000.value:
            result = lib.v_tt2000_to_datetime(result)
    return result
3,856,380,486,523,786,000
Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR needs to be converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data
pycdf/__init__.py
convert_input_array
cpiker/condaCDF
python
def convert_output_array(self, buffer):
    """Convert a buffer of data that will go into this slice

    Parameters
    ==========
    buffer : numpy.array
        data to go into the CDF file

    Returns
    =======
    out : numpy.array
        input with majority flipped and dimensions reversed to be
        suitable to pass directly to CDF library.
    """
    buffer = self._flip_array(buffer)
    return numpy.require(buffer, requirements=('C', 'A', 'W'))
-1,402,077,456,776,283,000
Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library.
pycdf/__init__.py
convert_output_array
cpiker/condaCDF
python
def _flip_array(self, data):
    """
    Operations for majority, etc. common between convert_input and _output
    """
    cdftype = self.zvar.type()
    # Flip majority if any non-degenerate dimensions exist
    if self.column and not min(self.degen):
        if self.degen[0]:  # record dimension degenerate; flip everything
            if cdftype == const.CDF_EPOCH16.value:
                # EPOCH16 has a trailing 2-element dimension; leave it in place
                data = data.transpose(
                    list(range(len(data.shape) - 2, 0, -1)) +
                    [len(data.shape) - 1])
            else:
                data = data.transpose()
        # Record dimension stays first, flip the rest
        elif cdftype == const.CDF_EPOCH16.value:
            data = data.transpose(
                [0] + list(range(len(data.shape) - 2, 0, -1)) +
                [len(data.shape) - 1])
        else:
            data = data.transpose(
                [0] + list(range(len(data.shape) - 1, 0, -1)))
    # Reverse any dimensions that were sliced with a negative step
    if self.rev.any():
        sliced = [(slice(None, None, -1) if self.rev[i] else slice(None))
                  for i in range(self.dims) if not self.degen[i]]
        if cdftype == const.CDF_EPOCH16.value:
            # keep the trailing 2-element dimension unreversed; slice objects
            # are not iterable, so append the extra slice
            sliced.append(slice(None))
        data = operator.getitem(data, tuple(sliced))
    return data
4,481,637,856,396,805,600
Operations for majority, etc. common between convert_input and _output
pycdf/__init__.py
_flip_array
cpiker/condaCDF
python
def select(self):
    """Selects this hyperslice in the CDF

    Calls the CDF library to select the CDF, variable, records, and
    array elements corresponding to this slice.
    """
    args = (const.SELECT_, const.zVAR_RECNUMBER_,
            ctypes.c_long(self.starts[0]),
            const.SELECT_, const.zVAR_RECCOUNT_,
            ctypes.c_long(self.counts[0]),
            const.SELECT_, const.zVAR_RECINTERVAL_,
            ctypes.c_long(self.intervals[0]))
    if self.dims > 1:
        dims = self.dims - 1
        args += (const.SELECT_, const.zVAR_DIMINDICES_,
                 (ctypes.c_long * dims)(*self.starts[1:]),
                 const.SELECT_, const.zVAR_DIMCOUNTS_,
                 (ctypes.c_long * dims)(*self.counts[1:]),
                 const.SELECT_, const.zVAR_DIMINTERVALS_,
                 (ctypes.c_long * dims)(*self.intervals[1:]))
    self.zvar._call(*args)
4,518,925,746,976,599,000
Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice.
pycdf/__init__.py
select
cpiker/condaCDF
python
@staticmethod
def expand_ellipsis(slices, n_dims):
    """Expands any ellipses into correct number of full-size slices

    @param slices: tuple of slices, integers, or Ellipsis objects
    @type slices: tuple
    @param n_dims: number of dimensions this slice is over
    @type n_dims: int
    @return: L{slices} with ellipses replaced by appropriate number of
             full-dimension slices
    @rtype: tuple
    @raise IndexError: if ellipses specified when already have enough
                       dimensions
    """
    if slices is Ellipsis:
        return tuple([slice(None, None, None) for i in range(n_dims)])
    idx = [i for (i, v) in enumerate(slices) if v is Ellipsis]
    if not idx:  # no ellipsis present
        return slices
    if len(idx) > 1:
        raise IndexError('Ellipses can only be used once per slice.')
    idx = idx[0]
    extra = n_dims - len(slices) + 1  # dimensions the ellipsis must cover
    if extra < 0:
        raise IndexError('too many indices')
    result = slices[0:idx] + (slice(None),) * extra + slices[idx + 1:]
    return result
8,211,416,177,040,501,000
Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or Ellipsis objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions
pycdf/__init__.py
expand_ellipsis
cpiker/condaCDF
python
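To make the ellipsis expansion above concrete, here is a minimal standalone sketch of the same logic (a simplified mirror without the error checks, so it runs without pycdf or the CDF C library):

def expand_ellipsis(slices, n_dims):
    # mirror of the method above: pad a single Ellipsis out to n_dims entries
    if slices is Ellipsis:
        return tuple(slice(None) for _ in range(n_dims))
    idx = [i for i, v in enumerate(slices) if v is Ellipsis]
    if not idx:
        return slices
    extra = n_dims - len(slices) + 1
    i = idx[0]
    return slices[:i] + (slice(None),) * extra + slices[i + 1:]

print(expand_ellipsis((Ellipsis, 2), 3))
# (slice(None, None, None), slice(None, None, None), 2)
print(expand_ellipsis((0, Ellipsis), 4))
# (0, slice(None, None, None), slice(None, None, None), slice(None, None, None))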
@staticmethod
def check_well_formed(data):
    """Checks if input data is well-formed, regular array"""
    d = numpy.asanyarray(data)
    # ragged input ends up as an object array whose elements have a length
    if d.dtype == object:
        try:
            len(d.flat[0])
        except TypeError:  # scalar object, acceptable
            pass
        else:
            raise ValueError(
                'Data must be well-formed, regular array of number, '
                'string, or datetime')
-5,000,262,813,788,040,000
Checks if input data is well-formed, regular array
pycdf/__init__.py
check_well_formed
cpiker/condaCDF
python
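For context on the check above: ragged nested lists become numpy arrays of Python objects, and an object element with a length is what triggers the ValueError. A small illustration using plain numpy only (not pycdf):

import numpy

regular = numpy.asanyarray([[1, 2, 3], [4, 5, 6]])
ragged = numpy.asanyarray([[1, 2, 3], [4, 5]], dtype=object)
print(regular.dtype.kind)                            # 'i' -- numeric, passes the check
print(ragged.dtype == object, len(ragged.flat[0]))   # True 3 -- would raise ValueError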
@staticmethod
def dimensions(data):
    """Finds the dimensions of a nested list-of-lists

    @param data: data of which dimensions are desired
    @type data: list (of lists)
    @return: dimensions of L{data}, in order outside-in
    @rtype: list of int
    @raise ValueError: if L{data} has irregular dimensions
    """
    d = numpy.asanyarray(data)
    _Hyperslice.check_well_formed(d)
    return d.shape
1,178,304,907,912,685,000
Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions
pycdf/__init__.py
dimensions
cpiker/condaCDF
python
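A short usage sketch of the helper above; this assumes the package imports as pycdf (as the path pycdf/__init__.py suggests) and that the NASA CDF library is available at import time:

from pycdf import _Hyperslice

print(_Hyperslice.dimensions([[1, 2, 3], [4, 5, 6]]))   # (2, 3)
print(_Hyperslice.dimensions(7.5))                      # () -- scalar
# _Hyperslice.dimensions([[1, 2], [3]]) would raise ValueError (ragged input)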
@staticmethod
def types(data, backward=False):
    """Find dimensions and valid types of a nested list-of-lists

    Any given data may be representable by a range of CDF types; infer
    the CDF types which can represent this data. This breaks down to:
      1. Proper kind (numerical, string, time)
      2. Proper range (stores highest and lowest number)
      3. Sufficient resolution (EPOCH16 required if datetime has
         microseconds or below.)

    If more than one value satisfies the requirements, types are returned
    in preferred order:
      1. Type that matches precision of data first, then
      2. integer type before float type, then
      3. Smallest type first, then
      4. signed type first, then
      5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1)
    So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies
    below the millisecond level (rule 1), but otherwise EPOCH is preferred
    (rule 2).

    For floats, four-byte is preferred unless eight-byte is required:
      1. absolute values between 0 and 3e-39
      2. absolute values greater than 1.7e38
    This will switch to an eight-byte double in some cases where four bytes
    would be sufficient for IEEE 754 encoding, but where DEC formats would
    require eight.

    @param data: data for which dimensions and CDF types are desired
    @type data: list (of lists)
    @param backward: limit to pre-CDF3 types
    @type backward: bool
    @return: dimensions of L{data}, in order outside-in;
             CDF types which can represent this data;
             number of elements required (i.e. length of longest string)
    @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int])
    @raise ValueError: if L{data} has irregular dimensions
    """
    d = numpy.asanyarray(data)
    dims = d.shape
    elements = 1
    types = []
    _Hyperslice.check_well_formed(d)
    if d.dtype.kind in ('S', 'U'):  # string data
        types = [const.CDF_CHAR, const.CDF_UCHAR]
        elements = d.dtype.itemsize
        if d.dtype.kind == 'U':  # itemsize counts 4-byte UCS-4 characters
            elements //= 4
    elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'):
        # datetime data; sub-millisecond resolution needs EPOCH16 first
        if max((dt.microsecond % 1000) for dt in d.flat) > 0:
            types = [const.CDF_EPOCH16, const.CDF_EPOCH,
                     const.CDF_TIME_TT2000]
        else:
            types = [const.CDF_EPOCH, const.CDF_EPOCH16,
                     const.CDF_TIME_TT2000]
        if backward:
            del types[types.index(const.CDF_EPOCH16)]
            del types[-1]
        elif not lib.supports_int8:
            del types[-1]
    elif d is data or isinstance(data, numpy.generic):
        # numpy array or scalar came in; match its dtype (or byteswapped)
        types = [k for k in lib.numpytypedict
                 if (lib.numpytypedict[k] == d.dtype
                     or lib.numpytypedict[k] == d.dtype.newbyteorder())
                 and k not in lib.timetypes]
        if (not lib.supports_int8 or backward) \
                and const.CDF_INT8.value in types:
            del types[types.index(const.CDF_INT8.value)]
        # prefer the specifically-named type (CDF_BYTE, CDF_FLOAT, CDF_DOUBLE)
        # over its generic equivalent (CDF_INT1, CDF_REAL4, CDF_REAL8)
        types.sort(key=lambda x: x % 50, reverse=True)
    if not types:  # not a numpy input, or dtype not directly usable
        if d.dtype.kind == 'O':  # object array; try to make it numeric
            trytypes = (numpy.uint64, numpy.int64, numpy.float64)
            for t in trytypes:
                try:
                    newd = d.astype(dtype=t)
                except:  # cast failed, try the next candidate
                    continue
                if (newd == d).all():  # values preserved, use this type
                    d = newd
                    break
            else:
                raise ValueError(
                    'Cannot convert generic objects to CDF type.')
        if d.dtype.kind in ('i', 'u'):  # integer
            minval = numpy.min(d)
            maxval = numpy.max(d)
            if minval < 0:
                types = [const.CDF_BYTE, const.CDF_INT1,
                         const.CDF_INT2, const.CDF_INT4, const.CDF_INT8,
                         const.CDF_FLOAT, const.CDF_REAL4,
                         const.CDF_DOUBLE, const.CDF_REAL8]
                cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63,
                           1.7e38, 1.7e38, 8e307, 8e307]
            else:
                types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1,
                         const.CDF_INT2, const.CDF_UINT2,
                         const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8,
                         const.CDF_FLOAT, const.CDF_REAL4,
                         const.CDF_DOUBLE, const.CDF_REAL8]
                cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16,
                           2 ** 31, 2 ** 32, 2 ** 63,
                           1.7e38, 1.7e38, 8e307, 8e307]
            types = [t for (t, c) in zip(types, cutoffs)
                     if c > maxval and (minval >= 0 or minval >= -c)]
            if (not lib.supports_int8 or backward) \
                    and const.CDF_INT8 in types:
                del types[types.index(const.CDF_INT8)]
        elif dims == ():  # scalar float
            if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39):
                types = [const.CDF_DOUBLE, const.CDF_REAL8]
            else:
                types = [const.CDF_FLOAT, const.CDF_REAL4,
                         const.CDF_DOUBLE, const.CDF_REAL8]
        else:  # float array
            absolutes = numpy.abs(d[d != 0])
            if len(absolutes) > 0 and \
                    (numpy.max(absolutes) > 1.7e38
                     or numpy.min(absolutes) < 3e-39):
                types = [const.CDF_DOUBLE, const.CDF_REAL8]
            else:
                types = [const.CDF_FLOAT, const.CDF_REAL4,
                         const.CDF_DOUBLE, const.CDF_REAL8]
    types = [t.value if hasattr(t, 'value') else t for t in types]
    return (dims, types, elements)
8,817,305,071,486,589,000
Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions
pycdf/__init__.py
types
cpiker/condaCDF
python
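A usage sketch of the type-inference helper above. The calls match the code as shown, but the exact list returned depends on the installed CDF library (e.g. INT8/TT2000 support), so the checks below are only indicative; it is assumed the package imports as pycdf and the CDF library is available:

from pycdf import _Hyperslice, const

dims, types, elements = _Hyperslice.types([1, 2, 3])
print(dims)                                    # (3,)
print(types[0] == const.CDF_BYTE.value)        # smallest matching integer type first
print(elements)                                # 1

dims, types, elements = _Hyperslice.types(['abc', 'de'])
print(types[0] == const.CDF_CHAR.value, elements)   # True 3 (length of longest string)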
@staticmethod
def reorder(seq):
    """Reorders seq to switch array majority

    Used to take an array of subscripts between row
    and column majority. First element is not touched,
    being the record number.

    @param seq: a sequence of *subscripts*
    @type seq: sequence of integers
    @return: seq with all but element 0 reversed in order
    @rtype: sequence of integers
    """
    return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1]))
8,341,042,401,191,011,000
Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers
pycdf/__init__.py
reorder
cpiker/condaCDF
python
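A small worked example of the majority reordering above: the record subscript (element 0) stays in place and the remaining subscripts are reversed. This is a standalone mirror of the same one-line expression, so it needs only numpy:

import numpy

def reorder(seq):
    # same expression as the static method above
    return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1]))

print(reorder(numpy.array([0, 1, 2, 3])))   # [0 3 2 1]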
@staticmethod
def convert_range(start, stop, step, size):
    """Converts a start/stop/step range to start/count/interval

    (i.e. changes from Python-style slice to CDF-style)
    @param start: index to start a slice at, may be none or negative
    @type start: int
    @param stop: index at end of slice (one-past, standard Python),
                 may be none or negative
    @type stop: int
    @param step: interval for stepping through slice
    @type step: int
    @param size: size of list to slice
    @type size: int
    @return: (start, count, interval, rev) where:
             1. start is the start index, normalized to be within
                the size of the list and negatives handled
             2. count is the number of records in the slice,
                guaranteed to stop before the end
             3. interval is the skip between records
             4. rev indicates whether the sequence should be reversed
    @rtype: (int, int, int, boolean)
    """
    (start, stop, step) = slice(start, stop, step).indices(size)
    if step < 0:
        step *= -1
        count = int((start - stop + step - 1) / step)
        start = start - (count - 1) * step
        rev = True
    else:
        count = int((stop - start + step - 1) / step)
        rev = False
    if count < 0:
        count = 0
        start = 0
    return (start, count, step, rev)
435,904,559,742,375,740
Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through slice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean)
pycdf/__init__.py
convert_range
cpiker/condaCDF
python
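Two worked examples of the slice-to-hyperslice conversion above, using only the standard-library slice arithmetic so they run without the CDF library; the results shown follow directly from the logic in convert_range:

# Python slice 2:8:3 over a dimension of size 10
start, stop, step = slice(2, 8, 3).indices(10)    # (2, 8, 3)
count = int((stop - start + step - 1) / step)     # 2 -> elements 2 and 5
# convert_range(2, 8, 3, 10) therefore returns (2, 2, 3, False)

# Python slice ::-1 (reverse everything) over a dimension of size 10
start, stop, step = slice(None, None, -1).indices(10)   # (9, -1, -1)
# step < 0, so the CDF read goes forward: start 0, count 10, interval 1,
# and rev=True tells the caller to reverse the result afterwards:
# convert_range(None, None, -1, 10) returns (0, 10, 1, True)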
def __init__(self, cdf_file, attr_name, create=False):
    """Initialize this attribute

    @param cdf_file: CDF file containing this attribute
    @type cdf_file: :py:class:`pycdf.CDF`
    @param attr_name: Name of this attribute
    @type attr_name: str
    @param create: True to create attribute, False to look up existing.
    @type create: bool
    """
    self._cdf_file = cdf_file
    self._raw = False
    if isinstance(attr_name, str_classes):
        try:
            self._name = attr_name.encode('ascii')
        except AttributeError:
            self._name = attr_name
        attrno = ctypes.c_long()
        if create:
            self._cdf_file._call(const.CREATE_, const.ATTR_,
                                 self._name, self.SCOPE,
                                 ctypes.byref(attrno))
            self._cdf_file.add_attr_to_cache(
                self._name, attrno.value,
                self.SCOPE == const.GLOBAL_SCOPE)
        else:
            (attrno, scope) = self._cdf_file.attr_num(self._name)
    else:
        name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1)
        scope = ctypes.c_long(0)
        self._cdf_file._call(const.SELECT_, const.ATTR_,
                             ctypes.c_long(attr_name))
        self._cdf_file._call(const.GET_, const.ATTR_NAME_, name,
                             const.GET_, const.ATTR_SCOPE_,
                             ctypes.byref(scope))
        self._name = name.value.rstrip()
        if scope.value == const.GLOBAL_SCOPE.value:
            scope = True
        elif scope.value == const.VARIABLE_SCOPE.value:
            scope = False
        else:
            raise CDFError(const.BAD_SCOPE)
        self._cdf_file.add_attr_to_cache(self._name, attr_name, scope)
-2,708,256,634,244,691,500
Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool
pycdf/__init__.py
__init__
cpiker/condaCDF
python
def __getitem__(self, key):
    """Return a slice of Entries.

    Because Attributes may be sparse, a multi-element slice will return
    None for those elements which do not have associated Entries.

    @param key: index or range of Entry number to return
    @type key: slice or int
    @return: a list of entries, appropriate type.
    @raise IndexError: if L{key} is an int and that Entry number does not
                       exist.
    """
    if key is Ellipsis:
        key = slice(None, None, None)
    if hasattr(key, 'indices'):
        idx = range(*key.indices(self.max_idx() + 1))
        return [self._get_entry(i) if self.has_entry(i) else None
                for i in idx]
    elif self.has_entry(key):
        return self._get_entry(key)
    else:
        raise IndexError('list index ' + str(key) + ' out of range.')
1,325,622,620,025,109,200
Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist.
pycdf/__init__.py
__getitem__
cpiker/condaCDF
python
def _check_other_entries(self, types):
    """Try to get the type of this entry from others in the Attribute

    For zAttrs, checks if all other Entries are the same type, and at
    least one doesn't match its zVar, i.e. Entry type dominates (otherwise
    assumption is the Var type dominates).

    For gAttrs, checks all other Entries, and gives priority to the
    one that's earliest in the possible type list and exists in other
    Entries.

    This is only one component of Entry type guessing!

    :param list types: CDF types that are candidates (match the data)
    :return: The type discerned from other Entries, or None
    """
    if self.ENTRY_ == const.zENTRY_:
        cand_et = None
        one_var_diff = False
        for num in range(self.max_idx() + 1):
            if not self.has_entry(num):
                continue
            vartype = self._cdf_file[num].type()
            entrytype = self.type(num)
            if vartype != entrytype:
                one_var_diff = True
            if cand_et is None:
                if entrytype not in types:
                    return None
                cand_et = entrytype
            elif cand_et != entrytype:
                return None
        if one_var_diff and cand_et is not None:
            return cand_et
    else:
        entrytypes = [self.type(num) for num in range(self.max_idx() + 1)
                      if self.has_entry(num)]
        entrytypes = [et for et in entrytypes if et in types]
        if entrytypes:
            return types[min([types.index(et) for et in entrytypes])]
    return None
2,458,194,628,741,711,400
Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None
pycdf/__init__.py
_check_other_entries
cpiker/condaCDF
python
def __setitem__(self, key, data):
    """Set a slice of Entries.

    @param key: index or range of Entry numbers to set
    @type key: slice or int
    @param data: the data to set these entries to. Normally each entry should
                 be a sequence; if a scalar is provided, it is treated
                 as a single-element list.
    @type data: scalar or list
    @raise ValueError: if size of {data} does not match size of L{key}
    @note: Attributes do not 'grow' or 'shrink' as entries are added
           or removed. Indexes of entries never change and there is no
           way to 'insert'.
    """
    if key is Ellipsis:
        key = slice(None, None, None)
    if not hasattr(key, 'indices'):
        # scalar key: one entry, data treated as single-element list
        idx = (key, key + 1, 1)
        data = [data]
    else:
        idx = key.indices(self.max_idx() + 1)
        if key.step is None or key.step > 0:
            # iterating forward: extend the slice to cover all the data
            if len(data) > len(range(*idx)):
                idx = (idx[0], idx[0] + idx[2] * len(data), idx[2])
    # Check types and sizes for all data first, so no error is raised
    # after only some of the Entries have been changed
    data_idx = -1
    typelist = []
    for i in range(*idx):
        data_idx += 1
        if data_idx >= len(data):
            continue
        datum = data[data_idx]
        if datum is None:
            # placeholder per datum, keeps typelist aligned with data_idx
            typelist.append((None, None, None))
            continue
        (dims, types, elements) = _Hyperslice.types(
            datum, backward=self._cdf_file.backward)
        if len(types) <= 0:
            raise ValueError('Cannot find a matching CDF type.')
        if len(dims) > 1:
            raise ValueError('Entries must be scalar or 1D.')
        elif len(dims) == 1 and isinstance(datum[0], str_classes):
            raise ValueError('Entry strings must be scalar.')
        entry_type = None
        if self.has_entry(i):  # prefer the existing Entry's type
            entry_type = self.type(i)
            if entry_type not in types:
                entry_type = None
        if entry_type is None:  # then other Entries in this Attribute
            entry_type = self._check_other_entries(types)
        if entry_type is None and self.ENTRY_ == const.zENTRY_:
            # finally, for zAttrs, the type of the matching zVariable
            vartype = self._cdf_file[i].type()
            if vartype in types:
                entry_type = vartype
            else:
                entry_type = types[0]
        elif entry_type is None:
            entry_type = types[0]
        if entry_type not in lib.numpytypedict:
            raise ValueError('Cannot find a matching numpy type.')
        typelist.append((dims, entry_type, elements))
    # Everything checks out; now write (or delete) the Entries
    data_idx = -1
    for i in range(*idx):
        data_idx += 1
        if data_idx >= len(data) or data[data_idx] is None:
            if self.has_entry(i):
                del self[i]
            continue
        datum = data[data_idx]
        (dims, entry_type, elements) = typelist[data_idx]
        self._write_entry(i, datum, entry_type, dims, elements)
7,629,561,312,053,855,000
Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'.
pycdf/__init__.py
__setitem__
cpiker/condaCDF
python
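The entry-setting rules above lend themselves to a short usage sketch. This assumes attr is an existing pycdf Attr instance obtained from an open CDF (the data values are hypothetical), and uses only behaviour shown in this section:

# 'attr' is assumed to be an existing Attr from an open CDF
attr[0] = 'Swift'                # scalar key: stored as a single Entry
attr[1:3] = [[1, 2, 3], None]    # sets Entry 1; None deletes Entry 2 if present
print(attr[0:3])                 # sparse read: missing Entries come back as None
attr.append([4, 5, 6])           # new Entry one past the last defined one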
def __delitem__(self, key):
    """Delete a slice of Entries.

    @param key: index or range of Entry numbers to delete
    @type key: slice or int
    @note: Attributes do not 'grow' or 'shrink' as entries are added
           or removed. Indexes of entries never change and there is no
           way to 'insert'.
    """
    if key is Ellipsis:
        key = slice(None, None, None)
    if not hasattr(key, 'indices'):
        idx = (key, key + 1, 1)
    else:
        idx = key.indices(self.max_idx() + 1)
    for i in range(*idx):
        self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i),
                   const.DELETE_, self.ENTRY_)
-6,487,788,381,629,699,000
Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'.
pycdf/__init__.py
__delitem__
cpiker/condaCDF
python
def __iter__(self, current=0):
    """Iterates over all entries in this Attribute

    Returns data from one entry at a time until reaches the end.
    @note: Returned in entry-number order.
    """
    while current <= self.max_idx():
        if self.has_entry(current):
            value = yield self._get_entry(current)
            if value != None:
                current = value
        current += 1
1,073,632,310,754,900,200
Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order.
pycdf/__init__.py
__iter__
cpiker/condaCDF
python
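Because the iterator above is a generator that reads the value passed to send(), a caller can jump to a different entry number mid-iteration; send(n) sets the scan position to n, which is then incremented, so iteration resumes with the first existing entry after number n. A sketch, again assuming attr is an existing Attr with several entries defined:

it = iter(attr)
first = next(it)      # first existing entry, starting from number 0
jumped = it.send(5)   # first existing entry after entry number 5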
def __reversed__(self, current=None):
    """Iterates over all entries in this Attribute

    Returns data from one entry at a time, starting at end and going
    to beginning.
    @note: Returned in entry-number order.
    """
    if current is None:
        current = self.max_idx()
    while current >= 0:
        if self.has_entry(current):
            value = yield self._get_entry(current)
            if value != None:
                current = value
        current -= 1
-2,169,726,939,541,378,600
Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order.
pycdf/__init__.py
__reversed__
cpiker/condaCDF
python
def __len__(self):
    """Number of Entries for this Attr. NOT same as max Entry number.

    @return: Number of Entries
    @rtype: int
    """
    count = ctypes.c_long(0)
    self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count))
    return count.value
-6,313,870,335,240,138,000
Number of Entries for this Attr. NOT same as max Entry number. @return: Number of Entries @rtype: int
pycdf/__init__.py
__len__
cpiker/condaCDF
python
def __repr__(self):
    """Returns representation of an attribute

    Cannot return anything that can be eval'd to create a copy of the
    attribute, so just wrap the informal representation in angle brackets.
    @return: all the data in this attribute
    @rtype: str
    """
    return '<\n' + str(self) + '\n>'
-2,361,089,892,419,540,000
Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str
pycdf/__init__.py
__repr__
cpiker/condaCDF
python
def __str__(self):
    """Returns a string representation of the attribute

    This is an 'informal' representation in that it cannot be evaluated
    directly to create an L{Attr}.

    @return: all the data in this attribute
    @rtype: str
    """
    if self._cdf_file._opened:
        return '\n'.join([str(item) for item in self])
    elif isinstance(self._name, str):
        return 'Attribute "{0}" in closed CDF {1}'.format(
            self._name, self._cdf_file.pathname)
    else:
        return 'Attribute "{0}" in closed CDF {1}'.format(
            self._name.decode('ascii'),
            self._cdf_file.pathname.decode('ascii'))
3,565,218,746,426,250,000
Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str
pycdf/__init__.py
__str__
cpiker/condaCDF
python
def insert(self, index, data):
    """Insert an entry at a particular number

    Inserts entry at particular number while moving all subsequent
    entries to one entry number later. Does not close gaps.

    Parameters
    ==========
    index : int
        index where to put the new entry
    data :
        data for the new entry
    """
    max_entry = self.max_idx()
    if index > max_entry:
        self[index] = data
        return
    for i in range(max_entry, index - 1, -1):
        if self.has_entry(i + 1):
            self.__delitem__(i + 1)
        if self.has_entry(i):
            self.new(self.__getitem__(i), type=self.type(i), number=i + 1)
    self[index] = data
-8,328,695,212,667,859,000
Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry
pycdf/__init__.py
insert
cpiker/condaCDF
python
def append(self, data):
    """Add an entry to end of attribute

    Puts entry after last defined entry (does not fill gaps)

    Parameters
    ==========
    data :
        data for the new entry
    """
    self[self.max_idx() + 1] = data
4,686,148,227,598,398,000
Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry
pycdf/__init__.py
append
cpiker/condaCDF
python
def _call(self, *args, **kwargs):
    """Select this CDF and Attr and call the CDF internal interface

    @param args: Passed directly to the CDF library interface.
    @type args: various, see :py:mod:`ctypes`.
    @return: CDF status from the library
    @rtype: ctypes.c_long
    @note: Terminal NULL_ is automatically added to L{args}.
    @raise CDFError: if CDF library reports an error
    @raise CDFWarning: if CDF library reports a warning and interpreter
                       is set to error on warnings.
    """
    return self._cdf_file._call(
        const.SELECT_, const.ATTR_,
        ctypes.c_long(self._cdf_file.attr_num(self._name)[0]),
        *args, **kwargs)
6,537,549,419,326,152,000
Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings.
pycdf/__init__.py
_call
cpiker/condaCDF
python
def _call(self, *args, **kwargs): 'Select this CDF and Attr and call the CDF internal interface\n\n @param args: Passed directly to the CDF library interface.\n @type args: various, see :py:mod:`ctypes`.\n @return: CDF status from the library\n @rtype: ctypes.c_long\n @note: Terminal NULL_ is automatically added to L{args}.\n @raise CDFError: if CDF library reports an error\n @raise CDFWarning: if CDF library reports a warning and interpreter\n is set to error on warnings.\n ' return self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs)
def _entry_len(self, number): 'Number of elements in an Entry\n\n @param number: number of Entry\n @type number: int\n @return: number of elements\n @rtype: int\n ' if (not self.has_entry(number)): raise IndexError((('list index ' + str(number)) + ' out of range.')) count = ctypes.c_long(0) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value
-6,886,153,673,019,426,000
Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int
pycdf/__init__.py
_entry_len
cpiker/condaCDF
python
def _entry_len(self, number): 'Number of elements in an Entry\n\n @param number: number of Entry\n @type number: int\n @return: number of elements\n @rtype: int\n ' if (not self.has_entry(number)): raise IndexError((('list index ' + str(number)) + ' out of range.')) count = ctypes.c_long(0) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value
def type(self, number, new_type=None): "Find or change the CDF type of a particular Entry number\n\n Parameters\n ==========\n number : int\n number of Entry to check or change\n\n Other Parameters\n ================\n new_type\n type to change this Entry to, from :mod:`~pycdf.const`.\n Omit to only check type.\n\n Returns\n =======\n out : int\n CDF variable type, see :mod:`~pycdf.const`\n\n Notes\n =====\n If changing types, old and new must be equivalent, see CDF\n User's Guide section 2.5.5 pg. 57\n " if (new_type != None): if (not hasattr(new_type, 'value')): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if (status == const.NO_SUCH_ENTRY): raise IndexError((('list index ' + str(number)) + ' out of range.')) cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if (status == const.NO_SUCH_ENTRY): raise IndexError((('list index ' + str(number)) + ' out of range.')) return cdftype.value
7,315,560,073,932,372,000
Find or change the CDF type of a particular Entry number

Parameters
==========
number : int
    number of Entry to check or change

Other Parameters
================
new_type
    type to change this Entry to, from :mod:`~pycdf.const`.
    Omit to only check type.

Returns
=======
out : int
    CDF variable type, see :mod:`~pycdf.const`

Notes
=====
If changing types, old and new must be equivalent, see CDF
User's Guide section 2.5.5 pg. 57
pycdf/__init__.py
type
cpiker/condaCDF
python
def type(self, number, new_type=None): "Find or change the CDF type of a particular Entry number\n\n Parameters\n ==========\n number : int\n number of Entry to check or change\n\n Other Parameters\n ================\n new_type\n type to change this Entry to, from :mod:`~pycdf.const`.\n Omit to only check type.\n\n Returns\n =======\n out : int\n CDF variable type, see :mod:`~pycdf.const`\n\n Notes\n =====\n If changing types, old and new must be equivalent, see CDF\n User's Guide section 2.5.5 pg. 57\n " if (new_type != None): if (not hasattr(new_type, 'value')): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if (status == const.NO_SUCH_ENTRY): raise IndexError((('list index ' + str(number)) + ' out of range.')) cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if (status == const.NO_SUCH_ENTRY): raise IndexError((('list index ' + str(number)) + ' out of range.')) return cdftype.value
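A short sketch of checking and changing an entry's type with the method above. It reuses the hypothetical attr object from the earlier sketch and assumes entry 0 was stored as CDF_REAL4; the entry number and starting type are assumptions, not facts from the source.

```python
# Sketch only: `attr` is an Attr object, e.g. cdf.attrs['TEXT'] from the
# earlier example; entry 0 is assumed to hold 4-byte floats.
from pycdf import const

old_code = attr.type(0)                  # integer CDF type code of entry 0
attr.type(0, new_type=const.CDF_FLOAT)   # allowed: CDF_REAL4 and CDF_FLOAT are equivalent
```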
def has_entry(self, number): 'Check if this attribute has a particular Entry number\n\n Parameters\n ==========\n number : int\n number of Entry to check or change\n\n Returns\n =======\n out : bool\n True if ``number`` is a valid entry number; False if not\n ' status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY,)) return (not (status == const.NO_SUCH_ENTRY))
9,026,472,186,154,819,000
Check if this attribute has a particular Entry number

Parameters
==========
number : int
    number of Entry to check or change

Returns
=======
out : bool
    True if ``number`` is a valid entry number; False if not
pycdf/__init__.py
has_entry
cpiker/condaCDF
python
def has_entry(self, number): 'Check if this attribute has a particular Entry number\n\n Parameters\n ==========\n number : int\n number of Entry to check or change\n\n Returns\n =======\n out : bool\n True if ``number`` is a valid entry number; False if not\n ' status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY,)) return (not (status == const.NO_SUCH_ENTRY))
def max_idx(self): 'Maximum index of Entries for this Attr\n\n Returns\n =======\n out : int\n maximum Entry number\n ' count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value
8,873,607,757,283,282,000
Maximum index of Entries for this Attr

Returns
=======
out : int
    maximum Entry number
pycdf/__init__.py
max_idx
cpiker/condaCDF
python
def max_idx(self): 'Maximum index of Entries for this Attr\n\n Returns\n =======\n out : int\n maximum Entry number\n ' count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value
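Because gAttribute entry numbers may have gaps, max_idx and has_entry are typically used together; a brief sketch, again with the hypothetical attr object from the examples above:

```python
# Walk every defined entry, skipping gaps in the entry numbering.
for num in range(attr.max_idx() + 1):
    if attr.has_entry(num):
        print(num, attr[num])
```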
def new(self, data, type=None, number=None): 'Create a new Entry in this Attribute\n\n .. note:: If ``number`` is provided and an Entry with that number\n already exists, it will be overwritten.\n\n Parameters\n ==========\n data\n data to put in the Entry\n\n Other Parameters\n ================\n type : int\n type of the new Entry, from :mod:`~pycdf.const`\n (otherwise guessed from ``data``)\n number : int\n Entry number to write, default is lowest available number.\n ' if (number is None): number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types(data, backward=self._cdf_file.backward) if (type is None): type = self._check_other_entries(types) if ((type is None) and (self.ENTRY_ == const.zENTRY_)): vartype = self._cdf_file[number].type() if (vartype in types): type = vartype if (type is None): type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements)
658,827,983,585,143,700
Create a new Entry in this Attribute

.. note:: If ``number`` is provided and an Entry with that number
   already exists, it will be overwritten.

Parameters
==========
data
    data to put in the Entry

Other Parameters
================
type : int
    type of the new Entry, from :mod:`~pycdf.const`
    (otherwise guessed from ``data``)
number : int
    Entry number to write, default is lowest available number.
pycdf/__init__.py
new
cpiker/condaCDF
python
def new(self, data, type=None, number=None): 'Create a new Entry in this Attribute\n\n .. note:: If ``number`` is provided and an Entry with that number\n already exists, it will be overwritten.\n\n Parameters\n ==========\n data\n data to put in the Entry\n\n Other Parameters\n ================\n type : int\n type of the new Entry, from :mod:`~pycdf.const`\n (otherwise guessed from ``data``)\n number : int\n Entry number to write, default is lowest available number.\n ' if (number is None): number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types(data, backward=self._cdf_file.backward) if (type is None): type = self._check_other_entries(types) if ((type is None) and (self.ENTRY_ == const.zENTRY_)): vartype = self._cdf_file[number].type() if (vartype in types): type = vartype if (type is None): type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements)
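A sketch of new with and without an explicit type and entry number, continuing the hypothetical attr object used above; the values and the entry number 5 are made up for illustration.

```python
from pycdf import const

attr.new([1.0, 2.0, 3.0])                      # lowest free entry number, type guessed
attr.new('made-up text',                       # forces a character entry ...
         type=const.CDF_CHAR, number=5)        # ... overwriting entry 5 if it exists
```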
def number(self): 'Find the attribute number for this attribute\n\n Returns\n =======\n out : int\n attribute number\n ' no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value
8,211,779,153,915,523,000
Find the attribute number for this attribute

Returns
=======
out : int
    attribute number
pycdf/__init__.py
number
cpiker/condaCDF
python
def number(self): 'Find the attribute number for this attribute\n\n Returns\n =======\n out : int\n attribute number\n ' no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value
def global_scope(self): 'Determine scope of this attribute.\n\n Returns\n =======\n out : bool\n True if global (i.e. gAttr), False if zAttr\n ' return self._cdf_file.attr_num(self._name)[1]
-504,907,318,242,927,500
Determine scope of this attribute.

Returns
=======
out : bool
    True if global (i.e. gAttr), False if zAttr
pycdf/__init__.py
global_scope
cpiker/condaCDF
python
def global_scope(self): 'Determine scope of this attribute.\n\n Returns\n =======\n out : bool\n True if global (i.e. gAttr), False if zAttr\n ' return self._cdf_file.attr_num(self._name)[1]
def rename(self, new_name): 'Rename this attribute\n\n Renaming a zAttribute renames it for *all* zVariables in this CDF!\n\n Parameters\n ==========\n new_name : str\n the new name of the attribute\n ' try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if (len(enc_name) > const.CDF_ATTR_NAME_LEN256): raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache(enc_name, *self._cdf_file.attr_num(self._name)) del self._cdf_file._attr_info[self._name] self._name = enc_name
-8,552,523,158,711,291,000
Rename this attribute

Renaming a zAttribute renames it for *all* zVariables in this CDF!

Parameters
==========
new_name : str
    the new name of the attribute
pycdf/__init__.py
rename
cpiker/condaCDF
python
def rename(self, new_name): 'Rename this attribute\n\n Renaming a zAttribute renames it for *all* zVariables in this CDF!\n\n Parameters\n ==========\n new_name : str\n the new name of the attribute\n ' try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if (len(enc_name) > const.CDF_ATTR_NAME_LEN256): raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache(enc_name, *self._cdf_file.attr_num(self._name)) del self._cdf_file._attr_info[self._name] self._name = enc_name
def _get_entry(self, number): 'Read an Entry associated with this L{Attr}\n\n @param number: number of Entry to return\n @type number: int\n @return: data from entry numbered L{number}\n @rtype: list or str\n ' if (not self.has_entry(number)): raise IndexError((('list index ' + str(number)) + ' out of range.')) length = self._entry_len(number) cdftype = self.type(number) if (cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value)): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if (not (cdftype in lib.numpytypedict)): raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) if (cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value)): if ((str == bytes) or self._raw): result = bytes(buff) else: result = str(numpy.char.array(buff).decode()) else: if (not self._raw): if (cdftype == const.CDF_EPOCH.value): result = lib.v_epoch_to_datetime(buff) elif (cdftype == const.CDF_EPOCH16.value): result = lib.v_epoch16_to_datetime(buff) elif (cdftype == const.CDF_TIME_TT2000.value): result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if (length == 1): result = result[0] return result
-45,498,371,164,057,220
Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str
pycdf/__init__.py
_get_entry
cpiker/condaCDF
python
def _get_entry(self, number): 'Read an Entry associated with this L{Attr}\n\n @param number: number of Entry to return\n @type number: int\n @return: data from entry numbered L{number}\n @rtype: list or str\n ' if (not self.has_entry(number)): raise IndexError((('list index ' + str(number)) + ' out of range.')) length = self._entry_len(number) cdftype = self.type(number) if (cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value)): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if (not (cdftype in lib.numpytypedict)): raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) if (cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value)): if ((str == bytes) or self._raw): result = bytes(buff) else: result = str(numpy.char.array(buff).decode()) else: if (not self._raw): if (cdftype == const.CDF_EPOCH.value): result = lib.v_epoch_to_datetime(buff) elif (cdftype == const.CDF_EPOCH16.value): result = lib.v_epoch16_to_datetime(buff) elif (cdftype == const.CDF_TIME_TT2000.value): result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if (length == 1): result = result[0] return result
def _write_entry(self, number, data, cdf_type, dims, elements): 'Write an Entry to this Attr.\n\n @param number: number of Entry to write\n @type number: int\n @param data: data to write\n @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const`\n @param dims: dimensions of L{data}\n @type dims: list\n @param elements: number of elements in L{data}, 1 unless it is a string\n @type elements: int\n ' if (len(dims) == 0): n_write = 1 else: n_write = dims[0] if (cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value)): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype(('S' + str(elements)))) n_write = elements elif (cdf_type == const.CDF_EPOCH16.value): if (not self._raw): try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif (cdf_type == const.CDF_EPOCH.value): if (not self._raw): try: data = (lib.v_datetime_to_epoch(data),) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif (cdf_type == const.CDF_TIME_TT2000.value): if (not self._raw): try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif (cdf_type in lib.numpytypedict): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p))
6,380,333,655,318,013,000
Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int
pycdf/__init__.py
_write_entry
cpiker/condaCDF
python
def _write_entry(self, number, data, cdf_type, dims, elements): 'Write an Entry to this Attr.\n\n @param number: number of Entry to write\n @type number: int\n @param data: data to write\n @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const`\n @param dims: dimensions of L{data}\n @type dims: list\n @param elements: number of elements in L{data}, 1 unless it is a string\n @type elements: int\n ' if (len(dims) == 0): n_write = 1 else: n_write = dims[0] if (cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value)): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype(('S' + str(elements)))) n_write = elements elif (cdf_type == const.CDF_EPOCH16.value): if (not self._raw): try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif (cdf_type == const.CDF_EPOCH.value): if (not self._raw): try: data = (lib.v_datetime_to_epoch(data),) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif (cdf_type == const.CDF_TIME_TT2000.value): if (not self._raw): try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif (cdf_type in lib.numpytypedict): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p))
def _delete(self): 'Delete this Attribute\n\n Also deletes all Entries associated with it.\n ' self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None
9,112,160,062,937,282,000
Delete this Attribute

Also deletes all Entries associated with it.
pycdf/__init__.py
_delete
cpiker/condaCDF
python
def _delete(self): 'Delete this Attribute\n\n Also deletes all Entries associated with it.\n ' self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None
def insert(self, index, data): 'Insert entry at particular index number\n\n Since there can only be one zEntry per zAttr, this cannot be\n implemented.\n\n Raises\n ======\n NotImplementedError : always\n ' raise NotImplementedError
-741,755,126,920,673,400
Insert entry at particular index number

Since there can only be one zEntry per zAttr, this cannot be
implemented.

Raises
======
NotImplementedError : always
pycdf/__init__.py
insert
cpiker/condaCDF
python
def insert(self, index, data): 'Insert entry at particular index number\n\n Since there can only be one zEntry per zAttr, this cannot be\n implemented.\n\n Raises\n ======\n NotImplementedError : always\n ' raise NotImplementedError
def append(self, index, data): 'Add entry to end of attribute list\n\n Since there can only be one zEntry per zAttr, this cannot be\n implemented.\n\n Raises\n ======\n NotImplementedError : always\n ' raise NotImplementedError
-5,759,425,197,374,058,000
Add entry to end of attribute list

Since there can only be one zEntry per zAttr, this cannot be
implemented.

Raises
======
NotImplementedError : always
pycdf/__init__.py
append
cpiker/condaCDF
python
def append(self, index, data): 'Add entry to end of attribute list\n\n Since there can only be one zEntry per zAttr, this cannot be\n implemented.\n\n Raises\n ======\n NotImplementedError : always\n ' raise NotImplementedError
def __init__(self, cdf_file, special_entry=None): 'Initialize the attribute collection\n\n @param cdf_file: CDF these attributes are in\n @type cdf_file: :py:class:`pycdf.CDF`\n @param special_entry: callable which returns a "special" entry number,\n used to limit results for zAttrs to those which match the zVar\n (i.e. the var number)\n @type special_entry: callable\n ' self._cdf_file = cdf_file self.special_entry = special_entry
8,878,471,010,203,278,000
Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable
pycdf/__init__.py
__init__
cpiker/condaCDF
python
def __init__(self, cdf_file, special_entry=None): 'Initialize the attribute collection\n\n @param cdf_file: CDF these attributes are in\n @type cdf_file: :py:class:`pycdf.CDF`\n @param special_entry: callable which returns a "special" entry number,\n used to limit results for zAttrs to those which match the zVar\n (i.e. the var number)\n @type special_entry: callable\n ' self._cdf_file = cdf_file self.special_entry = special_entry
def __getitem__(self, name): 'Find an Attribute by name\n\n @param name: name of the Attribute to return\n @type name: str\n @return: attribute named L{name}\n @rtype: L{Attr}\n @raise KeyError: if there is no attribute named L{name}\n @raise CDFError: other errors in CDF library\n ' try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if (v.status == const.NO_SUCH_ATTR): raise KeyError(((name + ': ') + str(v))) else: raise if (attrib.global_scope() != self.global_scope): raise KeyError((((name + ': no ') + self.attr_name) + ' by that name.')) return attrib
9,057,941,628,773,863,000
Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library
pycdf/__init__.py
__getitem__
cpiker/condaCDF
python
def __getitem__(self, name): 'Find an Attribute by name\n\n @param name: name of the Attribute to return\n @type name: str\n @return: attribute named L{name}\n @rtype: L{Attr}\n @raise KeyError: if there is no attribute named L{name}\n @raise CDFError: other errors in CDF library\n ' try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if (v.status == const.NO_SUCH_ATTR): raise KeyError(((name + ': ') + str(v))) else: raise if (attrib.global_scope() != self.global_scope): raise KeyError((((name + ': no ') + self.attr_name) + ' by that name.')) return attrib
def __setitem__(self, name, data): 'Create an Attribute or change its entries\n\n @param name: name of Attribute to change\n @type name: str\n @param data: Entries to populate this Attribute with.\n Any existing Entries will be deleted!\n Another C{Attr} may be specified, in which\n case all its entries are copied.\n @type data: scalar, list, or L{Attr}\n ' if isinstance(data, AttrList): if (name in self): del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):]
-6,857,911,443,514,409,000
Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr}
pycdf/__init__.py
__setitem__
cpiker/condaCDF
python
def __setitem__(self, name, data): 'Create an Attribute or change its entries\n\n @param name: name of Attribute to change\n @type name: str\n @param data: Entries to populate this Attribute with.\n Any existing Entries will be deleted!\n Another C{Attr} may be specified, in which\n case all its entries are copied.\n @type data: scalar, list, or L{Attr}\n ' if isinstance(data, AttrList): if (name in self): del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):]
def __delitem__(self, name): 'Delete an Attribute (and all its entries)\n\n @param name: name of Attribute to delete\n @type name: str\n ' try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if (v.status == const.NO_SUCH_ATTR): raise KeyError(((name + ': ') + str(v))) else: raise if (attr.global_scope() != self.global_scope): raise KeyError(((name + ': not ') + self.attr_name)) attr._delete()
8,571,057,697,993,914,000
Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str
pycdf/__init__.py
__delitem__
cpiker/condaCDF
python
def __delitem__(self, name): 'Delete an Attribute (and all its entries)\n\n @param name: name of Attribute to delete\n @type name: str\n ' try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if (v.status == const.NO_SUCH_ATTR): raise KeyError(((name + ': ') + str(v))) else: raise if (attr.global_scope() != self.global_scope): raise KeyError(((name + ': not ') + self.attr_name)) attr._delete()
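The three mapping methods above (__getitem__, __setitem__, __delitem__) are what back dict-style access to global attributes on an open CDF. A hypothetical sketch; the file name and attribute names are assumptions, not part of the original source.

```python
import pycdf

with pycdf.CDF('example.cdf') as cdf:             # assumed pre-existing file
    cdf.readonly(False)                           # allow attribute writes
    title = cdf.attrs['TITLE']                    # KeyError if no gAttr named TITLE
    cdf.attrs['TEXT'] = ['line one', 'line two']  # replaces all entries of TEXT
    del cdf.attrs['OldAttribute']                 # removes the gAttr and its entries
```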
def __iter__(self, current=0): 'Iterates over all Attr in this CDF or variable\n\n Returns name of one L{Attr} at a time until reaches the end.\n @note: Returned in number order.\n ' count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while (current < count.value): candidate = self.AttrType(self._cdf_file, current) if (candidate.global_scope() == self.global_scope): if ((self.special_entry is None) or candidate.has_entry(self.special_entry())): if (str == bytes): value = (yield candidate._name) else: value = (yield candidate._name.decode()) if (value != None): current = self[value].number() current += 1
4,581,430,574,521,517,600
Iterates over all Attr in this CDF or variable

Returns the name of one L{Attr} at a time until it reaches the end.

@note: Returned in number order.
pycdf/__init__.py
__iter__
cpiker/condaCDF
python
def __iter__(self, current=0): 'Iterates over all Attr in this CDF or variable\n\n Returns name of one L{Attr} at a time until reaches the end.\n @note: Returned in number order.\n ' count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while (current < count.value): candidate = self.AttrType(self._cdf_file, current) if (candidate.global_scope() == self.global_scope): if ((self.special_entry is None) or candidate.has_entry(self.special_entry())): if (str == bytes): value = (yield candidate._name) else: value = (yield candidate._name.decode()) if (value != None): current = self[value].number() current += 1
def __repr__(self): "Returns representation of attribute list\n\n Cannot return anything that can be eval'd to create a copy of the\n list, so just wrap the informal representation in angle brackets.\n @return: all the data in this list of attributes\n @rtype: str\n " return (((('<' + self.__class__.__name__) + ':\n') + str(self)) + '\n>')
-8,416,242,787,242,972,000
Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str
pycdf/__init__.py
__repr__
cpiker/condaCDF
python
def __repr__(self): "Returns representation of attribute list\n\n Cannot return anything that can be eval'd to create a copy of the\n list, so just wrap the informal representation in angle brackets.\n @return: all the data in this list of attributes\n @rtype: str\n " return (((('<' + self.__class__.__name__) + ':\n') + str(self)) + '\n>')
def __str__(self): "Returns a string representation of the attribute list\n\n This is an 'informal' representation in that it cannot be evaluated\n directly to create an L{AttrList}.\n\n @return: all the data in this list of attributes\n @rtype: str\n " if self._cdf_file._opened: return '\n'.join([((key + ': ') + (('\n' + (' ' * (len(key) + 2))).join([(((str(value[i]) + ' [') + lib.cdftypenames[value.type(i)]) + ']') for i in range((value.max_idx() + 1)) if value.has_entry(i)]) if isinstance(value, Attr) else (((str(value) + ' [') + lib.cdftypenames[self.type(key)]) + ']'))) for (key, value) in sorted(self.items())]) elif isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format(self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format(self._cdf_file.pathname.decode('ascii'))
8,633,169,659,997,861,000
Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str
pycdf/__init__.py
__str__
cpiker/condaCDF
python
def __str__(self): "Returns a string representation of the attribute list\n\n This is an 'informal' representation in that it cannot be evaluated\n directly to create an L{AttrList}.\n\n @return: all the data in this list of attributes\n @rtype: str\n " if self._cdf_file._opened: return '\n'.join([((key + ': ') + (('\n' + (' ' * (len(key) + 2))).join([(((str(value[i]) + ' [') + lib.cdftypenames[value.type(i)]) + ']') for i in range((value.max_idx() + 1)) if value.has_entry(i)]) if isinstance(value, Attr) else (((str(value) + ' [') + lib.cdftypenames[self.type(key)]) + ']'))) for (key, value) in sorted(self.items())]) elif isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format(self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format(self._cdf_file.pathname.decode('ascii'))
def clone(self, master, name=None, new_name=None): '\n Clones another attribute list, or one attribute from it, into this\n list.\n\n Parameters\n ==========\n master : AttrList\n the attribute list to copy from. This can be any dict-like object.\n\n Other Parameters\n ================\n name : str (optional)\n name of attribute to clone (default: clone entire list)\n new_name : str (optional)\n name of the new attribute, default ``name``\n ' if (name is None): self._clone_list(master) else: self._clone_attr(master, name, new_name)
-7,891,908,609,073,791,000
Clones another attribute list, or one attribute from it, into this
list.

Parameters
==========
master : AttrList
    the attribute list to copy from. This can be any dict-like object.

Other Parameters
================
name : str (optional)
    name of attribute to clone (default: clone entire list)
new_name : str (optional)
    name of the new attribute, default ``name``
pycdf/__init__.py
clone
cpiker/condaCDF
python
def clone(self, master, name=None, new_name=None): '\n Clones another attribute list, or one attribute from it, into this\n list.\n\n Parameters\n ==========\n master : AttrList\n the attribute list to copy from. This can be any dict-like object.\n\n Other Parameters\n ================\n name : str (optional)\n name of attribute to clone (default: clone entire list)\n new_name : str (optional)\n name of the new attribute, default ``name``\n ' if (name is None): self._clone_list(master) else: self._clone_attr(master, name, new_name)
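A sketch of cloning global attributes between two files. The paths are made up, and the second CDF is created from scratch by passing an empty master path, assuming pycdf's convention that an empty master path creates a new file; treat the whole block as an illustration, not the library's documented recipe.

```python
import pycdf

with pycdf.CDF('master.cdf') as src, pycdf.CDF('copy.cdf', '') as dst:
    dst.attrs.clone(src.attrs)                                     # copy every gAttr
    dst.attrs.clone(src.attrs, name='TITLE', new_name='OldTitle')  # or just one, renamed
```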
def copy(self): '\n Create a copy of this attribute list\n\n Returns\n =======\n out : dict\n copy of the entries for all attributes in this list\n ' return dict(((key, (value[:] if isinstance(value, Attr) else value)) for (key, value) in self.items()))
6,992,348,364,719,334,000
Create a copy of this attribute list

Returns
=======
out : dict
    copy of the entries for all attributes in this list
pycdf/__init__.py
copy
cpiker/condaCDF
python
def copy(self): '\n Create a copy of this attribute list\n\n Returns\n =======\n out : dict\n copy of the entries for all attributes in this list\n ' return dict(((key, (value[:] if isinstance(value, Attr) else value)) for (key, value) in self.items()))
def new(self, name, data=None, type=None): '\n Create a new Attr in this AttrList\n\n Parameters\n ==========\n name : str\n name of the new Attribute\n\n Other Parameters\n ================\n data\n data to put into the first entry in the new Attribute\n type\n CDF type of the first entry from :mod:`~pycdf.const`.\n Only used if data are specified.\n\n Raises\n ======\n KeyError : if the name already exists in this list\n ' if (name in self): raise KeyError((name + ' already exists.')) attr = self._get_or_create(name) if (data is not None): if (self.special_entry is None): attr.new(data, type) else: attr.new(data, type, self.special_entry())
2,512,112,050,731,910,700
Create a new Attr in this AttrList

Parameters
==========
name : str
    name of the new Attribute

Other Parameters
================
data
    data to put into the first entry in the new Attribute
type
    CDF type of the first entry from :mod:`~pycdf.const`.
    Only used if data are specified.

Raises
======
KeyError : if the name already exists in this list
pycdf/__init__.py
new
cpiker/condaCDF
python
def new(self, name, data=None, type=None): '\n Create a new Attr in this AttrList\n\n Parameters\n ==========\n name : str\n name of the new Attribute\n\n Other Parameters\n ================\n data\n data to put into the first entry in the new Attribute\n type\n CDF type of the first entry from :mod:`~pycdf.const`.\n Only used if data are specified.\n\n Raises\n ======\n KeyError : if the name already exists in this list\n ' if (name in self): raise KeyError((name + ' already exists.')) attr = self._get_or_create(name) if (data is not None): if (self.special_entry is None): attr.new(data, type) else: attr.new(data, type, self.special_entry())
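new on the attribute list differs from plain assignment in that it refuses to overwrite an existing attribute; a short sketch, reusing the hypothetical cdf handle from the earlier examples (the attribute name and value are assumptions).

```python
from pycdf import const

cdf.attrs.new('Project', data='made-up project name', type=const.CDF_CHAR)
# A second call with the same name raises KeyError because the gAttr now exists.
```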
def rename(self, old_name, new_name): '\n Rename an attribute in this list\n\n Renaming a zAttribute renames it for *all* zVariables in this CDF!\n\n Parameters\n ==========\n old_name : str\n the current name of the attribute\n new_name : str\n the new name of the attribute\n ' AttrList.__getitem__(self, old_name).rename(new_name)
-8,892,377,292,410,608,000
Rename an attribute in this list

Renaming a zAttribute renames it for *all* zVariables in this CDF!

Parameters
==========
old_name : str
    the current name of the attribute
new_name : str
    the new name of the attribute
pycdf/__init__.py
rename
cpiker/condaCDF
python
def rename(self, old_name, new_name): '\n Rename an attribute in this list\n\n Renaming a zAttribute renames it for *all* zVariables in this CDF!\n\n Parameters\n ==========\n old_name : str\n the current name of the attribute\n new_name : str\n the new name of the attribute\n ' AttrList.__getitem__(self, old_name).rename(new_name)
def from_dict(self, in_dict): '\n Fill this list of attributes from a dictionary\n\n .. deprecated:: 0.1.5\n Use :meth:`~pycdf.AttrList.clone` instead; it supports\n cloning from dictionaries.\n\n Parameters\n ==========\n in_dict : dict\n Attribute list is populated entirely from this dictionary;\n all existing attributes are deleted.\n ' warnings.warn('from_dict is deprecated and will be removed. Use clone.', DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if (not (k in in_dict)): del self[k]
1,740,887,903,258,037,800
Fill this list of attributes from a dictionary

.. deprecated:: 0.1.5
   Use :meth:`~pycdf.AttrList.clone` instead; it supports
   cloning from dictionaries.

Parameters
==========
in_dict : dict
    Attribute list is populated entirely from this dictionary;
    all existing attributes are deleted.
pycdf/__init__.py
from_dict
cpiker/condaCDF
python
def from_dict(self, in_dict): '\n Fill this list of attributes from a dictionary\n\n .. deprecated:: 0.1.5\n Use :meth:`~pycdf.AttrList.clone` instead; it supports\n cloning from dictionaries.\n\n Parameters\n ==========\n in_dict : dict\n Attribute list is populated entirely from this dictionary;\n all existing attributes are deleted.\n ' warnings.warn('from_dict is deprecated and will be removed. Use clone.', DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if (not (k in in_dict)): del self[k]
def _clone_attr(self, master, name, new_name=None): 'Clones a single attribute from one in this list or another\n\n Copies data and types from the master attribute to the new one\n\n @param master: attribute list to copy attribute from\n @type master: L{AttrList}\n @param name: name of attribute to copy\n @type name: str\n @param new_name: name of the new attribute, default L{name}\n @type new_name: str\n ' if (new_name is None): new_name = name self[new_name] = master[name]
4,593,757,647,274,060,300
Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str
pycdf/__init__.py
_clone_attr
cpiker/condaCDF
python
def _clone_attr(self, master, name, new_name=None): 'Clones a single attribute from one in this list or another\n\n Copies data and types from the master attribute to the new one\n\n @param master: attribute list to copy attribute from\n @type master: L{AttrList}\n @param name: name of attribute to copy\n @type name: str\n @param new_name: name of the new attribute, default L{name}\n @type new_name: str\n ' if (new_name is None): new_name = name self[new_name] = master[name]
def _clone_list(self, master): 'Clones this attribute list from another\n\n @param master: the attribute list to copy from\n @type master: L{AttrList}\n ' for name in master: self._clone_attr(master, name) for name in list(self): if (not (name in master)): del self[name]
1,735,739,518,964,436,200
Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList}
pycdf/__init__.py
_clone_list
cpiker/condaCDF
python
def _clone_list(self, master): 'Clones this attribute list from another\n\n @param master: the attribute list to copy from\n @type master: L{AttrList}\n ' for name in master: self._clone_attr(master, name) for name in list(self): if (not (name in master)): del self[name]
def _get_or_create(self, name): "Retrieve L{Attr} or create it if it doesn't exist\n\n @param name: name of the attribute to look up or create\n @type name: str\n @return: attribute with this name\n @rtype: L{Attr}\n " attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if (v.status != const.NO_SUCH_ATTR): raise if (attr is None): attr = self.AttrType(self._cdf_file, name, True) elif (attr.global_scope() != self.global_scope): raise KeyError(((name + ': not ') + self.attr_name)) return attr
-3,932,049,490,588,742,700
Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr}
pycdf/__init__.py
_get_or_create
cpiker/condaCDF
python
def _get_or_create(self, name): "Retrieve L{Attr} or create it if it doesn't exist\n\n @param name: name of the attribute to look up or create\n @type name: str\n @return: attribute with this name\n @rtype: L{Attr}\n " attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if (v.status != const.NO_SUCH_ATTR): raise if (attr is None): attr = self.AttrType(self._cdf_file, name, True) elif (attr.global_scope() != self.global_scope): raise KeyError(((name + ': not ') + self.attr_name)) return attr
def __len__(self): '\n Number of gAttributes in this CDF\n\n Returns\n =======\n out : int\n number of gAttributes in the CDF\n ' count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value
1,307,565,771,030,060,800
Number of gAttributes in this CDF

Returns
=======
out : int
    number of gAttributes in the CDF
pycdf/__init__.py
__len__
cpiker/condaCDF
python
def __len__(self): '\n Number of gAttributes in this CDF\n\n Returns\n =======\n out : int\n number of gAttributes in the CDF\n ' count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value
def __init__(self, zvar): 'Initialize the attribute collection\n\n @param zvar: zVariable these attributes are in\n @param zvar: :py:class:`pycdf.Var`\n ' super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar
6,613,242,305,066,177,000
Initialize the attribute collection

@param zvar: zVariable these attributes are in
@type zvar: :py:class:`pycdf.Var`
pycdf/__init__.py
__init__
cpiker/condaCDF
python
def __init__(self, zvar): 'Initialize the attribute collection\n\n @param zvar: zVariable these attributes are in\n @param zvar: :py:class:`pycdf.Var`\n ' super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar
def __getitem__(self, name): 'Find an zEntry by name\n\n @param name: name of the zAttribute to return\n @type name: str\n @return: attribute named L{name}\n @rtype: L{zAttr}\n @raise KeyError: if there is no attribute named L{name} associated\n with this zVariable\n @raise CDFError: other errors in CDF library\n ' attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(((name + ': no such attribute for variable ') + self._zvar.name()))
3,835,158,616,355,306,500
Find a zEntry by name

@param name: name of the zAttribute to return
@type name: str
@return: attribute named L{name}
@rtype: L{zAttr}
@raise KeyError: if there is no attribute named L{name} associated
    with this zVariable
@raise CDFError: other errors in CDF library
pycdf/__init__.py
__getitem__
cpiker/condaCDF
python
def __getitem__(self, name): 'Find an zEntry by name\n\n @param name: name of the zAttribute to return\n @type name: str\n @return: attribute named L{name}\n @rtype: L{zAttr}\n @raise KeyError: if there is no attribute named L{name} associated\n with this zVariable\n @raise CDFError: other errors in CDF library\n ' attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(((name + ': no such attribute for variable ') + self._zvar.name()))
def __delitem__(self, name): 'Delete an zEntry by name\n\n @param name: name of the zEntry to delete\n @type name: str\n @raise KeyError: if there is no attribute named L{name} associated\n with this zVariable\n @raise CDFError: other errors in CDF library\n @note: If this is the only remaining entry, the Attribute will be\n deleted.\n ' attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if (not attrib.has_entry(zvar_num)): raise KeyError(((str(name) + ': no such attribute for variable ') + str(self._zvar._name))) del attrib[zvar_num] if (len(attrib) == 0): attrib._delete()
4,448,042,545,029,496,300
Delete a zEntry by name

@param name: name of the zEntry to delete
@type name: str
@raise KeyError: if there is no attribute named L{name} associated
    with this zVariable
@raise CDFError: other errors in CDF library
@note: If this is the only remaining entry, the Attribute will be
    deleted.
pycdf/__init__.py
__delitem__
cpiker/condaCDF
python
def __delitem__(self, name): 'Delete an zEntry by name\n\n @param name: name of the zEntry to delete\n @type name: str\n @raise KeyError: if there is no attribute named L{name} associated\n with this zVariable\n @raise CDFError: other errors in CDF library\n @note: If this is the only remaining entry, the Attribute will be\n deleted.\n ' attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if (not attrib.has_entry(zvar_num)): raise KeyError(((str(name) + ': no such attribute for variable ') + str(self._zvar._name))) del attrib[zvar_num] if (len(attrib) == 0): attrib._delete()
def __setitem__(self, name, data): 'Sets a zEntry by name\n\n The type of the zEntry is guessed from L{data}. The type is chosen to\n match the data; subject to that constraint, it will try to match\n (in order):\n 1. existing zEntry corresponding to this zVar\n 2. other zEntries in this zAttribute\n 3. the type of this zVar\n 4. data-matching constraints described in L{_Hyperslice.types}\n\n @param name: name of zAttribute; zEntry for this zVariable will be set\n in zAttribute by this name\n @type name: str\n @raise CDFError: errors in CDF library\n @raise ValueError: if unable to find a valid CDF type matching L{data},\n or if L{data} is the wrong dimensions.\n ' try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data
-8,386,760,686,779,896,000
Sets a zEntry by name

The type of the zEntry is guessed from L{data}. The type is chosen to
match the data; subject to that constraint, it will try to match
(in order):

1. existing zEntry corresponding to this zVar
2. other zEntries in this zAttribute
3. the type of this zVar
4. data-matching constraints described in L{_Hyperslice.types}

@param name: name of zAttribute; zEntry for this zVariable will be set
    in zAttribute by this name
@type name: str
@raise CDFError: errors in CDF library
@raise ValueError: if unable to find a valid CDF type matching L{data},
    or if L{data} is the wrong dimensions.
pycdf/__init__.py
__setitem__
cpiker/condaCDF
python
def __setitem__(self, name, data): 'Sets a zEntry by name\n\n The type of the zEntry is guessed from L{data}. The type is chosen to\n match the data; subject to that constraint, it will try to match\n (in order):\n 1. existing zEntry corresponding to this zVar\n 2. other zEntries in this zAttribute\n 3. the type of this zVar\n 4. data-matching constraints described in L{_Hyperslice.types}\n\n @param name: name of zAttribute; zEntry for this zVariable will be set\n in zAttribute by this name\n @type name: str\n @raise CDFError: errors in CDF library\n @raise ValueError: if unable to find a valid CDF type matching L{data},\n or if L{data} is the wrong dimensions.\n ' try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data
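Per-variable attributes go through the same mapping protocol, keyed by attribute name and scoped to one zVariable. A hypothetical sketch, reusing the cdf handle from the earlier examples; the variable and attribute names are assumptions.

```python
var = cdf['Epoch']                      # assumed zVariable
units = var.attrs['UNITS']              # the zEntry of UNITS for this variable
var.attrs['FIELDNAM'] = 'Epoch time'    # create or overwrite the zEntry
del var.attrs['CATDESC']                # drops the zEntry; the zAttr itself is
                                        # deleted only if this was its last entry
```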
def __len__(self): 'Number of zAttributes in this variable\n\n @return: number of zAttributes in the CDF\n which have entries for this variable.\n @rtype: int\n ' length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while (current < count.value): candidate = zAttr(self._cdf_file, current) if (not candidate.global_scope()): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length
4,828,795,206,108,122,000
Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int
pycdf/__init__.py
__len__
cpiker/condaCDF
python
def __len__(self): 'Number of zAttributes in this variable\n\n @return: number of zAttributes in the CDF\n which have entries for this variable.\n @rtype: int\n ' length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while (current < count.value): candidate = zAttr(self._cdf_file, current) if (not candidate.global_scope()): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length
def type(self, name, new_type=None): "Find or change the CDF type of a zEntry in this zVar\n\n @param name: name of the zAttr to check or change\n @type name: str\n @param new_type: type to change it to, see :py:mod:`pycdf.const`\n @type new_type: ctypes.c_long\n @return: CDF variable type, see :py:mod:`pycdf.const`\n @rtype: int\n @note: If changing types, old and new must be equivalent, see CDF\n User's Guide section 2.5.5 pg. 57\n " attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if (not attrib.has_entry(zvar_num)): raise KeyError(((name + ': no such attribute for variable ') + self._zvar.name())) return attrib.type(zvar_num, new_type)
683,384,987,073,141,900
Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 57
pycdf/__init__.py
type
cpiker/condaCDF
python
def type(self, name, new_type=None): "Find or change the CDF type of a zEntry in this zVar\n\n @param name: name of the zAttr to check or change\n @type name: str\n @param new_type: type to change it to, see :py:mod:`pycdf.const`\n @type new_type: ctypes.c_long\n @return: CDF variable type, see :py:mod:`pycdf.const`\n @rtype: int\n @note: If changing types, old and new must be equivalent, see CDF\n User's Guide section 2.5.5 pg. 57\n " attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if (not attrib.has_entry(zvar_num)): raise KeyError(((name + ': no such attribute for variable ') + self._zvar.name())) return attrib.type(zvar_num, new_type)
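Checking the stored type of a per-variable entry, using the type-code-to-name table pycdf keeps on its lib object; a sketch only, with assumed variable and attribute names.

```python
import pycdf

code = cdf['Epoch'].attrs.type('VALIDMIN')   # integer CDF type code
print(pycdf.lib.cdftypenames[code])          # e.g. 'CDF_EPOCH'
```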
def _clone_attr(self, master, name, new_name=None): 'Clones a single attribute from one in this list or another\n\n Copies data and types from the master attribute to the new one\n\n @param master: attribute list to copy attribute from\n @type master: L{zAttrList}\n @param name: name of attribute to copy\n @type name: str\n @param new_name: name of the new attribute, default L{name}\n @type new_name: str\n ' if (new_name is None): new_name = name if (new_name in self): del self[new_name] self.new(new_name, master[name], (master.type(name) if hasattr(master, 'type') else None))
6,926,953,596,347,258,000
Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str
pycdf/__init__.py
_clone_attr
cpiker/condaCDF
python
def _clone_attr(self, master, name, new_name=None): 'Clones a single attribute from one in this list or another\n\n Copies data and types from the master attribute to the new one\n\n @param master: attribute list to copy attribute from\n @type master: L{zAttrList}\n @param name: name of attribute to copy\n @type name: str\n @param new_name: name of the new attribute, default L{name}\n @type new_name: str\n ' if (new_name is None): new_name = name if (new_name in self): del self[new_name] self.new(new_name, master[name], (master.type(name) if hasattr(master, 'type') else None))
def _GetUserInit(allow_privileged): 'Gets user-init metadata value for COS image.' allow_privileged_val = ('true' if allow_privileged else 'false') return (USER_INIT_TEMPLATE % allow_privileged_val)
-9,055,961,458,231,454,000
Gets user-init metadata value for COS image.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_GetUserInit
bopopescu/JobSniperRails
python
def _GetUserInit(allow_privileged): allow_privileged_val = ('true' if allow_privileged else 'false') return (USER_INIT_TEMPLATE % allow_privileged_val)
def ValidateUserMetadata(metadata): 'Validates if user-specified metadata.\n\n Checks if it contains values which may conflict with container deployment.\n Args:\n metadata: user-specified VM metadata.\n\n Raises:\n InvalidMetadataKeyException: if there is conflict with user-provided\n metadata\n ' for entry in metadata.items: if (entry.key in [USER_DATA_KEY, CONTAINER_MANIFEST_KEY, GKE_DOCKER]): raise InvalidMetadataKeyException(entry.key)
2,603,807,631,856,674,300
Validates user-specified metadata.

Checks if it contains values which may conflict with container deployment.

Args:
  metadata: user-specified VM metadata.

Raises:
  InvalidMetadataKeyException: if there is conflict with user-provided
    metadata.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
ValidateUserMetadata
bopopescu/JobSniperRails
python
def ValidateUserMetadata(metadata): 'Validates if user-specified metadata.\n\n Checks if it contains values which may conflict with container deployment.\n Args:\n metadata: user-specified VM metadata.\n\n Raises:\n InvalidMetadataKeyException: if there is conflict with user-provided\n metadata\n ' for entry in metadata.items: if (entry.key in [USER_DATA_KEY, CONTAINER_MANIFEST_KEY, GKE_DOCKER]): raise InvalidMetadataKeyException(entry.key)
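For the metadata validator above, the only contract is an object whose items entries expose a key attribute; a stand-in sketch with namedtuples instead of the real compute Metadata message. The actual reserved key values come from module constants that are not shown in this excerpt.

```python
from collections import namedtuple

Item = namedtuple('Item', ['key', 'value'])
Metadata = namedtuple('Metadata', ['items'])

md = Metadata(items=[Item('startup-script', '#! /bin/bash'),
                     Item('ssh-keys', 'alice:ssh-rsa ...')])
# ValidateUserMetadata(md) passes; it raises InvalidMetadataKeyException only
# when a key equals USER_DATA_KEY, CONTAINER_MANIFEST_KEY, or GKE_DOCKER.
```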
def CreateTagsMessage(messages, tags): 'Create tags message with parameters for container VM or VM templates.' if tags: return messages.Tags(items=tags)
7,419,058,106,280,878,000
Create tags message with parameters for container VM or VM templates.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
CreateTagsMessage
bopopescu/JobSniperRails
python
def CreateTagsMessage(messages, tags): if tags: return messages.Tags(items=tags)
def GetLabelsMessageWithCosVersion(labels, image_uri, resources, resource_class): 'Returns message with labels for instance / instance template.\n\n Args:\n labels: dict, labels to assign to the resource.\n image_uri: URI of image used as a base for the resource. The function\n extracts COS version from the URI and uses it as a value of\n `container-vm` label.\n resources: object that can parse image_uri.\n resource_class: class of the resource to which labels will be assigned.\n Must contain LabelsValue class and\n resource_class.LabelsValue must contain AdditionalProperty\n class.\n ' cos_version = resources.Parse(image_uri, collection='compute.images').Name().replace('/', '-') if (labels is None): labels = {} labels['container-vm'] = cos_version additional_properties = [resource_class.LabelsValue.AdditionalProperty(key=k, value=v) for (k, v) in sorted(six.iteritems(labels))] return resource_class.LabelsValue(additionalProperties=additional_properties)
-5,469,485,270,292,029,000
Returns message with labels for instance / instance template.

Args:
  labels: dict, labels to assign to the resource.
  image_uri: URI of image used as a base for the resource. The function
    extracts COS version from the URI and uses it as a value of
    `container-vm` label.
  resources: object that can parse image_uri.
  resource_class: class of the resource to which labels will be assigned.
    Must contain LabelsValue class and resource_class.LabelsValue must
    contain AdditionalProperty class.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
GetLabelsMessageWithCosVersion
bopopescu/JobSniperRails
python
def GetLabelsMessageWithCosVersion(labels, image_uri, resources, resource_class): 'Returns message with labels for instance / instance template.\n\n Args:\n labels: dict, labels to assign to the resource.\n image_uri: URI of image used as a base for the resource. The function\n extracts COS version from the URI and uses it as a value of\n `container-vm` label.\n resources: object that can parse image_uri.\n resource_class: class of the resource to which labels will be assigned.\n Must contain LabelsValue class and\n resource_class.LabelsValue must contain AdditionalProperty\n class.\n ' cos_version = resources.Parse(image_uri, collection='compute.images').Name().replace('/', '-') if (labels is None): labels = {} labels['container-vm'] = cos_version additional_properties = [resource_class.LabelsValue.AdditionalProperty(key=k, value=v) for (k, v) in sorted(six.iteritems(labels))] return resource_class.LabelsValue(additionalProperties=additional_properties)
def ExpandCosImageFlag(compute_client): 'Select a COS image to run Docker.' compute = compute_client.apitools_client images = compute_client.MakeRequests([(compute.images, 'List', compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))]) return _SelectNewestCosImage(images)
6,538,690,885,852,462,000
Select a COS image to run Docker.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
ExpandCosImageFlag
bopopescu/JobSniperRails
python
def ExpandCosImageFlag(compute_client): compute = compute_client.apitools_client images = compute_client.MakeRequests([(compute.images, 'List', compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))]) return _SelectNewestCosImage(images)
def _SelectNewestCosImage(images): 'Selects newest COS image from the list.' cos_images = sorted([image for image in images if image.name.startswith(COS_MAJOR_RELEASE)], key=(lambda x: times.ParseDateTime(x.creationTimestamp))) if (not cos_images): raise NoCosImageException() return cos_images[(- 1)].selfLink
1,021,848,419,324,734,500
Selects newest COS image from the list.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_SelectNewestCosImage
bopopescu/JobSniperRails
python
def _SelectNewestCosImage(images): cos_images = sorted([image for image in images if image.name.startswith(COS_MAJOR_RELEASE)], key=(lambda x: times.ParseDateTime(x.creationTimestamp))) if (not cos_images): raise NoCosImageException() return cos_images[(- 1)].selfLink
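A minimal, self-contained sketch of the selection above: filter to the COS family prefix, sort by parsed creation timestamp, take the last element. FakeImage, the 'cos-stable' prefix, and the sample timestamps are invented for illustration; the real code works on the SDK's image messages, the COS_MAJOR_RELEASE constant, and times.ParseDateTime.

from collections import namedtuple
from datetime import datetime

# FakeImage stands in for the compute Image message; names and timestamps are made up.
FakeImage = namedtuple('FakeImage', ['name', 'creationTimestamp', 'selfLink'])
images = [
    FakeImage('cos-stable-72', '2019-01-10T00:00:00', 'link/72'),
    FakeImage('debian-9', '2019-04-01T00:00:00', 'link/debian'),
    FakeImage('cos-stable-73', '2019-03-01T00:00:00', 'link/73'),
]
cos = sorted((i for i in images if i.name.startswith('cos-stable')),
             key=lambda i: datetime.fromisoformat(i.creationTimestamp))
print(cos[-1].selfLink)  # 'link/73' -- the newest matching image wins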
def _ValidateAndParsePortMapping(port_mappings): 'Parses and validates port mapping.' ports_config = [] for port_mapping in port_mappings: mapping_match = re.match('^(\\d+):(\\d+):(\\S+)$', port_mapping) if (not mapping_match): raise calliope_exceptions.InvalidArgumentException('--port-mappings', 'Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.') (port, target_port, protocol) = mapping_match.groups() if (protocol not in ALLOWED_PROTOCOLS): raise calliope_exceptions.InvalidArgumentException('--port-mappings', 'Protocol should be one of [{0}]'.format(', '.join(ALLOWED_PROTOCOLS))) ports_config.append({'containerPort': int(target_port), 'hostPort': int(port), 'protocol': protocol}) return ports_config
1,751,126,288,136,252,000
Parses and validates port mapping.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_ValidateAndParsePortMapping
bopopescu/JobSniperRails
python
def _ValidateAndParsePortMapping(port_mappings): ports_config = [] for port_mapping in port_mappings: mapping_match = re.match('^(\\d+):(\\d+):(\\S+)$', port_mapping) if (not mapping_match): raise calliope_exceptions.InvalidArgumentException('--port-mappings', 'Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.') (port, target_port, protocol) = mapping_match.groups() if (protocol not in ALLOWED_PROTOCOLS): raise calliope_exceptions.InvalidArgumentException('--port-mappings', 'Protocol should be one of [{0}]'.format(', '.join(ALLOWED_PROTOCOLS))) ports_config.append({'containerPort': int(target_port), 'hostPort': int(port), 'protocol': protocol}) return ports_config
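The PORT:TARGET_PORT:PROTOCOL format is easy to exercise outside gcloud. This sketch reuses the same regular expression but raises a plain ValueError instead of the SDK's InvalidArgumentException; the ALLOWED_PROTOCOLS tuple is assumed to mirror the module constant.

import re

ALLOWED_PROTOCOLS = ('TCP', 'UDP')  # assumed to mirror the module-level constant

def parse_port_mappings(port_mappings):
    ports_config = []
    for port_mapping in port_mappings:
        match = re.match(r'^(\d+):(\d+):(\S+)$', port_mapping)
        if not match:
            raise ValueError('Port mappings should follow PORT:TARGET_PORT:PROTOCOL format.')
        port, target_port, protocol = match.groups()
        if protocol not in ALLOWED_PROTOCOLS:
            raise ValueError('Protocol should be one of %s' % ', '.join(ALLOWED_PROTOCOLS))
        ports_config.append({'containerPort': int(target_port),
                             'hostPort': int(port),
                             'protocol': protocol})
    return ports_config

print(parse_port_mappings(['80:8080:TCP']))
# [{'containerPort': 8080, 'hostPort': 80, 'protocol': 'TCP'}]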
def ExpandKonletCosImageFlag(compute_client): 'Select a COS image to run Konlet.\n\n This function scans three families in order:\n - stable\n - beta\n - dev\n looking for the first image with version at least _MIN_PREFERRED_COS_VERSION.\n\n Args:\n compute_client: ClientAdapter, The Compute API client adapter\n\n Returns:\n COS image at version _MIN_PREFERRED_COS_VERSION or later.\n\n Raises:\n NoCosImageException: No COS image at version at least\n _MIN_PREFERRED_COS_VERSION was found. This should not happen if backend is\n healthy.\n ' compute = compute_client.apitools_client images = compute_client.MakeRequests([(compute.images, 'List', compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))]) name_re_template = 'cos-{}-(\\d+)-.*' image_families = ['stable', 'beta', 'dev'] for family in image_families: name_re = name_re_template.format(family) def MakeCreateComparisonKey(name_re): def CreateComparisonKey(image): version = int(re.match(name_re, image.name).group(1)) timestamp = times.ParseDateTime(image.creationTimestamp) return (version, timestamp) return CreateComparisonKey cos_images = sorted([image for image in images if re.match(name_re, image.name)], key=MakeCreateComparisonKey(name_re)) if (cos_images and (MakeCreateComparisonKey(name_re)(cos_images[(- 1)])[0] >= _MIN_PREFERRED_COS_VERSION)): return cos_images[(- 1)].selfLink raise NoCosImageException()
-6,864,573,768,492,050,000
Select a COS image to run Konlet. This function scans three families in order: - stable - beta - dev looking for the first image with version at least _MIN_PREFERRED_COS_VERSION. Args: compute_client: ClientAdapter, The Compute API client adapter Returns: COS image at version _MIN_PREFERRED_COS_VERSION or later. Raises: NoCosImageException: No COS image at version at least _MIN_PREFERRED_COS_VERSION was found. This should not happen if backend is healthy.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
ExpandKonletCosImageFlag
bopopescu/JobSniperRails
python
def ExpandKonletCosImageFlag(compute_client): 'Select a COS image to run Konlet.\n\n This function scans three families in order:\n - stable\n - beta\n - dev\n looking for the first image with version at least _MIN_PREFERRED_COS_VERSION.\n\n Args:\n compute_client: ClientAdapter, The Compute API client adapter\n\n Returns:\n COS image at version _MIN_PREFERRED_COS_VERSION or later.\n\n Raises:\n NoCosImageException: No COS image at version at least\n _MIN_PREFERRED_COS_VERSION was found. This should not happen if backend is\n healthy.\n ' compute = compute_client.apitools_client images = compute_client.MakeRequests([(compute.images, 'List', compute_client.messages.ComputeImagesListRequest(project=COS_PROJECT))]) name_re_template = 'cos-{}-(\\d+)-.*' image_families = ['stable', 'beta', 'dev'] for family in image_families: name_re = name_re_template.format(family) def MakeCreateComparisonKey(name_re): def CreateComparisonKey(image): version = int(re.match(name_re, image.name).group(1)) timestamp = times.ParseDateTime(image.creationTimestamp) return (version, timestamp) return CreateComparisonKey cos_images = sorted([image for image in images if re.match(name_re, image.name)], key=MakeCreateComparisonKey(name_re)) if (cos_images and (MakeCreateComparisonKey(name_re)(cos_images[(- 1)])[0] >= _MIN_PREFERRED_COS_VERSION)): return cos_images[(- 1)].selfLink raise NoCosImageException()
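The interesting part of the konlet image scan is the comparison key: the major version parsed from the image name dominates, and the creation timestamp only breaks ties within a version. A small sketch of that ordering with invented image names and timestamps:

import re
from collections import namedtuple
from datetime import datetime

FakeImage = namedtuple('FakeImage', ['name', 'creationTimestamp', 'selfLink'])
name_re = r'cos-stable-(\d+)-.*'  # the 'stable' family instance of the template above

def comparison_key(image):
    version = int(re.match(name_re, image.name).group(1))
    timestamp = datetime.fromisoformat(image.creationTimestamp)
    return (version, timestamp)

images = [
    FakeImage('cos-stable-72-11316-136-0', '2019-02-20T00:00:00', 'link/a'),
    FakeImage('cos-stable-73-11647-121-0', '2019-01-05T00:00:00', 'link/b'),
]
newest = sorted(images, key=comparison_key)[-1]
print(newest.selfLink)  # 'link/b': version 73 wins even though its timestamp is older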
def _ReadDictionary(filename): 'Read environment variable from file.\n\n File format:\n\n It is intended (but not guaranteed) to follow standard docker format\n [](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file)\n but without capturing environment variables from host machine.\n Lines starting by "#" character are comments.\n Empty lines are ignored.\n Below grammar production follow in EBNF format.\n\n file = (whitespace* statement \'\\n\')*\n statement = comment\n | definition\n whitespace = \' \'\n | \'\\t\'\n comment = \'#\' [^\\n]*\n definition = [^#=\\n] [^= \\t\\n]* \'=\' [^\\n]*\n\n Args:\n filename: str, name of the file to read\n\n Returns:\n A dictionary mapping environment variable names to their values.\n ' env_vars = {} if (not filename): return env_vars with files.FileReader(filename) as f: for (i, line) in enumerate(f): line = line.strip() if ((len(line) <= 1) or (line[0] == '#')): continue assignment_op_loc = line.find('=') if (assignment_op_loc == (- 1)): raise calliope_exceptions.BadFileException('Syntax error in {}:{}: Expected VAR=VAL, got {}'.format(filename, i, line)) env = line[:assignment_op_loc] val = line[(assignment_op_loc + 1):] if ((' ' in env) or ('\t' in env)): raise calliope_exceptions.BadFileException('Syntax error in {}:{} Variable name cannot contain whitespaces, got "{}"'.format(filename, i, env)) env_vars[env] = val return env_vars
7,033,727,727,017,680,000
Read environment variable from file. File format: It is intended (but not guaranteed) to follow standard docker format [](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) but without capturing environment variables from host machine. Lines starting by "#" character are comments. Empty lines are ignored. Below grammar production follow in EBNF format. file = (whitespace* statement '\n')* statement = comment | definition whitespace = ' ' | '\t' comment = '#' [^\n]* definition = [^#=\n] [^= \t\n]* '=' [^\n]* Args: filename: str, name of the file to read Returns: A dictionary mapping environment variable names to their values.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_ReadDictionary
bopopescu/JobSniperRails
python
def _ReadDictionary(filename): 'Read environment variable from file.\n\n File format:\n\n It is intended (but not guaranteed) to follow standard docker format\n [](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file)\n but without capturing environment variables from host machine.\n Lines starting by "#" character are comments.\n Empty lines are ignored.\n Below grammar production follow in EBNF format.\n\n file = (whitespace* statement \'\\n\')*\n statement = comment\n | definition\n whitespace = \' \'\n | \'\\t\'\n comment = \'#\' [^\\n]*\n definition = [^#=\\n] [^= \\t\\n]* \'=\' [^\\n]*\n\n Args:\n filename: str, name of the file to read\n\n Returns:\n A dictionary mapping environment variable names to their values.\n ' env_vars = {} if (not filename): return env_vars with files.FileReader(filename) as f: for (i, line) in enumerate(f): line = line.strip() if ((len(line) <= 1) or (line[0] == '#')): continue assignment_op_loc = line.find('=') if (assignment_op_loc == (- 1)): raise calliope_exceptions.BadFileException('Syntax error in {}:{}: Expected VAR=VAL, got {}'.format(filename, i, line)) env = line[:assignment_op_loc] val = line[(assignment_op_loc + 1):] if ((' ' in env) or ('\t' in env)): raise calliope_exceptions.BadFileException('Syntax error in {}:{} Variable name cannot contain whitespaces, got "{}"'.format(filename, i, env)) env_vars[env] = val return env_vars
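A simplified stand-in for the parser above, showing the grammar in practice: comment and blank lines are skipped, the first '=' splits name from value, later '=' characters and spaces stay in the value, and whitespace in a name is rejected. It uses a plain temp file and ValueError instead of files.FileReader and BadFileException.

import os
import tempfile

content = (
    '# comment lines and blank lines are ignored\n'
    '\n'
    'DB_HOST=10.0.0.5\n'
    'GREETING=hello world=equals kept in value\n'
)

def read_env_file(path):
    env_vars = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if len(line) <= 1 or line[0] == '#':
                continue
            key, sep, val = line.partition('=')  # split on the first '=' only
            if not sep or ' ' in key or '\t' in key:
                raise ValueError('Expected VAR=VAL, got %r' % line)
            env_vars[key] = val
    return env_vars

with tempfile.NamedTemporaryFile('w', suffix='.env', delete=False) as f:
    f.write(content)
    path = f.name
print(read_env_file(path))
# {'DB_HOST': '10.0.0.5', 'GREETING': 'hello world=equals kept in value'}
os.remove(path)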
def _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts, used_names=None, disks=None): 'Add volume specs from --container-mount-disk.' used_names = (used_names or []) disks = (disks or []) idx = 0 for mount_disk in container_mount_disk: while (_GetPersistentDiskName(idx) in used_names): idx += 1 device_name = mount_disk.get('name') partition = mount_disk.get('partition') def _GetMatchingVolume(device_name, partition): for volume_spec in volumes: pd = volume_spec.get('gcePersistentDisk', {}) if ((pd.get('pdName') == device_name) and (pd.get('partition') == partition)): return volume_spec repeated = _GetMatchingVolume(device_name, partition) if repeated: name = repeated['name'] else: name = _GetPersistentDiskName(idx) used_names.append(name) if (not device_name): if (len(disks) != 1): raise calliope_exceptions.InvalidArgumentException('--container-mount-disk', 'Must specify the name of the disk to be mounted unless exactly one disk is attached to the instance.') device_name = disks[0].get('name') if (disks[0].get('device-name', device_name) != device_name): raise exceptions.InvalidArgumentException('--container-mount-disk', 'Must not have a device-name that is different from disk name if disk is being attached to the instance and mounted to a container: [{}]'.format(disks[0].get('device-name'))) volume_mounts.append({'name': name, 'mountPath': mount_disk['mount-path'], 'readOnly': mount_disk.get('mode', _DEFAULT_MODE).isReadOnly()}) if repeated: continue volume_spec = {'name': name, 'gcePersistentDisk': {'pdName': device_name, 'fsType': 'ext4'}} if partition: volume_spec['gcePersistentDisk'].update({'partition': partition}) volumes.append(volume_spec) idx += 1
-7,435,622,479,589,784,000
Add volume specs from --container-mount-disk.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_AddMountedDisksToManifest
bopopescu/JobSniperRails
python
def _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts, used_names=None, disks=None): used_names = (used_names or []) disks = (disks or []) idx = 0 for mount_disk in container_mount_disk: while (_GetPersistentDiskName(idx) in used_names): idx += 1 device_name = mount_disk.get('name') partition = mount_disk.get('partition') def _GetMatchingVolume(device_name, partition): for volume_spec in volumes: pd = volume_spec.get('gcePersistentDisk', {}) if ((pd.get('pdName') == device_name) and (pd.get('partition') == partition)): return volume_spec repeated = _GetMatchingVolume(device_name, partition) if repeated: name = repeated['name'] else: name = _GetPersistentDiskName(idx) used_names.append(name) if (not device_name): if (len(disks) != 1): raise calliope_exceptions.InvalidArgumentException('--container-mount-disk', 'Must specify the name of the disk to be mounted unless exactly one disk is attached to the instance.') device_name = disks[0].get('name') if (disks[0].get('device-name', device_name) != device_name): raise exceptions.InvalidArgumentException('--container-mount-disk', 'Must not have a device-name that is different from disk name if disk is being attached to the instance and mounted to a container: [{}]'.format(disks[0].get('device-name'))) volume_mounts.append({'name': name, 'mountPath': mount_disk['mount-path'], 'readOnly': mount_disk.get('mode', _DEFAULT_MODE).isReadOnly()}) if repeated: continue volume_spec = {'name': name, 'gcePersistentDisk': {'pdName': device_name, 'fsType': 'ext4'}} if partition: volume_spec['gcePersistentDisk'].update({'partition': partition}) volumes.append(volume_spec) idx += 1
def _CreateContainerManifest(args, instance_name, container_mount_disk_enabled=False, container_mount_disk=None): 'Create container manifest from argument namespace and instance name.' container = {'image': args.container_image, 'name': instance_name} if (args.container_command is not None): container['command'] = [args.container_command] if (args.container_arg is not None): container['args'] = args.container_arg container['stdin'] = args.container_stdin container['tty'] = args.container_tty container['securityContext'] = {'privileged': args.container_privileged} env_vars = _ReadDictionary(args.container_env_file) for env_var_dict in (args.container_env or []): for (env, val) in six.iteritems(env_var_dict): env_vars[env] = val if env_vars: container['env'] = [{'name': env, 'value': val} for (env, val) in six.iteritems(env_vars)] volumes = [] volume_mounts = [] for (idx, volume) in enumerate((args.container_mount_host_path or [])): volumes.append({'name': _GetHostPathDiskName(idx), 'hostPath': {'path': volume['host-path']}}) volume_mounts.append({'name': _GetHostPathDiskName(idx), 'mountPath': volume['mount-path'], 'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()}) for (idx, tmpfs) in enumerate((args.container_mount_tmpfs or [])): volumes.append({'name': _GetTmpfsDiskName(idx), 'emptyDir': {'medium': 'Memory'}}) volume_mounts.append({'name': _GetTmpfsDiskName(idx), 'mountPath': tmpfs['mount-path']}) if container_mount_disk_enabled: container_mount_disk = (container_mount_disk or []) disks = ((args.disk or []) + (args.create_disk or [])) _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts, disks=disks) container['volumeMounts'] = volume_mounts manifest = {'spec': {'containers': [container], 'volumes': volumes, 'restartPolicy': RESTART_POLICY_API[args.container_restart_policy]}} return manifest
-7,928,636,929,376,096,000
Create container manifest from argument namespace and instance name.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_CreateContainerManifest
bopopescu/JobSniperRails
python
def _CreateContainerManifest(args, instance_name, container_mount_disk_enabled=False, container_mount_disk=None): container = {'image': args.container_image, 'name': instance_name} if (args.container_command is not None): container['command'] = [args.container_command] if (args.container_arg is not None): container['args'] = args.container_arg container['stdin'] = args.container_stdin container['tty'] = args.container_tty container['securityContext'] = {'privileged': args.container_privileged} env_vars = _ReadDictionary(args.container_env_file) for env_var_dict in (args.container_env or []): for (env, val) in six.iteritems(env_var_dict): env_vars[env] = val if env_vars: container['env'] = [{'name': env, 'value': val} for (env, val) in six.iteritems(env_vars)] volumes = [] volume_mounts = [] for (idx, volume) in enumerate((args.container_mount_host_path or [])): volumes.append({'name': _GetHostPathDiskName(idx), 'hostPath': {'path': volume['host-path']}}) volume_mounts.append({'name': _GetHostPathDiskName(idx), 'mountPath': volume['mount-path'], 'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()}) for (idx, tmpfs) in enumerate((args.container_mount_tmpfs or [])): volumes.append({'name': _GetTmpfsDiskName(idx), 'emptyDir': {'medium': 'Memory'}}) volume_mounts.append({'name': _GetTmpfsDiskName(idx), 'mountPath': tmpfs['mount-path']}) if container_mount_disk_enabled: container_mount_disk = (container_mount_disk or []) disks = ((args.disk or []) + (args.create_disk or [])) _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts, disks=disks) container['volumeMounts'] = volume_mounts manifest = {'spec': {'containers': [container], 'volumes': volumes, 'restartPolicy': RESTART_POLICY_API[args.container_restart_policy]}} return manifest
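For orientation, this is roughly the structure the function assembles for a container with one environment variable and one tmpfs mount. It is a hand-written illustration, not captured output; the 'tmpfs-0' volume name and the 'Always' restart policy string are assumptions about _GetTmpfsDiskName and RESTART_POLICY_API.

# Illustrative manifest shape only; field values are invented for the example.
example_manifest = {
    'spec': {
        'containers': [{
            'image': 'gcr.io/my-project/my-app:latest',
            'name': 'my-instance',
            'stdin': False,
            'tty': False,
            'securityContext': {'privileged': False},
            'env': [{'name': 'MODE', 'value': 'prod'}],
            'volumeMounts': [{'name': 'tmpfs-0', 'mountPath': '/cache'}],
        }],
        'volumes': [{'name': 'tmpfs-0', 'emptyDir': {'medium': 'Memory'}}],
        'restartPolicy': 'Always',
    }
}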
def DumpYaml(data): 'Dumps data dict to YAML in format expected by Konlet.' return (MANIFEST_DISCLAIMER + yaml.dump(data))
-8,401,266,490,021,556,000
Dumps data dict to YAML in format expected by Konlet.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
DumpYaml
bopopescu/JobSniperRails
python
def DumpYaml(data): return (MANIFEST_DISCLAIMER + yaml.dump(data))
def _CreateYamlContainerManifest(args, instance_name, container_mount_disk_enabled=False, container_mount_disk=None): 'Helper to create the container manifest.' return DumpYaml(_CreateContainerManifest(args, instance_name, container_mount_disk_enabled=container_mount_disk_enabled, container_mount_disk=container_mount_disk))
7,151,514,943,391,603,000
Helper to create the container manifest.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_CreateYamlContainerManifest
bopopescu/JobSniperRails
python
def _CreateYamlContainerManifest(args, instance_name, container_mount_disk_enabled=False, container_mount_disk=None): return DumpYaml(_CreateContainerManifest(args, instance_name, container_mount_disk_enabled=container_mount_disk_enabled, container_mount_disk=container_mount_disk))
def CreateKonletMetadataMessage(messages, args, instance_name, user_metadata, container_mount_disk_enabled=False, container_mount_disk=None): 'Helper to create the metadata for konlet.' konlet_metadata = {GCE_CONTAINER_DECLARATION: _CreateYamlContainerManifest(args, instance_name, container_mount_disk_enabled=container_mount_disk_enabled, container_mount_disk=container_mount_disk), STACKDRIVER_LOGGING_AGENT_CONFIGURATION: 'true'} return metadata_utils.ConstructMetadataMessage(messages, metadata=konlet_metadata, existing_metadata=user_metadata)
8,358,982,699,455,497,000
Helper to create the metadata for konlet.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
CreateKonletMetadataMessage
bopopescu/JobSniperRails
python
def CreateKonletMetadataMessage(messages, args, instance_name, user_metadata, container_mount_disk_enabled=False, container_mount_disk=None): konlet_metadata = {GCE_CONTAINER_DECLARATION: _CreateYamlContainerManifest(args, instance_name, container_mount_disk_enabled=container_mount_disk_enabled, container_mount_disk=container_mount_disk), STACKDRIVER_LOGGING_AGENT_CONFIGURATION: 'true'} return metadata_utils.ConstructMetadataMessage(messages, metadata=konlet_metadata, existing_metadata=user_metadata)
def UpdateInstance(holder, client, instance_ref, instance, args, container_mount_disk_enabled=False, container_mount_disk=None): 'Update an instance and its container metadata.' for metadata in instance.metadata.items: if (metadata.key == GCE_CONTAINER_DECLARATION): UpdateMetadata(holder, metadata, args, instance, container_mount_disk_enabled=container_mount_disk_enabled, container_mount_disk=container_mount_disk) operation = client.apitools_client.instances.SetMetadata(client.messages.ComputeInstancesSetMetadataRequest(metadata=instance.metadata, **instance_ref.AsDict())) operation_ref = holder.resources.Parse(operation.selfLink, collection='compute.zoneOperations') operation_poller = poller.Poller(client.apitools_client.instances) set_metadata_waiter = waiter.WaitFor(operation_poller, operation_ref, 'Updating specification of container [{0}]'.format(instance_ref.Name())) if (instance.status == client.messages.Instance.StatusValueValuesEnum.TERMINATED): return set_metadata_waiter elif (instance.status == client.messages.Instance.StatusValueValuesEnum.SUSPENDED): return _StopVm(holder, client, instance_ref) else: _StopVm(holder, client, instance_ref) return _StartVm(holder, client, instance_ref) raise NoGceContainerDeclarationMetadataKey()
-4,849,458,390,674,768,000
Update an instance and its container metadata.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
UpdateInstance
bopopescu/JobSniperRails
python
def UpdateInstance(holder, client, instance_ref, instance, args, container_mount_disk_enabled=False, container_mount_disk=None): for metadata in instance.metadata.items: if (metadata.key == GCE_CONTAINER_DECLARATION): UpdateMetadata(holder, metadata, args, instance, container_mount_disk_enabled=container_mount_disk_enabled, container_mount_disk=container_mount_disk) operation = client.apitools_client.instances.SetMetadata(client.messages.ComputeInstancesSetMetadataRequest(metadata=instance.metadata, **instance_ref.AsDict())) operation_ref = holder.resources.Parse(operation.selfLink, collection='compute.zoneOperations') operation_poller = poller.Poller(client.apitools_client.instances) set_metadata_waiter = waiter.WaitFor(operation_poller, operation_ref, 'Updating specification of container [{0}]'.format(instance_ref.Name())) if (instance.status == client.messages.Instance.StatusValueValuesEnum.TERMINATED): return set_metadata_waiter elif (instance.status == client.messages.Instance.StatusValueValuesEnum.SUSPENDED): return _StopVm(holder, client, instance_ref) else: _StopVm(holder, client, instance_ref) return _StartVm(holder, client, instance_ref) raise NoGceContainerDeclarationMetadataKey()
def _StopVm(holder, client, instance_ref): 'Stop the Virtual Machine.' operation = client.apitools_client.instances.Stop(client.messages.ComputeInstancesStopRequest(**instance_ref.AsDict())) operation_ref = holder.resources.Parse(operation.selfLink, collection='compute.zoneOperations') operation_poller = poller.Poller(client.apitools_client.instances) return waiter.WaitFor(operation_poller, operation_ref, 'Stopping instance [{0}]'.format(instance_ref.Name()))
-966,852,758,988,458,400
Stop the Virtual Machine.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_StopVm
bopopescu/JobSniperRails
python
def _StopVm(holder, client, instance_ref): operation = client.apitools_client.instances.Stop(client.messages.ComputeInstancesStopRequest(**instance_ref.AsDict())) operation_ref = holder.resources.Parse(operation.selfLink, collection='compute.zoneOperations') operation_poller = poller.Poller(client.apitools_client.instances) return waiter.WaitFor(operation_poller, operation_ref, 'Stopping instance [{0}]'.format(instance_ref.Name()))
def _StartVm(holder, client, instance_ref): 'Start the Virtual Machine.' operation = client.apitools_client.instances.Start(client.messages.ComputeInstancesStartRequest(**instance_ref.AsDict())) operation_ref = holder.resources.Parse(operation.selfLink, collection='compute.zoneOperations') operation_poller = poller.Poller(client.apitools_client.instances) return waiter.WaitFor(operation_poller, operation_ref, 'Starting instance [{0}]'.format(instance_ref.Name()))
6,301,015,718,287,988,000
Start the Virtual Machine.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_StartVm
bopopescu/JobSniperRails
python
def _StartVm(holder, client, instance_ref): operation = client.apitools_client.instances.Start(client.messages.ComputeInstancesStartRequest(**instance_ref.AsDict())) operation_ref = holder.resources.Parse(operation.selfLink, collection='compute.zoneOperations') operation_poller = poller.Poller(client.apitools_client.instances) return waiter.WaitFor(operation_poller, operation_ref, 'Starting instance [{0}]'.format(instance_ref.Name()))
def UpdateMetadata(holder, metadata, args, instance, container_mount_disk_enabled=False, container_mount_disk=None): 'Update konlet metadata entry using user-supplied data.' manifest = yaml.load(metadata.value) if args.IsSpecified('container_image'): manifest['spec']['containers'][0]['image'] = args.container_image if args.IsSpecified('container_command'): manifest['spec']['containers'][0]['command'] = [args.container_command] if args.IsSpecified('clear_container_command'): manifest['spec']['containers'][0].pop('command', None) if args.IsSpecified('container_arg'): manifest['spec']['containers'][0]['args'] = args.container_arg if args.IsSpecified('clear_container_args'): manifest['spec']['containers'][0].pop('args', None) if (args.container_privileged is True): manifest['spec']['containers'][0]['securityContext']['privileged'] = True if (args.container_privileged is False): manifest['spec']['containers'][0]['securityContext']['privileged'] = False if container_mount_disk_enabled: container_mount_disk = (container_mount_disk or []) disks = instance.disks else: container_mount_disk = [] disks = [] _UpdateMounts(holder, manifest, (args.remove_container_mounts or []), (args.container_mount_host_path or []), (args.container_mount_tmpfs or []), container_mount_disk, disks) _UpdateEnv(manifest, itertools.chain.from_iterable((args.remove_container_env or [])), args.container_env_file, (args.container_env or [])) if (args.container_stdin is True): manifest['spec']['containers'][0]['stdin'] = True if (args.container_stdin is False): manifest['spec']['containers'][0]['stdin'] = False if (args.container_tty is True): manifest['spec']['containers'][0]['tty'] = True if (args.container_tty is False): manifest['spec']['containers'][0]['tty'] = False if args.IsSpecified('container_restart_policy'): manifest['spec']['restartPolicy'] = RESTART_POLICY_API[args.container_restart_policy] metadata.value = DumpYaml(manifest)
6,245,330,564,401,157,000
Update konlet metadata entry using user-supplied data.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
UpdateMetadata
bopopescu/JobSniperRails
python
def UpdateMetadata(holder, metadata, args, instance, container_mount_disk_enabled=False, container_mount_disk=None): manifest = yaml.load(metadata.value) if args.IsSpecified('container_image'): manifest['spec']['containers'][0]['image'] = args.container_image if args.IsSpecified('container_command'): manifest['spec']['containers'][0]['command'] = [args.container_command] if args.IsSpecified('clear_container_command'): manifest['spec']['containers'][0].pop('command', None) if args.IsSpecified('container_arg'): manifest['spec']['containers'][0]['args'] = args.container_arg if args.IsSpecified('clear_container_args'): manifest['spec']['containers'][0].pop('args', None) if (args.container_privileged is True): manifest['spec']['containers'][0]['securityContext']['privileged'] = True if (args.container_privileged is False): manifest['spec']['containers'][0]['securityContext']['privileged'] = False if container_mount_disk_enabled: container_mount_disk = (container_mount_disk or []) disks = instance.disks else: container_mount_disk = [] disks = [] _UpdateMounts(holder, manifest, (args.remove_container_mounts or []), (args.container_mount_host_path or []), (args.container_mount_tmpfs or []), container_mount_disk, disks) _UpdateEnv(manifest, itertools.chain.from_iterable((args.remove_container_env or [])), args.container_env_file, (args.container_env or [])) if (args.container_stdin is True): manifest['spec']['containers'][0]['stdin'] = True if (args.container_stdin is False): manifest['spec']['containers'][0]['stdin'] = False if (args.container_tty is True): manifest['spec']['containers'][0]['tty'] = True if (args.container_tty is False): manifest['spec']['containers'][0]['tty'] = False if args.IsSpecified('container_restart_policy'): manifest['spec']['restartPolicy'] = RESTART_POLICY_API[args.container_restart_policy] metadata.value = DumpYaml(manifest)
def _UpdateMounts(holder, manifest, remove_container_mounts, container_mount_host_path, container_mount_tmpfs, container_mount_disk, disks): 'Updates mounts in container manifest.' _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path, container_mount_tmpfs, container_mount_disk=container_mount_disk) used_names = [volume['name'] for volume in manifest['spec']['volumes']] volumes = [] volume_mounts = [] next_volume_index = 0 for volume in container_mount_host_path: while (_GetHostPathDiskName(next_volume_index) in used_names): next_volume_index += 1 name = _GetHostPathDiskName(next_volume_index) next_volume_index += 1 volumes.append({'name': name, 'hostPath': {'path': volume['host-path']}}) volume_mounts.append({'name': name, 'mountPath': volume['mount-path'], 'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()}) for tmpfs in container_mount_tmpfs: while (_GetTmpfsDiskName(next_volume_index) in used_names): next_volume_index += 1 name = _GetTmpfsDiskName(next_volume_index) next_volume_index += 1 volumes.append({'name': name, 'emptyDir': {'medium': 'Memory'}}) volume_mounts.append({'name': name, 'mountPath': tmpfs['mount-path']}) if container_mount_disk: disks = [{'device-name': disk.deviceName, 'name': holder.resources.Parse(disk.source).Name()} for disk in disks] _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts, used_names=used_names, disks=disks) manifest['spec']['containers'][0]['volumeMounts'].extend(volume_mounts) manifest['spec']['volumes'].extend(volumes)
3,652,365,875,387,263,500
Updates mounts in container manifest.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_UpdateMounts
bopopescu/JobSniperRails
python
def _UpdateMounts(holder, manifest, remove_container_mounts, container_mount_host_path, container_mount_tmpfs, container_mount_disk, disks): _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path, container_mount_tmpfs, container_mount_disk=container_mount_disk) used_names = [volume['name'] for volume in manifest['spec']['volumes']] volumes = [] volume_mounts = [] next_volume_index = 0 for volume in container_mount_host_path: while (_GetHostPathDiskName(next_volume_index) in used_names): next_volume_index += 1 name = _GetHostPathDiskName(next_volume_index) next_volume_index += 1 volumes.append({'name': name, 'hostPath': {'path': volume['host-path']}}) volume_mounts.append({'name': name, 'mountPath': volume['mount-path'], 'readOnly': volume.get('mode', _DEFAULT_MODE).isReadOnly()}) for tmpfs in container_mount_tmpfs: while (_GetTmpfsDiskName(next_volume_index) in used_names): next_volume_index += 1 name = _GetTmpfsDiskName(next_volume_index) next_volume_index += 1 volumes.append({'name': name, 'emptyDir': {'medium': 'Memory'}}) volume_mounts.append({'name': name, 'mountPath': tmpfs['mount-path']}) if container_mount_disk: disks = [{'device-name': disk.deviceName, 'name': holder.resources.Parse(disk.source).Name()} for disk in disks] _AddMountedDisksToManifest(container_mount_disk, volumes, volume_mounts, used_names=used_names, disks=disks) manifest['spec']['containers'][0]['volumeMounts'].extend(volume_mounts) manifest['spec']['volumes'].extend(volumes)
def _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path, container_mount_tmpfs, container_mount_disk=None): 'Remove all specified mounts from container manifest.' container_mount_disk = (container_mount_disk or []) mount_paths_to_remove = remove_container_mounts[:] for host_path in container_mount_host_path: mount_paths_to_remove.append(host_path['mount-path']) for tmpfs in container_mount_tmpfs: mount_paths_to_remove.append(tmpfs['mount-path']) for disk in container_mount_disk: mount_paths_to_remove.append(disk['mount-path']) used_mounts = [] used_mounts_names = [] removed_mount_names = [] for mount in manifest['spec']['containers'][0].get('volumeMounts', []): if (mount['mountPath'] not in mount_paths_to_remove): used_mounts.append(mount) used_mounts_names.append(mount['name']) else: removed_mount_names.append(mount['name']) manifest['spec']['containers'][0]['volumeMounts'] = used_mounts used_volumes = [] for volume in manifest['spec'].get('volumes', []): if ((volume['name'] in used_mounts_names) or (volume['name'] not in removed_mount_names)): used_volumes.append(volume) manifest['spec']['volumes'] = used_volumes
1,538,221,688,976,093,400
Remove all specified mounts from container manifest.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_CleanupMounts
bopopescu/JobSniperRails
python
def _CleanupMounts(manifest, remove_container_mounts, container_mount_host_path, container_mount_tmpfs, container_mount_disk=None): container_mount_disk = (container_mount_disk or []) mount_paths_to_remove = remove_container_mounts[:] for host_path in container_mount_host_path: mount_paths_to_remove.append(host_path['mount-path']) for tmpfs in container_mount_tmpfs: mount_paths_to_remove.append(tmpfs['mount-path']) for disk in container_mount_disk: mount_paths_to_remove.append(disk['mount-path']) used_mounts = [] used_mounts_names = [] removed_mount_names = [] for mount in manifest['spec']['containers'][0].get('volumeMounts', []): if (mount['mountPath'] not in mount_paths_to_remove): used_mounts.append(mount) used_mounts_names.append(mount['name']) else: removed_mount_names.append(mount['name']) manifest['spec']['containers'][0]['volumeMounts'] = used_mounts used_volumes = [] for volume in manifest['spec'].get('volumes', []): if ((volume['name'] in used_mounts_names) or (volume['name'] not in removed_mount_names)): used_volumes.append(volume) manifest['spec']['volumes'] = used_volumes
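A before/after illustration of the cleanup: removing the '/data' mount path drops both its volumeMount and the volume it referenced, while the '/cache' mount and its volume survive. The 'host-path-0' and 'tmpfs-0' names are assumptions about the _Get*DiskName patterns, and the dicts are hand-written rather than captured output.

before = {'spec': {
    'containers': [{'volumeMounts': [
        {'name': 'host-path-0', 'mountPath': '/data'},
        {'name': 'tmpfs-0', 'mountPath': '/cache'},
    ]}],
    'volumes': [
        {'name': 'host-path-0', 'hostPath': {'path': '/var/data'}},
        {'name': 'tmpfs-0', 'emptyDir': {'medium': 'Memory'}},
    ],
}}
# After _CleanupMounts(before, ['/data'], [], []) the manifest is left with:
after_volume_mounts = [{'name': 'tmpfs-0', 'mountPath': '/cache'}]
after_volumes = [{'name': 'tmpfs-0', 'emptyDir': {'medium': 'Memory'}}]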
def _UpdateEnv(manifest, remove_container_env, container_env_file, container_env): 'Update environment variables in container manifest.' current_env = {} for env_val in manifest['spec']['containers'][0].get('env', []): current_env[env_val['name']] = env_val['value'] for env in remove_container_env: current_env.pop(env, None) current_env.update(_ReadDictionary(container_env_file)) for env_var_dict in container_env: for (env, val) in six.iteritems(env_var_dict): current_env[env] = val if current_env: manifest['spec']['containers'][0]['env'] = [{'name': env, 'value': val} for (env, val) in six.iteritems(current_env)]
5,229,501,345,475,472,000
Update environment variables in container manifest.
gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/containers_utils.py
_UpdateEnv
bopopescu/JobSniperRails
python
def _UpdateEnv(manifest, remove_container_env, container_env_file, container_env): current_env = {} for env_val in manifest['spec']['containers'][0].get('env', []): current_env[env_val['name']] = env_val['value'] for env in remove_container_env: current_env.pop(env, None) current_env.update(_ReadDictionary(container_env_file)) for env_var_dict in container_env: for (env, val) in six.iteritems(env_var_dict): current_env[env] = val if current_env: manifest['spec']['containers'][0]['env'] = [{'name': env, 'value': val} for (env, val) in six.iteritems(current_env)]
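The merge order matters here: variables already in the manifest are the base, removals are applied first, then the env file, and finally the --container-env flags, so later sources win. A stand-alone sketch of that precedence with made-up names and values:

current_env = {'MODE': 'dev', 'DEBUG': '1'}           # from the existing manifest
remove = ['DEBUG']                                    # --remove-container-env
from_file = {'MODE': 'staging', 'DB_HOST': '10.0.0.5'}  # --container-env-file
from_flags = [{'MODE': 'prod'}]                       # --container-env

for name in remove:
    current_env.pop(name, None)
current_env.update(from_file)
for env_dict in from_flags:
    current_env.update(env_dict)

print(current_env)  # {'MODE': 'prod', 'DB_HOST': '10.0.0.5'}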
def SaveNewActiveConfig(config): 'Serializes and saves the configuration as the new active config file.' json_config = (json.dumps(config, indent=2, separators=(',', ': '), sort_keys=True) + '\n') json_name = ('umpire.%s.json' % hashlib.md5(json_config.encode('utf-8')).hexdigest()) json_path = os.path.join('resources', json_name) with open(os.path.join(_ENV_DIR, json_path), 'w') as f: f.write(json_config) os.unlink(_CONFIG_PATH) os.symlink(json_path, _CONFIG_PATH)
4,456,873,084,780,837,000
Serializes and saves the configuration as the new active config file.
py/umpire/server/migrations/0010.py
SaveNewActiveConfig
arccode/factory
python
def SaveNewActiveConfig(config): json_config = (json.dumps(config, indent=2, separators=(',', ': '), sort_keys=True) + '\n') json_name = ('umpire.%s.json' % hashlib.md5(json_config.encode('utf-8')).hexdigest()) json_path = os.path.join('resources', json_name) with open(os.path.join(_ENV_DIR, json_path), 'w') as f: f.write(json_config) os.unlink(_CONFIG_PATH) os.symlink(json_path, _CONFIG_PATH)
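A self-contained sketch of the same content-addressed save in a throwaway temp directory: the file name is derived from the md5 of the serialized JSON, and an active-config symlink is re-pointed at it. The 'active_umpire.json' name and the directory layout are assumptions, not Umpire's real _ENV_DIR/_CONFIG_PATH, and the unlink is guarded so the sketch also works on the first call.

import hashlib
import json
import os
import tempfile

env_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(env_dir, 'resources'))
config_path = os.path.join(env_dir, 'active_umpire.json')  # assumed stand-in for _CONFIG_PATH

def save_new_active_config(config):
    json_config = json.dumps(config, indent=2, separators=(',', ': '), sort_keys=True) + '\n'
    json_name = 'umpire.%s.json' % hashlib.md5(json_config.encode('utf-8')).hexdigest()
    json_path = os.path.join('resources', json_name)
    with open(os.path.join(env_dir, json_path), 'w') as f:
        f.write(json_config)
    if os.path.lexists(config_path):   # guard added for the very first save
        os.unlink(config_path)
    os.symlink(json_path, config_path)

save_new_active_config({'services': {}, 'bundles': []})
print(os.readlink(config_path))  # resources/umpire.<md5 of the JSON>.json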
def list(self, resource_group_name, virtual_network_name, subnet_name, **kwargs): 'Gets a list of service association links for a subnet.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_network_name: The name of the virtual network.\n :type virtual_network_name: str\n :param subnet_name: The name of the subnet.\n :type subnet_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ServiceAssociationLinksListResult, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_04_01.models.ServiceAssociationLinksListResult\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-04-01' accept = 'application/json' url = self.list.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url('virtual_network_name', virtual_network_name, 'str'), 'subnetName': self._serialize.url('subnet_name', subnet_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
3,377,442,895,401,322,000
Gets a list of service association links for a subnet. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param subnet_name: The name of the subnet. :type subnet_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ServiceAssociationLinksListResult, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_04_01.models.ServiceAssociationLinksListResult :raises: ~azure.core.exceptions.HttpResponseError
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_service_association_links_operations.py
list
4thel00z/microsoft-crap-that-doesnt-work
python
def list(self, resource_group_name, virtual_network_name, subnet_name, **kwargs): 'Gets a list of service association links for a subnet.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_network_name: The name of the virtual network.\n :type virtual_network_name: str\n :param subnet_name: The name of the subnet.\n :type subnet_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: ServiceAssociationLinksListResult, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2019_04_01.models.ServiceAssociationLinksListResult\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) api_version = '2019-04-01' accept = 'application/json' url = self.list.metadata['url'] path_format_arguments = {'resourceGroupName': self._serialize.url('resource_group_name', resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url('virtual_network_name', virtual_network_name, 'str'), 'subnetName': self._serialize.url('subnet_name', subnet_name, 'str'), 'subscriptionId': self._serialize.url('self._config.subscription_id', self._config.subscription_id, 'str')} url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query('api_version', api_version, 'str') header_parameters = {} header_parameters['Accept'] = self._serialize.header('accept', accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@yaz.task(choice__choices=['yes', 'no', 'unknown']) def required_choice(self, choice): 'This is the documentation for the required_choice task' return self.choices[choice]
5,762,619,045,657,838,000
This is the documentation for the required_choice task
yaz/test/test_task_configuration.py
required_choice
boudewijn-zicht/yaz
python
@yaz.task(choice__choices=['yes', 'no', 'unknown']) def required_choice(self, choice): return self.choices[choice]
@yaz.task def one_line_doc_string(self): 'This is the documentation for the one_line_doc_string task' pass
-5,348,532,464,028,702,000
This is the documentation for the one_line_doc_string task
yaz/test/test_task_configuration.py
one_line_doc_string
boudewijn-zicht/yaz
python
@yaz.task def one_line_doc_string(self): pass
@yaz.task def multi_line_doc_string(self): '\n This is the documentation for the multi_line_doc_string task\n\n This is the long description, for example:\n bla bla,\n etc...\n ' pass
-7,576,571,247,808,248,000
This is the documentation for the multi_line_doc_string task This is the long description, for example: bla bla, etc...
yaz/test/test_task_configuration.py
multi_line_doc_string
boudewijn-zicht/yaz
python
@yaz.task def multi_line_doc_string(self): '\n This is the documentation for the multi_line_doc_string task\n\n This is the long description, for example:\n bla bla,\n etc...\n ' pass
@yaz.task(choice__help='This is the documentation for the choice parameter of the parameter_help task') def parameter_help(self, choice): 'This is the documentation for the parameter_help task' pass
5,651,753,683,150,569,000
This is the documentation for the parameter_help task
yaz/test/test_task_configuration.py
parameter_help
boudewijn-zicht/yaz
python
@yaz.task(choice__help='This is the documentation for the choice parameter of the parameter_help task') def parameter_help(self, choice): pass
def test_010_plugin_help(self): 'Should show plugin help texts from docstring or configuration' caller = self.get_caller([ConfigurationPlugin]) with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout: with self.assertRaises(SystemExit): caller('--help') stdout.seek(0) output = stdout.read() print(output) self.assertRegex(output, 'This is the documentation string for the ConfigurationPlugin') self.assertRegex(output, 'This is the documentation for the required_choice task') self.assertRegex(output, 'This is the documentation for the one_line_doc_string task') self.assertRegex(output, 'This is the documentation for the parameter_help task') self.assertRegex(output, 'This is the documentation for the multi_line_doc_string task') self.assertNotRegex(output, 'This is the long description, for example')
398,901,382,774,434,300
Should show plugin help texts from docstring or configuration
yaz/test/test_task_configuration.py
test_010_plugin_help
boudewijn-zicht/yaz
python
def test_010_plugin_help(self): caller = self.get_caller([ConfigurationPlugin]) with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout: with self.assertRaises(SystemExit): caller('--help') stdout.seek(0) output = stdout.read() print(output) self.assertRegex(output, 'This is the documentation string for the ConfigurationPlugin') self.assertRegex(output, 'This is the documentation for the required_choice task') self.assertRegex(output, 'This is the documentation for the one_line_doc_string task') self.assertRegex(output, 'This is the documentation for the parameter_help task') self.assertRegex(output, 'This is the documentation for the multi_line_doc_string task') self.assertNotRegex(output, 'This is the long description, for example')
def test_020_task_help__docstring(self): 'Should show task help texts from docstring or configuration' caller = self.get_caller([ConfigurationPlugin]) with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout: with self.assertRaises(SystemExit): caller('multi-line-doc-string', '--help') stdout.seek(0) output = stdout.read() print(output) self.assertNotRegex(output, 'This is the documentation string for the ConfigurationPlugin') self.assertRegex(output, 'This is the documentation for the multi_line_doc_string task') self.assertRegex(output, 'This is the long description, for example')
893,196,513,617,972,100
Should show task help texts from docstring or configuration
yaz/test/test_task_configuration.py
test_020_task_help__docstring
boudewijn-zicht/yaz
python
def test_020_task_help__docstring(self): caller = self.get_caller([ConfigurationPlugin]) with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout: with self.assertRaises(SystemExit): caller('multi-line-doc-string', '--help') stdout.seek(0) output = stdout.read() print(output) self.assertNotRegex(output, 'This is the documentation string for the ConfigurationPlugin') self.assertRegex(output, 'This is the documentation for the multi_line_doc_string task') self.assertRegex(output, 'This is the long description, for example')
def test_030_task_help__parameter(self): 'Should show task help texts from docstring or configuration' caller = self.get_caller([ConfigurationPlugin]) with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout: with self.assertRaises(SystemExit): caller('parameter-help', '--help') stdout.seek(0) output = stdout.read() print(output) self.assertNotRegex(output, 'This is the documentation string for the ConfigurationPlugin') self.assertRegex(output, 'This is the documentation for the parameter_help task') self.assertRegex(output, 'This is the documentation for the choice parameter of the\\n.*parameter_help task')
-6,580,420,490,779,309,000
Should show task help texts from docstring or configuration
yaz/test/test_task_configuration.py
test_030_task_help__parameter
boudewijn-zicht/yaz
python
def test_030_task_help__parameter(self): caller = self.get_caller([ConfigurationPlugin]) with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout: with self.assertRaises(SystemExit): caller('parameter-help', '--help') stdout.seek(0) output = stdout.read() print(output) self.assertNotRegex(output, 'This is the documentation string for the ConfigurationPlugin') self.assertRegex(output, 'This is the documentation for the parameter_help task') self.assertRegex(output, 'This is the documentation for the choice parameter of the\\n.*parameter_help task')
def test_040_choices_configuration(self): 'Should accept predefined choices' caller = self.get_caller([ConfigurationPlugin]) self.assertTrue(caller('required-choice', 'yes')) with unittest.mock.patch('sys.stderr', new=io.StringIO()): with self.assertRaises(SystemExit): caller('required-choice', 'unavailable')
-8,754,675,025,409,857,000
Should accept predefined choices
yaz/test/test_task_configuration.py
test_040_choices_configuration
boudewijn-zicht/yaz
python
def test_040_choices_configuration(self): caller = self.get_caller([ConfigurationPlugin]) self.assertTrue(caller('required-choice', 'yes')) with unittest.mock.patch('sys.stderr', new=io.StringIO()): with self.assertRaises(SystemExit): caller('required-choice', 'unavailable')
def rzz(self, theta, ctl, tgt): 'Apply RZZ to circuit.' if (isinstance(ctl, QuantumRegister) and isinstance(tgt, QuantumRegister) and (len(ctl) == len(tgt))): instructions = InstructionSet() for i in range(ctl.size): instructions.add(self.rzz(theta, (ctl, i), (tgt, i))) return instructions self._check_qubit(ctl) self._check_qubit(tgt) self._check_dups([ctl, tgt]) return self._attach(RZZGate(theta, ctl, tgt, self))
6,492,097,230,245,868,000
Apply RZZ to circuit.
qiskit/extensions/standard/rzz.py
rzz
christians94/qiskit-sdk-py
python
def rzz(self, theta, ctl, tgt): if (isinstance(ctl, QuantumRegister) and isinstance(tgt, QuantumRegister) and (len(ctl) == len(tgt))): instructions = InstructionSet() for i in range(ctl.size): instructions.add(self.rzz(theta, (ctl, i), (tgt, i))) return instructions self._check_qubit(ctl) self._check_qubit(tgt) self._check_dups([ctl, tgt]) return self._attach(RZZGate(theta, ctl, tgt, self))
def __init__(self, theta, ctl, tgt, circ=None): 'Create new rzz gate.' super().__init__('rzz', [theta], [ctl, tgt], circ)
7,741,249,714,310,723,000
Create new rzz gate.
qiskit/extensions/standard/rzz.py
__init__
christians94/qiskit-sdk-py
python
def __init__(self, theta, ctl, tgt, circ=None): super().__init__('rzz', [theta], [ctl, tgt], circ)
def qasm(self): 'Return OPENQASM string.' ctl = self.arg[0] tgt = self.arg[1] theta = self.param[0] return self._qasmif(('rzz(%s) %s[%d],%s[%d];' % (theta, ctl[0].name, ctl[1], tgt[0].name, tgt[1])))
6,049,907,832,970,613,000
Return OPENQASM string.
qiskit/extensions/standard/rzz.py
qasm
christians94/qiskit-sdk-py
python
def qasm(self): ctl = self.arg[0] tgt = self.arg[1] theta = self.param[0] return self._qasmif(('rzz(%s) %s[%d],%s[%d];' % (theta, ctl[0].name, ctl[1], tgt[0].name, tgt[1])))
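Evaluating the same format string for a hypothetical two-qubit register named 'q' shows the OPENQASM line the method emits; the register name and theta value are made up for illustration.

# Same format string as above, filled in by hand for theta=0.2 on q[0], q[1].
theta, ctl_name, ctl_idx, tgt_name, tgt_idx = 0.2, 'q', 0, 'q', 1
line = 'rzz(%s) %s[%d],%s[%d];' % (theta, ctl_name, ctl_idx, tgt_name, tgt_idx)
print(line)  # rzz(0.2) q[0],q[1];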
def inverse(self): 'Invert this gate.' self.param[0] = (- self.param[0]) return self
-6,188,600,865,367,955,000
Invert this gate.
qiskit/extensions/standard/rzz.py
inverse
christians94/qiskit-sdk-py
python
def inverse(self): self.param[0] = (- self.param[0]) return self
def reapply(self, circ): 'Reapply this gate to corresponding qubits in circ.' self._modifiers(circ.rzz(self.param[0], self.arg[0], self.arg[1]))
-5,694,214,718,569,381,000
Reapply this gate to corresponding qubits in circ.
qiskit/extensions/standard/rzz.py
reapply
christians94/qiskit-sdk-py
python
def reapply(self, circ): self._modifiers(circ.rzz(self.param[0], self.arg[0], self.arg[1]))
def _projection_simplex(v, z=1): '\n Old implementation for test and benchmark purposes.\n The arguments v and z should be a vector and a scalar, respectively.\n ' n_features = v.shape[0] u = np.sort(v)[::(- 1)] cssv = (np.cumsum(u) - z) ind = (np.arange(n_features) + 1) cond = ((u - (cssv / ind)) > 0) rho = ind[cond][(- 1)] theta = (cssv[cond][(- 1)] / float(rho)) w = np.maximum((v - theta), 0) return w
2,615,208,293,314,532,000
Old implementation for test and benchmark purposes. The arguments v and z should be a vector and a scalar, respectively.
smoothot/tests/test_projection.py
_projection_simplex
cptq/smooth-ot
python
def _projection_simplex(v, z=1): '\n Old implementation for test and benchmark purposes.\n The arguments v and z should be a vector and a scalar, respectively.\n ' n_features = v.shape[0] u = np.sort(v)[::(- 1)] cssv = (np.cumsum(u) - z) ind = (np.arange(n_features) + 1) cond = ((u - (cssv / ind)) > 0) rho = ind[cond][(- 1)] theta = (cssv[cond][(- 1)] / float(rho)) w = np.maximum((v - theta), 0) return w
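A quick usage check of the projection above (requires NumPy): the result lies on the simplex, i.e. it is nonnegative and sums to z. The input vector is arbitrary, and the commented output is what this particular input produces, up to floating point.

import numpy as np

v = np.array([0.7, -0.2, 1.5, 0.1])
w = _projection_simplex(v, z=1)
print(w)        # approximately [0.1 0.  0.9 0. ]
print(w.sum())  # 1.0 (up to floating point)
assert np.all(w >= 0)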