Dataset columns: code (string, lengths 75 to 104k), docstring (string, lengths 1 to 46.9k), text (string, lengths 164 to 112k).
def same_log10_order_of_magnitude(x, delta=0.1): """ Return true if range is approximately in same order of magnitude For example these sequences are in the same order of magnitude: - [1, 8, 5] # [1, 10) - [35, 20, 80] # [10 100) - [232, 730] # [100, 1000) Parameters ---------- x : array-like Values in base 10. Must be size 2 and ``rng[0] <= rng[1]``. delta : float Fuzz factor for approximation. It is multiplicative. """ dmin = np.log10(np.min(x)*(1-delta)) dmax = np.log10(np.max(x)*(1+delta)) return np.floor(dmin) == np.floor(dmax)
Return true if range is approximately in same order of magnitude For example these sequences are in the same order of magnitude: - [1, 8, 5] # [1, 10) - [35, 20, 80] # [10 100) - [232, 730] # [100, 1000) Parameters ---------- x : array-like Values in base 10. Must be size 2 and ``rng[0] <= rng[1]``. delta : float Fuzz factor for approximation. It is multiplicative.
Below is the the instruction that describes the task: ### Input: Return true if range is approximately in same order of magnitude For example these sequences are in the same order of magnitude: - [1, 8, 5] # [1, 10) - [35, 20, 80] # [10 100) - [232, 730] # [100, 1000) Parameters ---------- x : array-like Values in base 10. Must be size 2 and ``rng[0] <= rng[1]``. delta : float Fuzz factor for approximation. It is multiplicative. ### Response: def same_log10_order_of_magnitude(x, delta=0.1): """ Return true if range is approximately in same order of magnitude For example these sequences are in the same order of magnitude: - [1, 8, 5] # [1, 10) - [35, 20, 80] # [10 100) - [232, 730] # [100, 1000) Parameters ---------- x : array-like Values in base 10. Must be size 2 and ``rng[0] <= rng[1]``. delta : float Fuzz factor for approximation. It is multiplicative. """ dmin = np.log10(np.min(x)*(1-delta)) dmax = np.log10(np.max(x)*(1+delta)) return np.floor(dmin) == np.floor(dmax)
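A minimal usage sketch of the row above; the helper is reproduced so the snippet runs standalone, assuming only that numpy is installed.

```python
import numpy as np

def same_log10_order_of_magnitude(x, delta=0.1):
    # Copied from the row above: fuzz both ends of the range, then compare
    # the integer parts of their base-10 logarithms.
    dmin = np.log10(np.min(x) * (1 - delta))
    dmax = np.log10(np.max(x) * (1 + delta))
    return np.floor(dmin) == np.floor(dmax)

print(same_log10_order_of_magnitude([35, 20, 80]))  # True, everything sits in [10, 100)
print(same_log10_order_of_magnitude([8, 15]))       # False, the range straddles 10
```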
def surrogateescape_handler(exc): """ Pure Python implementation of the PEP 383: the "surrogateescape" error handler of Python 3. Undecodable bytes will be replaced by a Unicode character U+DCxx on decoding, and these are translated into the original bytes on encoding. """ mystring = exc.object[exc.start:exc.end] try: if isinstance(exc, UnicodeDecodeError): # mystring is a byte-string in this case decoded = replace_surrogate_decode(mystring) elif isinstance(exc, UnicodeEncodeError): # In the case of u'\udcc3'.encode('ascii', # 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an # exception anyway after this function is called, even though I think # it's doing what it should. It seems that the strict encoder is called # to encode the unicode string that this function returns ... decoded = replace_surrogate_encode(mystring, exc) else: raise exc except NotASurrogateError: raise exc return (decoded, exc.end)
Pure Python implementation of the PEP 383: the "surrogateescape" error handler of Python 3. Undecodable bytes will be replaced by a Unicode character U+DCxx on decoding, and these are translated into the original bytes on encoding.
Below is the the instruction that describes the task: ### Input: Pure Python implementation of the PEP 383: the "surrogateescape" error handler of Python 3. Undecodable bytes will be replaced by a Unicode character U+DCxx on decoding, and these are translated into the original bytes on encoding. ### Response: def surrogateescape_handler(exc): """ Pure Python implementation of the PEP 383: the "surrogateescape" error handler of Python 3. Undecodable bytes will be replaced by a Unicode character U+DCxx on decoding, and these are translated into the original bytes on encoding. """ mystring = exc.object[exc.start:exc.end] try: if isinstance(exc, UnicodeDecodeError): # mystring is a byte-string in this case decoded = replace_surrogate_decode(mystring) elif isinstance(exc, UnicodeEncodeError): # In the case of u'\udcc3'.encode('ascii', # 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an # exception anyway after this function is called, even though I think # it's doing what it should. It seems that the strict encoder is called # to encode the unicode string that this function returns ... decoded = replace_surrogate_encode(mystring, exc) else: raise exc except NotASurrogateError: raise exc return (decoded, exc.end)
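The pure-Python handler above mirrors the 'surrogateescape' error handler that ships with Python 3; a short round-trip shows the behaviour it is reimplementing.

```python
# 0xE9 is not valid UTF-8 on its own, so decoding maps it to U+DCE9;
# encoding with the same handler recovers the original byte.
raw = b"caf\xe9"
text = raw.decode("utf-8", "surrogateescape")
print(repr(text))                                      # 'caf\udce9'
print(text.encode("utf-8", "surrogateescape") == raw)  # True
```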
def index(self, key, default=UNSET): """Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError`` """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: if default is self.UNSET: raise KeyError('key {!r} not in list'.format(key)) return default return self._distance[0]
Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError``
Below is the the instruction that describes the task: ### Input: Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError`` ### Response: def index(self, key, default=UNSET): """Find the first key-value pair with key *key* and return its position. If the key is not found, return *default*. If default was not provided, raise a ``KeyError`` """ self._find_lt(key) node = self._path[0][2] if node is self._tail or key < node[0]: if default is self.UNSET: raise KeyError('key {!r} not in list'.format(key)) return default return self._distance[0]
def eval(self, code, mode="single"): """Evaluate code in the context of the frame.""" if isinstance(code, string_types): if PY2 and isinstance(code, text_type): # noqa code = UTF8_COOKIE + code.encode("utf-8") code = compile(code, "<interactive>", mode) return eval(code, self.globals, self.locals)
Evaluate code in the context of the frame.
Below is the the instruction that describes the task: ### Input: Evaluate code in the context of the frame. ### Response: def eval(self, code, mode="single"): """Evaluate code in the context of the frame.""" if isinstance(code, string_types): if PY2 and isinstance(code, text_type): # noqa code = UTF8_COOKIE + code.encode("utf-8") code = compile(code, "<interactive>", mode) return eval(code, self.globals, self.locals)
def to_set_field(cls): """ Returns a callable instance that will convert a value to a Set. :param cls: Valid class type of the items in the Set. :return: instance of the SetConverter. """ class SetConverter(object): def __init__(self, cls): self._cls = cls @property def cls(self): return resolve_class(self._cls) def __call__(self, values): values = values or set() args = {to_model(self.cls, value) for value in values} return TypedSet(cls=self.cls, args=args) return SetConverter(cls)
Returns a callable instance that will convert a value to a Set. :param cls: Valid class type of the items in the Set. :return: instance of the SetConverter.
Below is the instruction that describes the task: ### Input: Returns a callable instance that will convert a value to a Set. :param cls: Valid class type of the items in the Set. :return: instance of the SetConverter. ### Response: def to_set_field(cls): """ Returns a callable instance that will convert a value to a Set. :param cls: Valid class type of the items in the Set. :return: instance of the SetConverter. """ class SetConverter(object): def __init__(self, cls): self._cls = cls @property def cls(self): return resolve_class(self._cls) def __call__(self, values): values = values or set() args = {to_model(self.cls, value) for value in values} return TypedSet(cls=self.cls, args=args) return SetConverter(cls)
def _close_cursor_now(self, cursor_id, address=None): """Send a kill cursors message with the given id. What closing the cursor actually means depends on this client's cursor manager. If there is none, the cursor is closed synchronously on the current thread. """ if not isinstance(cursor_id, integer_types): raise TypeError("cursor_id must be an instance of (int, long)") if self.__cursor_manager is not None: self.__cursor_manager.close(cursor_id, address) else: self._kill_cursors([cursor_id], address, self._get_topology())
Send a kill cursors message with the given id. What closing the cursor actually means depends on this client's cursor manager. If there is none, the cursor is closed synchronously on the current thread.
Below is the the instruction that describes the task: ### Input: Send a kill cursors message with the given id. What closing the cursor actually means depends on this client's cursor manager. If there is none, the cursor is closed synchronously on the current thread. ### Response: def _close_cursor_now(self, cursor_id, address=None): """Send a kill cursors message with the given id. What closing the cursor actually means depends on this client's cursor manager. If there is none, the cursor is closed synchronously on the current thread. """ if not isinstance(cursor_id, integer_types): raise TypeError("cursor_id must be an instance of (int, long)") if self.__cursor_manager is not None: self.__cursor_manager.close(cursor_id, address) else: self._kill_cursors([cursor_id], address, self._get_topology())
def request_verification(self, user, identity): """ Sends the user a verification email with a link to verify ownership of the email address. :param user: User id or object :param identity: Identity id or object :return: requests Response object """ return UserIdentityRequest(self).put(self.endpoint.request_verification, user, identity)
Sends the user a verification email with a link to verify ownership of the email address. :param user: User id or object :param identity: Identity id or object :return: requests Response object
Below is the the instruction that describes the task: ### Input: Sends the user a verification email with a link to verify ownership of the email address. :param user: User id or object :param identity: Identity id or object :return: requests Response object ### Response: def request_verification(self, user, identity): """ Sends the user a verification email with a link to verify ownership of the email address. :param user: User id or object :param identity: Identity id or object :return: requests Response object """ return UserIdentityRequest(self).put(self.endpoint.request_verification, user, identity)
def free_params(self, value): """Set the free parameters. Note that this bypasses enforce_bounds. """ value = scipy.asarray(value, dtype=float) self.K_up_to_date = False self.k.free_params = value[:self.k.num_free_params] self.noise_k.free_params = value[self.k.num_free_params:self.k.num_free_params + self.noise_k.num_free_params] if self.mu is not None: self.mu.free_params = value[self.k.num_free_params + self.noise_k.num_free_params:]
Set the free parameters. Note that this bypasses enforce_bounds.
Below is the the instruction that describes the task: ### Input: Set the free parameters. Note that this bypasses enforce_bounds. ### Response: def free_params(self, value): """Set the free parameters. Note that this bypasses enforce_bounds. """ value = scipy.asarray(value, dtype=float) self.K_up_to_date = False self.k.free_params = value[:self.k.num_free_params] self.noise_k.free_params = value[self.k.num_free_params:self.k.num_free_params + self.noise_k.num_free_params] if self.mu is not None: self.mu.free_params = value[self.k.num_free_params + self.noise_k.num_free_params:]
def _local_pauli_eig_meas(op, idx): """ Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`) """ if op == 'X': return Program(RY(-pi / 2, idx)) elif op == 'Y': return Program(RX(pi / 2, idx)) elif op == 'Z': return Program() raise ValueError(f'Unknown operation {op}')
Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`)
Below is the the instruction that describes the task: ### Input: Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`) ### Response: def _local_pauli_eig_meas(op, idx): """ Generate gate sequence to measure in the eigenbasis of a Pauli operator, assuming we are only able to measure in the Z eigenbasis. (Note: The unitary operations of this Program are essentially the Hermitian conjugates of those in :py:func:`_one_q_pauli_prep`) """ if op == 'X': return Program(RY(-pi / 2, idx)) elif op == 'Y': return Program(RX(pi / 2, idx)) elif op == 'Z': return Program() raise ValueError(f'Unknown operation {op}')
def emit(self, record): """Write record as journal event. MESSAGE is taken from the message provided by the user, and PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended automatically. In addition, record.MESSAGE_ID will be used if present. """ if record.args and isinstance(record.args, collections.Mapping): extra = dict(self._extra, **record.args) # Merge metadata from handler and record else: extra = self._extra try: msg = self.format(record) pri = self.mapPriority(record.levelno) mid = getattr(record, 'MESSAGE_ID', None) send(msg, SOCKET=self.socket, MESSAGE_ID=mid, PRIORITY=format(pri), LOGGER=record.name, THREAD_NAME=record.threadName, CODE_FILE=record.pathname, CODE_LINE=record.lineno, CODE_FUNC=record.funcName, **extra) except Exception: self.handleError(record)
Write record as journal event. MESSAGE is taken from the message provided by the user, and PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended automatically. In addition, record.MESSAGE_ID will be used if present.
Below is the the instruction that describes the task: ### Input: Write record as journal event. MESSAGE is taken from the message provided by the user, and PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended automatically. In addition, record.MESSAGE_ID will be used if present. ### Response: def emit(self, record): """Write record as journal event. MESSAGE is taken from the message provided by the user, and PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended automatically. In addition, record.MESSAGE_ID will be used if present. """ if record.args and isinstance(record.args, collections.Mapping): extra = dict(self._extra, **record.args) # Merge metadata from handler and record else: extra = self._extra try: msg = self.format(record) pri = self.mapPriority(record.levelno) mid = getattr(record, 'MESSAGE_ID', None) send(msg, SOCKET=self.socket, MESSAGE_ID=mid, PRIORITY=format(pri), LOGGER=record.name, THREAD_NAME=record.threadName, CODE_FILE=record.pathname, CODE_LINE=record.lineno, CODE_FUNC=record.funcName, **extra) except Exception: self.handleError(record)
def periodic_distance(a, b, periodic): ''' Periodic distance between two arrays. Periodic is a 3 dimensional array containing the 3 box sizes. ''' a = np.array(a) b = np.array(b) periodic = np.array(periodic) delta = np.abs(a - b) delta = np.where(delta > 0.5 * periodic, periodic - delta, delta) return np.sqrt((delta ** 2).sum(axis=-1))
Periodic distance between two arrays. Periodic is a 3 dimensional array containing the 3 box sizes.
Below is the the instruction that describes the task: ### Input: Periodic distance between two arrays. Periodic is a 3 dimensional array containing the 3 box sizes. ### Response: def periodic_distance(a, b, periodic): ''' Periodic distance between two arrays. Periodic is a 3 dimensional array containing the 3 box sizes. ''' a = np.array(a) b = np.array(b) periodic = np.array(periodic) delta = np.abs(a - b) delta = np.where(delta > 0.5 * periodic, periodic - delta, delta) return np.sqrt((delta ** 2).sum(axis=-1))
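A quick check of the minimum-image behaviour; the function is reproduced so the snippet runs standalone with just numpy.

```python
import numpy as np

def periodic_distance(a, b, periodic):
    # Copied from the row above: wrap any separation larger than half the
    # box length back through the periodic boundary.
    a, b, periodic = np.array(a), np.array(b), np.array(periodic)
    delta = np.abs(a - b)
    delta = np.where(delta > 0.5 * periodic, periodic - delta, delta)
    return np.sqrt((delta ** 2).sum(axis=-1))

# Points near opposite corners of a 10 x 10 x 10 box are ~1.73 apart
# once periodic images are taken into account (sqrt(3), not ~15.6).
print(periodic_distance([0.5, 0.5, 0.5], [9.5, 9.5, 9.5], [10.0, 10.0, 10.0]))
```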
def unpack_from_dict(fmt, names, data, offset=0): """Same as :func:`~bitstruct.unpack_from()`, but returns a dictionary. See :func:`~bitstruct.pack_dict()` for details on `names`. """ return CompiledFormatDict(fmt, names).unpack_from(data, offset)
Same as :func:`~bitstruct.unpack_from()`, but returns a dictionary. See :func:`~bitstruct.pack_dict()` for details on `names`.
Below is the instruction that describes the task: ### Input: Same as :func:`~bitstruct.unpack_from()`, but returns a dictionary. See :func:`~bitstruct.pack_dict()` for details on `names`. ### Response: def unpack_from_dict(fmt, names, data, offset=0): """Same as :func:`~bitstruct.unpack_from()`, but returns a dictionary. See :func:`~bitstruct.pack_dict()` for details on `names`. """ return CompiledFormatDict(fmt, names).unpack_from(data, offset)
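A small usage sketch, assuming the third-party bitstruct package is installed and that its module-level unpack_from_dict matches the wrapper above.

```python
import bitstruct

# 'u4u4' reads two unsigned 4-bit fields (most significant bits first),
# so the single byte 0x12 splits into a=1 and b=2.
print(bitstruct.unpack_from_dict("u4u4", ["a", "b"], b"\x12"))  # {'a': 1, 'b': 2}
```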
def is_valid_country_abbrev(abbrev, case_sensitive=False): """ Given a country code abbreviation, check to see if it matches the country table. abbrev: (str) Country code to evaluate. case_sensitive: (bool) When True, enforce case sensitivity. Returns True if valid, False if not. """ if case_sensitive: country_code = abbrev else: country_code = abbrev.upper() for code, full_name in COUNTRY_TUPLES: if country_code == code: return True return False
Given a country code abbreviation, check to see if it matches the country table. abbrev: (str) Country code to evaluate. case_sensitive: (bool) When True, enforce case sensitivity. Returns True if valid, False if not.
Below is the the instruction that describes the task: ### Input: Given a country code abbreviation, check to see if it matches the country table. abbrev: (str) Country code to evaluate. case_sensitive: (bool) When True, enforce case sensitivity. Returns True if valid, False if not. ### Response: def is_valid_country_abbrev(abbrev, case_sensitive=False): """ Given a country code abbreviation, check to see if it matches the country table. abbrev: (str) Country code to evaluate. case_sensitive: (bool) When True, enforce case sensitivity. Returns True if valid, False if not. """ if case_sensitive: country_code = abbrev else: country_code = abbrev.upper() for code, full_name in COUNTRY_TUPLES: if country_code == code: return True return False
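A standalone sketch of the lookup; COUNTRY_TUPLES here is a tiny hypothetical stand-in for the real module-level table.

```python
# Hypothetical, abbreviated stand-in for the module-level COUNTRY_TUPLES.
COUNTRY_TUPLES = [("US", "United States"), ("DE", "Germany"), ("JP", "Japan")]

def is_valid_country_abbrev(abbrev, case_sensitive=False):
    country_code = abbrev if case_sensitive else abbrev.upper()
    for code, full_name in COUNTRY_TUPLES:
        if country_code == code:
            return True
    return False

print(is_valid_country_abbrev("us"))                       # True, folded to upper case
print(is_valid_country_abbrev("us", case_sensitive=True))  # False, stored codes are upper case
```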
def mline_point_(self, col, x=None, y=None, rsum=None, rmean=None): """ Splits a column into multiple series based on the column's unique values. Then visualizes these series in a chart. Parameters: column to split, x axis column, y axis column Optional: rsum="1D" to resample and sum the data, and rmean="1D" to average the data """ line = self._multiseries(col, x, y, "line", rsum, rmean) point = self._multiseries(col, x, y, "point", rsum, rmean) return line * point
Splits a column into multiple series based on the column's unique values. Then visualizes these series in a chart. Parameters: column to split, x axis column, y axis column Optional: rsum="1D" to resample and sum the data, and rmean="1D" to average the data
Below is the instruction that describes the task: ### Input: Splits a column into multiple series based on the column's unique values. Then visualizes these series in a chart. Parameters: column to split, x axis column, y axis column Optional: rsum="1D" to resample and sum the data, and rmean="1D" to average the data ### Response: def mline_point_(self, col, x=None, y=None, rsum=None, rmean=None): """ Splits a column into multiple series based on the column's unique values. Then visualizes these series in a chart. Parameters: column to split, x axis column, y axis column Optional: rsum="1D" to resample and sum the data, and rmean="1D" to average the data """ line = self._multiseries(col, x, y, "line", rsum, rmean) point = self._multiseries(col, x, y, "point", rsum, rmean) return line * point
def processStream(self): """Process a brotli stream. """ print('addr hex{:{}s}binary context explanation'.format( '', self.width-10)) print('Stream header'.center(60, '-')) self.windowSize = self.verboseRead(WindowSizeAlphabet()) print('Metablock header'.center(60, '=')) self.ISLAST = False self.output = bytearray() while not self.ISLAST: self.ISLAST = self.verboseRead( BoolCode('LAST', description="Last block")) if self.ISLAST: if self.verboseRead( BoolCode('EMPTY', description="Empty block")): break if self.metablockLength(): continue if not self.ISLAST and self.uncompressed(): continue print('Block type descriptors'.center(60, '-')) self.numberOfBlockTypes = {} self.currentBlockCounts = {} self.blockTypeCodes = {} self.blockCountCodes = {} for blockType in (L,I,D): self.blockType(blockType) print('Distance code parameters'.center(60, '-')) self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet()) self.readLiteralContextModes() print('Context maps'.center(60, '-')) self.cmaps = {} #keep the number of each kind of prefix tree for the last loop numberOfTrees = {I: self.numberOfBlockTypes[I]} for blockType in (L,D): numberOfTrees[blockType] = self.contextMap(blockType) print('Prefix code lists'.center(60, '-')) self.prefixCodes = {} for blockType in (L,I,D): self.readPrefixArray(blockType, numberOfTrees[blockType]) self.metablock()
Process a brotli stream.
Below is the the instruction that describes the task: ### Input: Process a brotli stream. ### Response: def processStream(self): """Process a brotli stream. """ print('addr hex{:{}s}binary context explanation'.format( '', self.width-10)) print('Stream header'.center(60, '-')) self.windowSize = self.verboseRead(WindowSizeAlphabet()) print('Metablock header'.center(60, '=')) self.ISLAST = False self.output = bytearray() while not self.ISLAST: self.ISLAST = self.verboseRead( BoolCode('LAST', description="Last block")) if self.ISLAST: if self.verboseRead( BoolCode('EMPTY', description="Empty block")): break if self.metablockLength(): continue if not self.ISLAST and self.uncompressed(): continue print('Block type descriptors'.center(60, '-')) self.numberOfBlockTypes = {} self.currentBlockCounts = {} self.blockTypeCodes = {} self.blockCountCodes = {} for blockType in (L,I,D): self.blockType(blockType) print('Distance code parameters'.center(60, '-')) self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet()) self.readLiteralContextModes() print('Context maps'.center(60, '-')) self.cmaps = {} #keep the number of each kind of prefix tree for the last loop numberOfTrees = {I: self.numberOfBlockTypes[I]} for blockType in (L,D): numberOfTrees[blockType] = self.contextMap(blockType) print('Prefix code lists'.center(60, '-')) self.prefixCodes = {} for blockType in (L,I,D): self.readPrefixArray(blockType, numberOfTrees[blockType]) self.metablock()
def build_recursive_delocalize_command(source, outputs, file_provider): """Return a multi-line string with a shell script to copy recursively. Arguments: source: Folder with the data. For example /mnt/data outputs: a list of OutputFileParam. file_provider: file provider string used to filter the output params; the returned command will only apply outputs whose file provider matches this file filter. Returns: a multi-line string with a shell script that copies the inputs recursively to GCS. """ command = _LOCALIZE_COMMAND_MAP[file_provider] filtered_outputs = [ var for var in outputs if var.recursive and var.file_provider == file_provider ] return '\n'.join([ textwrap.dedent(""" for ((i = 0; i < 3; i++)); do if {command} {data_mount}/{docker_path} {destination_uri}; then break elif ((i == 2)); then 2>&1 echo "Recursive de-localization failed." exit 1 fi done """).format( command=command, data_mount=source.rstrip('/'), docker_path=var.docker_path, destination_uri=var.uri) for var in filtered_outputs ])
Return a multi-line string with a shell script to copy recursively. Arguments: source: Folder with the data. For example /mnt/data outputs: a list of OutputFileParam. file_provider: file provider string used to filter the output params; the returned command will only apply outputs whose file provider matches this file filter. Returns: a multi-line string with a shell script that copies the inputs recursively to GCS.
Below is the the instruction that describes the task: ### Input: Return a multi-line string with a shell script to copy recursively. Arguments: source: Folder with the data. For example /mnt/data outputs: a list of OutputFileParam. file_provider: file provider string used to filter the output params; the returned command will only apply outputs whose file provider matches this file filter. Returns: a multi-line string with a shell script that copies the inputs recursively to GCS. ### Response: def build_recursive_delocalize_command(source, outputs, file_provider): """Return a multi-line string with a shell script to copy recursively. Arguments: source: Folder with the data. For example /mnt/data outputs: a list of OutputFileParam. file_provider: file provider string used to filter the output params; the returned command will only apply outputs whose file provider matches this file filter. Returns: a multi-line string with a shell script that copies the inputs recursively to GCS. """ command = _LOCALIZE_COMMAND_MAP[file_provider] filtered_outputs = [ var for var in outputs if var.recursive and var.file_provider == file_provider ] return '\n'.join([ textwrap.dedent(""" for ((i = 0; i < 3; i++)); do if {command} {data_mount}/{docker_path} {destination_uri}; then break elif ((i == 2)); then 2>&1 echo "Recursive de-localization failed." exit 1 fi done """).format( command=command, data_mount=source.rstrip('/'), docker_path=var.docker_path, destination_uri=var.uri) for var in filtered_outputs ])
def get_end_balance(self, after: date) -> Decimal: """ Calculates account balance """ # create a new date without hours #date_corrected = datetimeutils.end_of_day(after) datum = Datum() datum.from_date(after) datum.end_of_day() #log(DEBUG, "getting balance on %s", date_corrected) return self.get_balance_on(datum.value)
Calculates account balance
Below is the the instruction that describes the task: ### Input: Calculates account balance ### Response: def get_end_balance(self, after: date) -> Decimal: """ Calculates account balance """ # create a new date without hours #date_corrected = datetimeutils.end_of_day(after) datum = Datum() datum.from_date(after) datum.end_of_day() #log(DEBUG, "getting balance on %s", date_corrected) return self.get_balance_on(datum.value)
def jenks_breaks(values, nb_class): """ Compute jenks natural breaks on a sequence of `values`, given `nb_class`, the number of desired class. Parameters ---------- values : array-like The Iterable sequence of numbers (integer/float) to be used. nb_class : int The desired number of class (as some other functions requests a `k` value, `nb_class` is like `k` + 1). Have to be lesser than the length of `values` and greater than 2. Returns ------- breaks : tuple of floats The computed break values, including minimum and maximum, in order to have all the bounds for building `nb_class` class, so the returned tuple has a length of `nb_class` + 1. Examples -------- Using nb_class = 3, expecting 4 break values , including min and max : >>> jenks_breaks( [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3], nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8) """ if not isinstance(values, Iterable) or isinstance(values, (str, bytes)): raise TypeError("A sequence of numbers is expected") if isinstance(nb_class, float) and int(nb_class) == nb_class: nb_class = int(nb_class) if not isinstance(nb_class, int): raise TypeError( "Number of class have to be a positive integer: " "expected an instance of 'int' but found {}" .format(type(nb_class))) nb_values = len(values) if np and isinstance(values, np.ndarray): values = values[np.argwhere(np.isfinite(values)).reshape(-1)] else: values = [i for i in values if isfinite(i)] if len(values) != nb_values: warnings.warn('Invalid values encountered (NaN or Inf) were ignored') nb_values = len(values) if nb_class >= nb_values or nb_class < 2: raise ValueError("Number of class have to be an integer " "greater than 2 and " "smaller than the number of values to use") return jenks._jenks_breaks(values, nb_class)
Compute jenks natural breaks on a sequence of `values`, given `nb_class`, the number of desired class. Parameters ---------- values : array-like The Iterable sequence of numbers (integer/float) to be used. nb_class : int The desired number of class (as some other functions requests a `k` value, `nb_class` is like `k` + 1). Have to be lesser than the length of `values` and greater than 2. Returns ------- breaks : tuple of floats The computed break values, including minimum and maximum, in order to have all the bounds for building `nb_class` class, so the returned tuple has a length of `nb_class` + 1. Examples -------- Using nb_class = 3, expecting 4 break values , including min and max : >>> jenks_breaks( [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3], nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8)
Below is the the instruction that describes the task: ### Input: Compute jenks natural breaks on a sequence of `values`, given `nb_class`, the number of desired class. Parameters ---------- values : array-like The Iterable sequence of numbers (integer/float) to be used. nb_class : int The desired number of class (as some other functions requests a `k` value, `nb_class` is like `k` + 1). Have to be lesser than the length of `values` and greater than 2. Returns ------- breaks : tuple of floats The computed break values, including minimum and maximum, in order to have all the bounds for building `nb_class` class, so the returned tuple has a length of `nb_class` + 1. Examples -------- Using nb_class = 3, expecting 4 break values , including min and max : >>> jenks_breaks( [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3], nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8) ### Response: def jenks_breaks(values, nb_class): """ Compute jenks natural breaks on a sequence of `values`, given `nb_class`, the number of desired class. Parameters ---------- values : array-like The Iterable sequence of numbers (integer/float) to be used. nb_class : int The desired number of class (as some other functions requests a `k` value, `nb_class` is like `k` + 1). Have to be lesser than the length of `values` and greater than 2. Returns ------- breaks : tuple of floats The computed break values, including minimum and maximum, in order to have all the bounds for building `nb_class` class, so the returned tuple has a length of `nb_class` + 1. Examples -------- Using nb_class = 3, expecting 4 break values , including min and max : >>> jenks_breaks( [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3], nb_class = 3) # Should output (1.2, 2.3, 5.0, 7.8) """ if not isinstance(values, Iterable) or isinstance(values, (str, bytes)): raise TypeError("A sequence of numbers is expected") if isinstance(nb_class, float) and int(nb_class) == nb_class: nb_class = int(nb_class) if not isinstance(nb_class, int): raise TypeError( "Number of class have to be a positive integer: " "expected an instance of 'int' but found {}" .format(type(nb_class))) nb_values = len(values) if np and isinstance(values, np.ndarray): values = values[np.argwhere(np.isfinite(values)).reshape(-1)] else: values = [i for i in values if isfinite(i)] if len(values) != nb_values: warnings.warn('Invalid values encountered (NaN or Inf) were ignored') nb_values = len(values) if nb_class >= nb_values or nb_class < 2: raise ValueError("Number of class have to be an integer " "greater than 2 and " "smaller than the number of values to use") return jenks._jenks_breaks(values, nb_class)
def fields(self): """Filter fields based on request query parameters.""" fields = super().fields return apply_subfield_projection(self, copy.copy(fields))
Filter fields based on request query parameters.
Below is the the instruction that describes the task: ### Input: Filter fields based on request query parameters. ### Response: def fields(self): """Filter fields based on request query parameters.""" fields = super().fields return apply_subfield_projection(self, copy.copy(fields))
def _get_apphook_field_names(model): """ Return all foreign key field names for a AppHookConfig based model """ from .models import AppHookConfig # avoid circular dependencies fields = [] for field in model._meta.fields: if isinstance(field, ForeignKey) and issubclass(field.remote_field.model, AppHookConfig): fields.append(field) return [field.name for field in fields]
Return all foreign key field names for a AppHookConfig based model
Below is the the instruction that describes the task: ### Input: Return all foreign key field names for a AppHookConfig based model ### Response: def _get_apphook_field_names(model): """ Return all foreign key field names for a AppHookConfig based model """ from .models import AppHookConfig # avoid circular dependencies fields = [] for field in model._meta.fields: if isinstance(field, ForeignKey) and issubclass(field.remote_field.model, AppHookConfig): fields.append(field) return [field.name for field in fields]
def build_includes(include_packages, freezer=None, optional=None): """ Iterate the list of packages to build a complete list of those packages as well as all subpackages. :param include_packages: list of package names :type include_packages: list of basestr :param freezer: The freezer to use (See FREEZER constants) :param optional: Optional package names to include (will only issue a warning if they don't exist) :return: complete set of package includes """ freezer = resolve_freezer(freezer) # Import (or get reference to) all listed packages to ensure that they exist. package_references = _import_packages(include_packages, optional=optional) # Find all includes for the given freezer type includes = freezer.build_includes(package_references) return includes
Iterate the list of packages to build a complete list of those packages as well as all subpackages. :param include_packages: list of package names :type include_packages: list of basestr :param freezer: The freezer to use (See FREEZER constants) :param optional: Optional package names to include (will only issue a warning if they don't exist) :return: complete set of package includes
Below is the instruction that describes the task: ### Input: Iterate the list of packages to build a complete list of those packages as well as all subpackages. :param include_packages: list of package names :type include_packages: list of basestr :param freezer: The freezer to use (See FREEZER constants) :param optional: Optional package names to include (will only issue a warning if they don't exist) :return: complete set of package includes ### Response: def build_includes(include_packages, freezer=None, optional=None): """ Iterate the list of packages to build a complete list of those packages as well as all subpackages. :param include_packages: list of package names :type include_packages: list of basestr :param freezer: The freezer to use (See FREEZER constants) :param optional: Optional package names to include (will only issue a warning if they don't exist) :return: complete set of package includes """ freezer = resolve_freezer(freezer) # Import (or get reference to) all listed packages to ensure that they exist. package_references = _import_packages(include_packages, optional=optional) # Find all includes for the given freezer type includes = freezer.build_includes(package_references) return includes
def addchild(self, startip, endip, name, description): """ Method takes input of str startip, str endip, name, and description and adds a child scope. The startip and endip MUST be in the IP address range of the parent scope. :param startip: str of ipv4 address of the first address in the child scope :param endip: str of ipv4 address of the last address in the child scope :param name: name of the owner of the child scope :param description: description of the child scope :return: """ add_child_ip_scope(self.auth, self.url, startip, endip, name, description, self.id)
Method takes input of str startip, str endip, name, and description and adds a child scope. The startip and endip MUST be in the IP address range of the parent scope. :param startip: str of ipv4 address of the first address in the child scope :param endip: str of ipv4 address of the last address in the child scope :param name: name of the owner of the child scope :param description: description of the child scope :return:
Below is the instruction that describes the task: ### Input: Method takes input of str startip, str endip, name, and description and adds a child scope. The startip and endip MUST be in the IP address range of the parent scope. :param startip: str of ipv4 address of the first address in the child scope :param endip: str of ipv4 address of the last address in the child scope :param name: name of the owner of the child scope :param description: description of the child scope :return: ### Response: def addchild(self, startip, endip, name, description): """ Method takes input of str startip, str endip, name, and description and adds a child scope. The startip and endip MUST be in the IP address range of the parent scope. :param startip: str of ipv4 address of the first address in the child scope :param endip: str of ipv4 address of the last address in the child scope :param name: name of the owner of the child scope :param description: description of the child scope :return: """ add_child_ip_scope(self.auth, self.url, startip, endip, name, description, self.id)
def add_external_reference(self,term_id, external_ref): """ Adds an external reference for the given term @type term_id: string @param term_id: the term identifier @type external_ref: L{CexternalReference} @param external_ref: the external reference object """ if term_id in self.idx: term_obj = Cterm(self.idx[term_id],self.type) term_obj.add_external_reference(external_ref) else: print('{term_id} not in self.idx'.format(**locals()))
Adds an external reference for the given term @type term_id: string @param term_id: the term identifier @type external_ref: L{CexternalReference} @param external_ref: the external reference object
Below is the the instruction that describes the task: ### Input: Adds an external reference for the given term @type term_id: string @param term_id: the term identifier @type external_ref: L{CexternalReference} @param external_ref: the external reference object ### Response: def add_external_reference(self,term_id, external_ref): """ Adds an external reference for the given term @type term_id: string @param term_id: the term identifier @type external_ref: L{CexternalReference} @param external_ref: the external reference object """ if term_id in self.idx: term_obj = Cterm(self.idx[term_id],self.type) term_obj.add_external_reference(external_ref) else: print('{term_id} not in self.idx'.format(**locals()))
def ndarray_to_imagedatadict(nparr): """ Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file. """ ret = {} dm_type = None for k, v in iter(dm_image_dtypes.items()): if v[1] == nparr.dtype.type: dm_type = k break if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4): ret["DataType"] = 23 ret["PixelDepth"] = 4 if nparr.shape[2] == 4: rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1]) # squash the color into uint32 else: assert nparr.shape[2] == 3 rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8) rgba_image[:,:,0:3] = nparr rgba_image[:,:,3] = 255 rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1]) # squash the color into uint32 ret["Dimensions"] = list(rgb_view.shape[::-1]) ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten()) else: ret["DataType"] = dm_type ret["PixelDepth"] = nparr.dtype.itemsize ret["Dimensions"] = list(nparr.shape[::-1]) if nparr.dtype.type in np_to_structarray_map: types = np_to_structarray_map[nparr.dtype.type] ret["Data"] = parse_dm3.structarray(types) ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data) else: ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten()) return ret
Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file.
Below is the the instruction that describes the task: ### Input: Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file. ### Response: def ndarray_to_imagedatadict(nparr): """ Convert the numpy array nparr into a suitable ImageList entry dictionary. Returns a dictionary with the appropriate Data, DataType, PixelDepth to be inserted into a dm3 tag dictionary and written to a file. """ ret = {} dm_type = None for k, v in iter(dm_image_dtypes.items()): if v[1] == nparr.dtype.type: dm_type = k break if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4): ret["DataType"] = 23 ret["PixelDepth"] = 4 if nparr.shape[2] == 4: rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1]) # squash the color into uint32 else: assert nparr.shape[2] == 3 rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8) rgba_image[:,:,0:3] = nparr rgba_image[:,:,3] = 255 rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1]) # squash the color into uint32 ret["Dimensions"] = list(rgb_view.shape[::-1]) ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten()) else: ret["DataType"] = dm_type ret["PixelDepth"] = nparr.dtype.itemsize ret["Dimensions"] = list(nparr.shape[::-1]) if nparr.dtype.type in np_to_structarray_map: types = np_to_structarray_map[nparr.dtype.type] ret["Data"] = parse_dm3.structarray(types) ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data) else: ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten()) return ret
def get(self, name): """ Return a value from this evaluator. Because calculated tensors are cached, subtle bugs may arise if the same value is used multiple times with and without the no_grad() context. In such cases it is advised not to use no_grad() and to stick to .detach() """ if name in self._storage: return self._storage[name] elif name in self._providers: value = self._storage[name] = self._providers[name](self) return value elif name.startswith('rollout:'): rollout_name = name[8:] value = self._storage[name] = self.rollout.batch_tensor(rollout_name) return value else: raise RuntimeError(f"Key {name} is not provided by this evaluator")
Return a value from this evaluator. Because calculated tensors are cached, subtle bugs may arise if the same value is used multiple times with and without the no_grad() context. In such cases it is advised not to use no_grad() and to stick to .detach()
Below is the instruction that describes the task: ### Input: Return a value from this evaluator. Because calculated tensors are cached, subtle bugs may arise if the same value is used multiple times with and without the no_grad() context. In such cases it is advised not to use no_grad() and to stick to .detach() ### Response: def get(self, name): """ Return a value from this evaluator. Because calculated tensors are cached, subtle bugs may arise if the same value is used multiple times with and without the no_grad() context. In such cases it is advised not to use no_grad() and to stick to .detach() """ if name in self._storage: return self._storage[name] elif name in self._providers: value = self._storage[name] = self._providers[name](self) return value elif name.startswith('rollout:'): rollout_name = name[8:] value = self._storage[name] = self.rollout.batch_tensor(rollout_name) return value else: raise RuntimeError(f"Key {name} is not provided by this evaluator")
def fault_barrier(fn): """Method decorator to catch and log errors, then send fail message.""" @functools.wraps(fn) def process(self, tup): try: return fn(self, tup) except Exception as e: if isinstance(e, KeyboardInterrupt): return print(str(e), file=sys.stderr) self.fail(tup) return process
Method decorator to catch and log errors, then send fail message.
Below is the the instruction that describes the task: ### Input: Method decorator to catch and log errors, then send fail message. ### Response: def fault_barrier(fn): """Method decorator to catch and log errors, then send fail message.""" @functools.wraps(fn) def process(self, tup): try: return fn(self, tup) except Exception as e: if isinstance(e, KeyboardInterrupt): return print(str(e), file=sys.stderr) self.fail(tup) return process
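A standalone sketch of how the decorator is meant to wrap a bolt's process method; FakeBolt and the tuple value are hypothetical stand-ins for the real streaming objects.

```python
import functools
import sys

def fault_barrier(fn):
    # Copied from the row above: log the error and fail the tuple instead
    # of letting the exception propagate out of the worker.
    @functools.wraps(fn)
    def process(self, tup):
        try:
            return fn(self, tup)
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                return
            print(str(e), file=sys.stderr)
            self.fail(tup)
    return process

class FakeBolt:  # hypothetical stand-in for a real bolt class
    def fail(self, tup):
        print("failed tuple:", tup)

    @fault_barrier
    def process(self, tup):
        raise ValueError("bad record")

FakeBolt().process("tuple-1")  # logs "bad record", then prints "failed tuple: tuple-1"
```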
def dst(self, dt): """Calculate delta for daylight saving.""" # Daylight saving starts on the second Sunday of March at 2AM standard dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \ + timedelta(hours=2) # Daylight saving ends on the first Sunday of November at 2AM standard dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2) if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date: return timedelta(hours=1) else: return timedelta(0)
Calculate delta for daylight saving.
Below is the the instruction that describes the task: ### Input: Calculate delta for daylight saving. ### Response: def dst(self, dt): """Calculate delta for daylight saving.""" # Daylight saving starts on the second Sunday of March at 2AM standard dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \ + timedelta(hours=2) # Daylight saving ends on the first Sunday of November at 2AM standard dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2) if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date: return timedelta(hours=1) else: return timedelta(0)
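The same second-Sunday-of-March / first-Sunday-of-November rule, checked standalone with a first_sunday helper assumed to return the first Sunday of the month at midnight.

```python
from datetime import datetime, timedelta

def first_sunday(year, month):
    # Assumed equivalent of the class helper: first Sunday of the month, 00:00.
    d = datetime(year, month, 1)
    return d + timedelta(days=(6 - d.weekday()) % 7)

year = 2024
dst_start = first_sunday(year, 3) + timedelta(days=7) + timedelta(hours=2)
dst_end = first_sunday(year, 11) + timedelta(hours=2)
print(dst_start)  # 2024-03-10 02:00:00, second Sunday of March at 2 AM
print(dst_end)    # 2024-11-03 02:00:00, first Sunday of November at 2 AM
```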
def main(): """pyprf_sim entry point.""" # Get list of input arguments (without first one, which is the path to the # function that is called): --NOTE: This is another way of accessing # input arguments, but since we use 'argparse' it is redundant. # lstArgs = sys.argv[1:] strWelcome = 'pyprf_sim ' + __version__ strDec = '=' * len(strWelcome) print(strDec + '\n' + strWelcome + '\n' + strDec) objNspc = get_arg_parse() # Print info if no config argument is provided. if any(item is None for item in [objNspc.strCsvPrf, objNspc.strStmApr]): print('Please provide necessary file paths, e.g.:') print(' pyprf_sim -strCsvPrf /path/to/my_config_file.csv') print(' -strStmApr /path/to/my_stim_apertures.npy') else: # Signal non-test mode to lower functions (needed for pytest): lgcTest = False # Call to main function, to invoke pRF analysis: pyprf_sim(objNspc.strCsvPrf, objNspc.strStmApr, lgcTest=lgcTest, lgcNoise=objNspc.lgcNoise, lgcRtnNrl=objNspc.lgcRtnNrl, lstRat=objNspc.supsur)
pyprf_sim entry point.
Below is the the instruction that describes the task: ### Input: pyprf_sim entry point. ### Response: def main(): """pyprf_sim entry point.""" # Get list of input arguments (without first one, which is the path to the # function that is called): --NOTE: This is another way of accessing # input arguments, but since we use 'argparse' it is redundant. # lstArgs = sys.argv[1:] strWelcome = 'pyprf_sim ' + __version__ strDec = '=' * len(strWelcome) print(strDec + '\n' + strWelcome + '\n' + strDec) objNspc = get_arg_parse() # Print info if no config argument is provided. if any(item is None for item in [objNspc.strCsvPrf, objNspc.strStmApr]): print('Please provide necessary file paths, e.g.:') print(' pyprf_sim -strCsvPrf /path/to/my_config_file.csv') print(' -strStmApr /path/to/my_stim_apertures.npy') else: # Signal non-test mode to lower functions (needed for pytest): lgcTest = False # Call to main function, to invoke pRF analysis: pyprf_sim(objNspc.strCsvPrf, objNspc.strStmApr, lgcTest=lgcTest, lgcNoise=objNspc.lgcNoise, lgcRtnNrl=objNspc.lgcRtnNrl, lstRat=objNspc.supsur)
def from_quad_tree(cls, quad_tree): """Creates a tile from a Microsoft QuadTree""" assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consists of the digits 0, 1, 2 and 3.' zoom = len(str(quad_tree)) offset = int(math.pow(2, zoom)) - 1 google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0) for bits in zip(*(reversed(divmod(digit, 2)) for digit in (int(c) for c in str(quad_tree))))] return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
Creates a tile from a Microsoft QuadTree
Below is the the instruction that describes the task: ### Input: Creates a tile from a Microsoft QuadTree ### Response: def from_quad_tree(cls, quad_tree): """Creates a tile from a Microsoft QuadTree""" assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consists of the digits 0, 1, 2 and 3.' zoom = len(str(quad_tree)) offset = int(math.pow(2, zoom)) - 1 google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0) for bits in zip(*(reversed(divmod(digit, 2)) for digit in (int(c) for c in str(quad_tree))))] return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
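The heart of the conversion is de-interleaving the quadkey digits into x and y bit streams; this standalone sketch reproduces that step for a sample key without the tile class.

```python
import math
from functools import reduce

quad_tree = "120"  # each digit encodes x_bit + 2 * y_bit for one zoom level
zoom = len(quad_tree)
offset = int(math.pow(2, zoom)) - 1

# divmod(digit, 2) yields (y_bit, x_bit); reversed() swaps them, zip(*)
# collects the x bits and y bits, and reduce() packs each stream into an int.
google_x, google_y = [
    reduce(lambda result, bit: (result << 1) | bit, bits, 0)
    for bits in zip(*(reversed(divmod(int(c), 2)) for c in quad_tree))
]
print(zoom, google_x, google_y, offset - google_y)  # 3 4 2 5 (zoom, x, y, tms_y)
```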
def _infer_spaces(s): """ Uses dynamic programming to infer the location of spaces in a string without spaces. """ s = s.lower() # Find the best match for the i first characters, assuming cost has # been built for the i-1 first characters. # Returns a pair (match_cost, match_length). def best_match(i): candidates = enumerate(reversed(cost[max(0, i - MAXWORD):i])) return min((c + WORDCOST.get(s[i-k-1: i], 9e999), k + 1) for k, c in candidates) # Build the cost array. cost = [0] for i in range(1, len(s) + 1): c, k = best_match(i) cost.append(c) # Backtrack to recover the minimal-cost string. out = [] i = len(s) while i > 0: c, k = best_match(i) assert c == cost[i] out.append(s[i-k:i]) i -= k return u" ".join(reversed(out))
Uses dynamic programming to infer the location of spaces in a string without spaces.
Below is the the instruction that describes the task: ### Input: Uses dynamic programming to infer the location of spaces in a string without spaces. ### Response: def _infer_spaces(s): """ Uses dynamic programming to infer the location of spaces in a string without spaces. """ s = s.lower() # Find the best match for the i first characters, assuming cost has # been built for the i-1 first characters. # Returns a pair (match_cost, match_length). def best_match(i): candidates = enumerate(reversed(cost[max(0, i - MAXWORD):i])) return min((c + WORDCOST.get(s[i-k-1: i], 9e999), k + 1) for k, c in candidates) # Build the cost array. cost = [0] for i in range(1, len(s) + 1): c, k = best_match(i) cost.append(c) # Backtrack to recover the minimal-cost string. out = [] i = len(s) while i > 0: c, k = best_match(i) assert c == cost[i] out.append(s[i-k:i]) i -= k return u" ".join(reversed(out))
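A toy end-to-end run of the same dynamic program; the WORDCOST table here is a tiny Zipf-style stand-in for the module-level one, which is normally built from a large word-frequency list.

```python
from math import log

# Hypothetical mini word list; the real code derives costs from word frequencies.
words = ["this", "is", "a", "small", "test"]
WORDCOST = {w: log((i + 1) * log(len(words))) for i, w in enumerate(words)}
MAXWORD = max(len(w) for w in words)

def infer_spaces(s):
    s = s.lower()
    cost = [0]  # cost[i] = cheapest segmentation of the first i characters

    def best_match(i):
        candidates = enumerate(reversed(cost[max(0, i - MAXWORD):i]))
        return min((c + WORDCOST.get(s[i - k - 1:i], 9e999), k + 1)
                   for k, c in candidates)

    for i in range(1, len(s) + 1):
        c, k = best_match(i)
        cost.append(c)

    out, i = [], len(s)
    while i > 0:            # backtrack along the cheapest path
        c, k = best_match(i)
        out.append(s[i - k:i])
        i -= k
    return " ".join(reversed(out))

print(infer_spaces("thisisasmalltest"))  # this is a small test
```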
def from_const(cls, value, size, dtype=type(None)): """ Constructs an SArray of size with a const value. Parameters ---------- value : [int | float | str | array.array | list | dict | datetime] The value to fill the SArray size : int The size of the SArray dtype : type The type of the SArray. If not specified, is automatically detected from the value. This should be specified if value=None since the actual type of the SArray can be anything. Examples -------- Construct an SArray consisting of 10 zeroes: >>> turicreate.SArray.from_const(0, 10) Construct an SArray consisting of 10 missing string values: >>> turicreate.SArray.from_const(None, 10, str) """ assert isinstance(size, (int, long)) and size >= 0, "size must be a positive int" if not isinstance(value, (type(None), int, float, str, array.array, list, dict, datetime.datetime)): raise TypeError('Cannot create sarray of value type %s' % str(type(value))) proxy = UnitySArrayProxy() proxy.load_from_const(value, size, dtype) return cls(_proxy=proxy)
Constructs an SArray of size with a const value. Parameters ---------- value : [int | float | str | array.array | list | dict | datetime] The value to fill the SArray size : int The size of the SArray dtype : type The type of the SArray. If not specified, is automatically detected from the value. This should be specified if value=None since the actual type of the SArray can be anything. Examples -------- Construct an SArray consisting of 10 zeroes: >>> turicreate.SArray.from_const(0, 10) Construct an SArray consisting of 10 missing string values: >>> turicreate.SArray.from_const(None, 10, str)
Below is the the instruction that describes the task: ### Input: Constructs an SArray of size with a const value. Parameters ---------- value : [int | float | str | array.array | list | dict | datetime] The value to fill the SArray size : int The size of the SArray dtype : type The type of the SArray. If not specified, is automatically detected from the value. This should be specified if value=None since the actual type of the SArray can be anything. Examples -------- Construct an SArray consisting of 10 zeroes: >>> turicreate.SArray.from_const(0, 10) Construct an SArray consisting of 10 missing string values: >>> turicreate.SArray.from_const(None, 10, str) ### Response: def from_const(cls, value, size, dtype=type(None)): """ Constructs an SArray of size with a const value. Parameters ---------- value : [int | float | str | array.array | list | dict | datetime] The value to fill the SArray size : int The size of the SArray dtype : type The type of the SArray. If not specified, is automatically detected from the value. This should be specified if value=None since the actual type of the SArray can be anything. Examples -------- Construct an SArray consisting of 10 zeroes: >>> turicreate.SArray.from_const(0, 10) Construct an SArray consisting of 10 missing string values: >>> turicreate.SArray.from_const(None, 10, str) """ assert isinstance(size, (int, long)) and size >= 0, "size must be a positive int" if not isinstance(value, (type(None), int, float, str, array.array, list, dict, datetime.datetime)): raise TypeError('Cannot create sarray of value type %s' % str(type(value))) proxy = UnitySArrayProxy() proxy.load_from_const(value, size, dtype) return cls(_proxy=proxy)
def show_support_save_status_output_show_support_save_status_percentage_of_completion(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_support_save_status = ET.Element("show_support_save_status") config = show_support_save_status output = ET.SubElement(show_support_save_status, "output") show_support_save_status = ET.SubElement(output, "show-support-save-status") percentage_of_completion = ET.SubElement(show_support_save_status, "percentage-of-completion") percentage_of_completion.text = kwargs.pop('percentage_of_completion') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def show_support_save_status_output_show_support_save_status_percentage_of_completion(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_support_save_status = ET.Element("show_support_save_status") config = show_support_save_status output = ET.SubElement(show_support_save_status, "output") show_support_save_status = ET.SubElement(output, "show-support-save-status") percentage_of_completion = ET.SubElement(show_support_save_status, "percentage-of-completion") percentage_of_completion.text = kwargs.pop('percentage_of_completion') callback = kwargs.pop('callback', self._callback) return callback(config)
def to_csv(self, encoding=export.ENCODING, dialect=export.DIALECT, make_filename=export.MAKE_FILENAME): """Dump all worksheets of the spreadsheet to individual CSV files. Args: encoding (str): result string encoding dialect (str): :mod:`csv` dialect name or object to use make_filename: template or one-argument callable returning the filename If ``make_filename`` is a string, it is string-interpolated with an infos-dictionary with the fields ``id`` (spreadhseet id), ``title`` (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id), ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the filename: ``filename = make_filename % infos``. If ``make_filename`` is a callable, it will be called with the infos-dictionary to generate the filename: ``filename = make_filename(infos)``. """ for s in self._sheets: s.to_csv(None, encoding, dialect, make_filename)
Dump all worksheets of the spreadsheet to individual CSV files. Args: encoding (str): result string encoding dialect (str): :mod:`csv` dialect name or object to use make_filename: template or one-argument callable returning the filename If ``make_filename`` is a string, it is string-interpolated with an infos-dictionary with the fields ``id`` (spreadhseet id), ``title`` (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id), ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the filename: ``filename = make_filename % infos``. If ``make_filename`` is a callable, it will be called with the infos-dictionary to generate the filename: ``filename = make_filename(infos)``.
Below is the the instruction that describes the task: ### Input: Dump all worksheets of the spreadsheet to individual CSV files. Args: encoding (str): result string encoding dialect (str): :mod:`csv` dialect name or object to use make_filename: template or one-argument callable returning the filename If ``make_filename`` is a string, it is string-interpolated with an infos-dictionary with the fields ``id`` (spreadhseet id), ``title`` (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id), ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the filename: ``filename = make_filename % infos``. If ``make_filename`` is a callable, it will be called with the infos-dictionary to generate the filename: ``filename = make_filename(infos)``. ### Response: def to_csv(self, encoding=export.ENCODING, dialect=export.DIALECT, make_filename=export.MAKE_FILENAME): """Dump all worksheets of the spreadsheet to individual CSV files. Args: encoding (str): result string encoding dialect (str): :mod:`csv` dialect name or object to use make_filename: template or one-argument callable returning the filename If ``make_filename`` is a string, it is string-interpolated with an infos-dictionary with the fields ``id`` (spreadhseet id), ``title`` (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet id), ``index`` (worksheet index), and ``dialect`` CSV dialect to generate the filename: ``filename = make_filename % infos``. If ``make_filename`` is a callable, it will be called with the infos-dictionary to generate the filename: ``filename = make_filename(infos)``. """ for s in self._sheets: s.to_csv(None, encoding, dialect, make_filename)
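A minimal usage sketch (not part of the source): assuming `ss` is an instance of the spreadsheet class this method belongs to, the string form of `make_filename` is %-interpolated with the infos-dictionary described above, so the hypothetical pattern below yields one CSV file per worksheet; the real defaults otherwise come from the `export` module. >>> ss.to_csv(encoding='utf-8', dialect='excel', make_filename='%(title)s - %(sheet)s.csv') >>> ss.to_csv(make_filename=lambda infos: '%(id)s_%(gid)s.csv' % infos) # callable form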
def sanitize_turbo(html, allowed_tags=TURBO_ALLOWED_TAGS, allowed_attrs=TURBO_ALLOWED_ATTRS): """Sanitizes HTML, removing tags and attributes that are not allowed. :param str|unicode html: HTML markup to sanitize. :param list allowed_tags: List of allowed tags. :param dict allowed_attrs: Dictionary with attributes allowed for tags. :rtype: unicode """ return clean(html, tags=allowed_tags, attributes=allowed_attrs, strip=True)
Sanitizes HTML, removing tags and attributes that are not allowed. :param str|unicode html: HTML markup to sanitize. :param list allowed_tags: List of allowed tags. :param dict allowed_attrs: Dictionary with attributes allowed for tags. :rtype: unicode
Below is the the instruction that describes the task: ### Input: Sanitizes HTML, removing tags and attributes that are not allowed. :param str|unicode html: HTML markup to sanitize. :param list allowed_tags: List of allowed tags. :param dict allowed_attrs: Dictionary with attributes allowed for tags. :rtype: unicode ### Response: def sanitize_turbo(html, allowed_tags=TURBO_ALLOWED_TAGS, allowed_attrs=TURBO_ALLOWED_ATTRS): """Sanitizes HTML, removing tags and attributes that are not allowed. :param str|unicode html: HTML markup to sanitize. :param list allowed_tags: List of allowed tags. :param dict allowed_attrs: Dictionary with attributes allowed for tags. :rtype: unicode """ return clean(html, tags=allowed_tags, attributes=allowed_attrs, strip=True)
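For illustration only (the whitelist overrides below are hypothetical; the module-level TURBO_ALLOWED_TAGS/TURBO_ALLOWED_ATTRS constants define the real defaults), a sketch of how the function is typically called: >>> html = '<p class="x">Hello <script>alert(1)</script><b>world</b></p>' >>> cleaned = sanitize_turbo(html) # anything outside the default whitelist is stripped >>> custom = sanitize_turbo(html, allowed_tags=['p', 'b'], allowed_attrs={'p': ['class']}) # custom whitelist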
def patch_runtime_class(self, name, body, **kwargs): """ partially update the specified RuntimeClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the RuntimeClass (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1beta1RuntimeClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_runtime_class_with_http_info(name, body, **kwargs) else: (data) = self.patch_runtime_class_with_http_info(name, body, **kwargs) return data
partially update the specified RuntimeClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the RuntimeClass (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1beta1RuntimeClass If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: partially update the specified RuntimeClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the RuntimeClass (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1beta1RuntimeClass If the method is called asynchronously, returns the request thread. ### Response: def patch_runtime_class(self, name, body, **kwargs): """ partially update the specified RuntimeClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the RuntimeClass (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1beta1RuntimeClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_runtime_class_with_http_info(name, body, **kwargs) else: (data) = self.patch_runtime_class_with_http_info(name, body, **kwargs) return data
def star_stats_table(self): """ Take the parsed stats from the STAR report and add them to the basic stats table at the top of the report """ headers = OrderedDict() headers['uniquely_mapped_percent'] = { 'title': '% Aligned', 'description': '% Uniquely mapped reads', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } headers['uniquely_mapped'] = { 'title': '{} Aligned'.format(config.read_count_prefix), 'description': 'Uniquely mapped reads ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } self.general_stats_addcols(self.star_data, headers)
Take the parsed stats from the STAR report and add them to the basic stats table at the top of the report
Below is the the instruction that describes the task: ### Input: Take the parsed stats from the STAR report and add them to the basic stats table at the top of the report ### Response: def star_stats_table(self): """ Take the parsed stats from the STAR report and add them to the basic stats table at the top of the report """ headers = OrderedDict() headers['uniquely_mapped_percent'] = { 'title': '% Aligned', 'description': '% Uniquely mapped reads', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } headers['uniquely_mapped'] = { 'title': '{} Aligned'.format(config.read_count_prefix), 'description': 'Uniquely mapped reads ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } self.general_stats_addcols(self.star_data, headers)
def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]: """ For all extensions that are supported, lists all types that can be parsed from this extension. For each type, provide the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: """ check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # For all extensions that are supported, for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching): res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching) return res
For all extensions that are supported, lists all types that can be parsed from this extension. For each type, provide the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return:
Below is the the instruction that describes the task: ### Input: For all extensions that are supported, lists all types that can be parsed from this extension. For each type, provide the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: ### Response: def get_capabilities_by_ext(self, strict_type_matching: bool = False) -> Dict[str, Dict[Type, Dict[str, Parser]]]: """ For all extensions that are supported, lists all types that can be parsed from this extension. For each type, provide the list of parsers supported. The order is "most pertinent first" This method is for monitoring and debug, so we prefer to not rely on the cache, but rather on the query engine. That will ensure consistency of the results. :param strict_type_matching: :return: """ check_var(strict_type_matching, var_types=bool, var_name='strict_matching') res = dict() # For all extensions that are supported, for ext in self.get_all_supported_exts_for_type(type_to_match=JOKER, strict=strict_type_matching): res[ext] = self.get_capabilities_for_ext(ext, strict_type_matching) return res
def expand_url(url, protocol): """ Expands the given URL to a full URL by adding the Magento API endpoint parts :param url: URL to be expanded :param protocol: 'soap', 'xmlrpc', or anything else for the REST API """ if protocol == 'soap': ws_part = 'api/?wsdl' elif protocol == 'xmlrpc': ws_part = 'index.php/api/xmlrpc' else: ws_part = 'index.php/rest/V1' return url.endswith('/') and url + ws_part or url + '/' + ws_part
Expands the given URL to a full URL by adding the Magento API endpoint parts :param url: URL to be expanded :param protocol: 'soap', 'xmlrpc', or anything else for the REST API
Below is the the instruction that describes the task: ### Input: Expands the given URL to a full URL by adding the Magento API endpoint parts :param url: URL to be expanded :param protocol: 'soap', 'xmlrpc', or anything else for the REST API ### Response: def expand_url(url, protocol): """ Expands the given URL to a full URL by adding the Magento API endpoint parts :param url: URL to be expanded :param protocol: 'soap', 'xmlrpc', or anything else for the REST API """ if protocol == 'soap': ws_part = 'api/?wsdl' elif protocol == 'xmlrpc': ws_part = 'index.php/api/xmlrpc' else: ws_part = 'index.php/rest/V1' return url.endswith('/') and url + ws_part or url + '/' + ws_part
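For illustration, with a hypothetical host, the three protocol branches above produce: >>> expand_url('http://shop.example.com', 'soap') 'http://shop.example.com/api/?wsdl' >>> expand_url('http://shop.example.com/', 'xmlrpc') 'http://shop.example.com/index.php/api/xmlrpc' >>> expand_url('http://shop.example.com', 'rest') 'http://shop.example.com/index.php/rest/V1'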
def impute_dataframe_range(df_impute, col_to_max, col_to_min, col_to_median): """ Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values from the provided dictionaries. This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by * ``-inf`` -> by value in col_to_min * ``+inf`` -> by value in col_to_max * ``NaN`` -> by value in col_to_median If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError. Also, if one of the values to replace is not finite a ValueError is returned This function modifies `df_impute` in place. Afterwards df_impute is guaranteed to not contain any non-finite values. Also, all columns will be guaranteed to be of type ``np.float64``. :param df_impute: DataFrame to impute :type df_impute: pandas.DataFrame :param col_to_max: Dictionary mapping column names to max values :type col_to_max: dict :param col_to_min: Dictionary mapping column names to min values :type col_to_max: dict :param col_to_median: Dictionary mapping column names to median values :type col_to_max: dict :return df_impute: imputed DataFrame :rtype df_impute: pandas.DataFrame :raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value to replace is non finite """ columns = df_impute.columns # Making sure col_to_median, col_to_max and col_to_min have entries for every column if not set(columns) <= set(col_to_median.keys()) or \ not set(columns) <= set(col_to_max.keys()) or \ not set(columns) <= set(col_to_min.keys()): raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys " "than the column names in df") # check if there are non finite values for the replacement if np.any(~np.isfinite(list(col_to_median.values()))) or \ np.any(~np.isfinite(list(col_to_min.values()))) or \ np.any(~np.isfinite(list(col_to_max.values()))): raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values " "to replace") # Make the replacement dataframes as large as the real one col_to_max = pd.DataFrame([col_to_max]*len(df_impute), index=df_impute.index) col_to_min = pd.DataFrame([col_to_min]*len(df_impute), index=df_impute.index) col_to_median = pd.DataFrame([col_to_median]*len(df_impute), index=df_impute.index) df_impute.where(df_impute.values != np.PINF, other=col_to_max, inplace=True) df_impute.where(df_impute.values != np.NINF, other=col_to_min, inplace=True) df_impute.where(~np.isnan(df_impute.values), other=col_to_median, inplace=True) df_impute.astype(np.float64, copy=False) return df_impute
Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values from the provided dictionaries. This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by * ``-inf`` -> by value in col_to_min * ``+inf`` -> by value in col_to_max * ``NaN`` -> by value in col_to_median If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError. Also, if one of the values to replace is not finite a ValueError is returned This function modifies `df_impute` in place. Afterwards df_impute is guaranteed to not contain any non-finite values. Also, all columns will be guaranteed to be of type ``np.float64``. :param df_impute: DataFrame to impute :type df_impute: pandas.DataFrame :param col_to_max: Dictionary mapping column names to max values :type col_to_max: dict :param col_to_min: Dictionary mapping column names to min values :type col_to_max: dict :param col_to_median: Dictionary mapping column names to median values :type col_to_max: dict :return df_impute: imputed DataFrame :rtype df_impute: pandas.DataFrame :raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value to replace is non finite
Below is the the instruction that describes the task: ### Input: Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values from the provided dictionaries. This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by * ``-inf`` -> by value in col_to_min * ``+inf`` -> by value in col_to_max * ``NaN`` -> by value in col_to_median If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError. Also, if one of the values to replace is not finite a ValueError is returned This function modifies `df_impute` in place. Afterwards df_impute is guaranteed to not contain any non-finite values. Also, all columns will be guaranteed to be of type ``np.float64``. :param df_impute: DataFrame to impute :type df_impute: pandas.DataFrame :param col_to_max: Dictionary mapping column names to max values :type col_to_max: dict :param col_to_min: Dictionary mapping column names to min values :type col_to_max: dict :param col_to_median: Dictionary mapping column names to median values :type col_to_max: dict :return df_impute: imputed DataFrame :rtype df_impute: pandas.DataFrame :raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value to replace is non finite ### Response: def impute_dataframe_range(df_impute, col_to_max, col_to_min, col_to_median): """ Columnwise replaces all ``NaNs``, ``-inf`` and ``+inf`` from the DataFrame `df_impute` with average/extreme values from the provided dictionaries. This is done as follows: Each occurring ``inf`` or ``NaN`` in `df_impute` is replaced by * ``-inf`` -> by value in col_to_min * ``+inf`` -> by value in col_to_max * ``NaN`` -> by value in col_to_median If a column of df_impute is not found in the one of the dictionaries, this method will raise a ValueError. Also, if one of the values to replace is not finite a ValueError is returned This function modifies `df_impute` in place. Afterwards df_impute is guaranteed to not contain any non-finite values. Also, all columns will be guaranteed to be of type ``np.float64``. 
:param df_impute: DataFrame to impute :type df_impute: pandas.DataFrame :param col_to_max: Dictionary mapping column names to max values :type col_to_max: dict :param col_to_min: Dictionary mapping column names to min values :type col_to_max: dict :param col_to_median: Dictionary mapping column names to median values :type col_to_max: dict :return df_impute: imputed DataFrame :rtype df_impute: pandas.DataFrame :raise ValueError: if a column of df_impute is missing in col_to_max, col_to_min or col_to_median or a value to replace is non finite """ columns = df_impute.columns # Making sure col_to_median, col_to_max and col_to_min have entries for every column if not set(columns) <= set(col_to_median.keys()) or \ not set(columns) <= set(col_to_max.keys()) or \ not set(columns) <= set(col_to_min.keys()): raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains more or less keys " "than the column names in df") # check if there are non finite values for the replacement if np.any(~np.isfinite(list(col_to_median.values()))) or \ np.any(~np.isfinite(list(col_to_min.values()))) or \ np.any(~np.isfinite(list(col_to_max.values()))): raise ValueError("Some of the dictionaries col_to_median, col_to_max, col_to_min contains non finite values " "to replace") # Make the replacement dataframes as large as the real one col_to_max = pd.DataFrame([col_to_max]*len(df_impute), index=df_impute.index) col_to_min = pd.DataFrame([col_to_min]*len(df_impute), index=df_impute.index) col_to_median = pd.DataFrame([col_to_median]*len(df_impute), index=df_impute.index) df_impute.where(df_impute.values != np.PINF, other=col_to_max, inplace=True) df_impute.where(df_impute.values != np.NINF, other=col_to_min, inplace=True) df_impute.where(~np.isnan(df_impute.values), other=col_to_median, inplace=True) df_impute.astype(np.float64, copy=False) return df_impute
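A small self-contained sketch of the replacement rules described above (column name and replacement values are made up): import numpy as np; import pandas as pd; df = pd.DataFrame({'f': [1.0, np.inf, -np.inf, np.nan]}); df = impute_dataframe_range(df, col_to_max={'f': 10.0}, col_to_min={'f': -10.0}, col_to_median={'f': 1.0}) # df['f'] is now [1.0, 10.0, -10.0, 1.0]: +inf -> max, -inf -> min, NaN -> median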
def multiple_choice_field_data(field, **kwargs): """ Return random value for MultipleChoiceField >>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')] >>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES)) >>> type(result) <type 'str'> """ if field.choices: from django_any.functions import valid_choices l = list(valid_choices(field.choices)) random.shuffle(l) choices = [] count = xunit.any_int(min_value=1, max_value=len(field.choices)) for i in xrange(0, count): choices.append(l[i]) return ' '.join(choices) return 'None'
Return random value for MultipleChoiceField >>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')] >>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES)) >>> type(result) <type 'str'>
Below is the the instruction that describes the task: ### Input: Return random value for MultipleChoiceField >>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')] >>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES)) >>> type(result) <type 'str'> ### Response: def multiple_choice_field_data(field, **kwargs): """ Return random value for MultipleChoiceField >>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')] >>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES)) >>> type(result) <type 'str'> """ if field.choices: from django_any.functions import valid_choices l = list(valid_choices(field.choices)) random.shuffle(l) choices = [] count = xunit.any_int(min_value=1, max_value=len(field.choices)) for i in xrange(0, count): choices.append(l[i]) return ' '.join(choices) return 'None'
def get_properties(elt, keys=None, ctx=None): """Get elt properties. :param elt: properties elt. Not None methods or unhashable types. :param keys: key(s) of properties to get from elt. If None, get all properties. :type keys: list or str :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :return: list of properties by elt and name. :rtype: list """ # initialize keys if str if isinstance(keys, string_types): keys = (keys,) result = _get_properties(elt, keys=keys, local=False, ctx=ctx) return result
Get elt properties. :param elt: properties elt. Not None methods or unhashable types. :param keys: key(s) of properties to get from elt. If None, get all properties. :type keys: list or str :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :return: list of properties by elt and name. :rtype: list
Below is the the instruction that describes the task: ### Input: Get elt properties. :param elt: properties elt. Not None methods or unhashable types. :param keys: key(s) of properties to get from elt. If None, get all properties. :type keys: list or str :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :return: list of properties by elt and name. :rtype: list ### Response: def get_properties(elt, keys=None, ctx=None): """Get elt properties. :param elt: properties elt. Not None methods or unhashable types. :param keys: key(s) of properties to get from elt. If None, get all properties. :type keys: list or str :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. :return: list of properties by elt and name. :rtype: list """ # initialize keys if str if isinstance(keys, string_types): keys = (keys,) result = _get_properties(elt, keys=keys, local=False, ctx=ctx) return result
def perform_experiment(self, engine_list): """ Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (recall, precision, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """ # We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for endine_idx, engine in enumerate(engine_list): print('Engine %d / %d' % (endine_idx, len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average recall avg_recall = 0.0 # Use this to compute average precision avg_precision = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index, v in enumerate(self.vectors): engine.store_vector(v, 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # Get indices of the real nearest as set real_nearest = set(self.closest[index]) # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[index]) # Get search time search_time = time.time() - search_time_start # For comparance we need their indices (as set) nearest = set([self.__index_of_vector(x[0]) for x in nearest]) # Remove query index from search result to make sure that # recall and precision make sense in terms of "neighbours". # If ONLY the query vector is retrieved, we want recall to be # zero! nearest.remove(index) # If the result list is empty, recall and precision are 0.0 if len(nearest) == 0: recall = 0.0 precision = 0.0 else: # Get intersection count inter_count = float(len(real_nearest & nearest)) # Normalize recall for this vector recall = inter_count/float(len(real_nearest)) # Normalize precision for this vector precision = inter_count/float(len(nearest)) # Add to accumulator avg_recall += recall # Add to accumulator avg_precision += precision # Add to accumulator avg_search_time += search_time # Normalize recall over query set avg_recall /= float(len(self.query_indices)) # Normalize precision over query set avg_precision /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' recall=%f, precision=%f, time=%f' % (avg_recall, avg_precision, avg_search_time)) result.append((avg_recall, avg_precision, avg_search_time)) # Return (recall, precision, search_time) tuple return result
Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (recall, precision, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time.
Below is the the instruction that describes the task: ### Input: Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (recall, precision, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. ### Response: def perform_experiment(self, engine_list): """ Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (recall, precision, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """ # We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for endine_idx, engine in enumerate(engine_list): print('Engine %d / %d' % (endine_idx, len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average recall avg_recall = 0.0 # Use this to compute average precision avg_precision = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index, v in enumerate(self.vectors): engine.store_vector(v, 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # Get indices of the real nearest as set real_nearest = set(self.closest[index]) # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[index]) # Get search time search_time = time.time() - search_time_start # For comparance we need their indices (as set) nearest = set([self.__index_of_vector(x[0]) for x in nearest]) # Remove query index from search result to make sure that # recall and precision make sense in terms of "neighbours". # If ONLY the query vector is retrieved, we want recall to be # zero! nearest.remove(index) # If the result list is empty, recall and precision are 0.0 if len(nearest) == 0: recall = 0.0 precision = 0.0 else: # Get intersection count inter_count = float(len(real_nearest & nearest)) # Normalize recall for this vector recall = inter_count/float(len(real_nearest)) # Normalize precision for this vector precision = inter_count/float(len(nearest)) # Add to accumulator avg_recall += recall # Add to accumulator avg_precision += precision # Add to accumulator avg_search_time += search_time # Normalize recall over query set avg_recall /= float(len(self.query_indices)) # Normalize precision over query set avg_precision /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' recall=%f, precision=%f, time=%f' % (avg_recall, avg_precision, avg_search_time)) result.append((avg_recall, avg_precision, avg_search_time)) # Return (recall, precision, search_time) tuple return result
def build_model_classes(metadata): """Generate a model class for each model contained in the specified spec file.""" i = importlib.import_module(metadata) env = get_jinja_env() model_template = env.get_template('model.py.jinja2') for model in i.models: with open(model_path(model.name.lower()), 'w') as t: t.write(model_template.render(model_md=model))
Generate a model class for each model contained in the specified spec file.
Below is the the instruction that describes the task: ### Input: Generate a model class for each model contained in the specified spec file. ### Response: def build_model_classes(metadata): """Generate a model class for each model contained in the specified spec file.""" i = importlib.import_module(metadata) env = get_jinja_env() model_template = env.get_template('model.py.jinja2') for model in i.models: with open(model_path(model.name.lower()), 'w') as t: t.write(model_template.render(model_md=model))
def dispatch(splits, *funcs, **kwargs): """takes multiple iterables (returned by dispatch or broadcast) and delivers the items to multiple functions /-----> _INPUT1 --> double(_INPUT1) --> \ / \ splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT \ / \--> _INPUT3 --> quadruple(_INPUT3) --> / One way to construct such a flow in code would be:: splits = repeat(('bar', 'baz', 'qux'), 3) double = lambda word: word * 2 triple = lambda word: word * 3 quadruple = lambda word: word * 4 _OUTPUT = dispatch(splits, double, triple, quadruple) _OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3) """ map_func = kwargs.get('map_func', _map_func) apply_func = kwargs.get('apply_func', _apply_func) return map_func(partial(apply_func, funcs), splits)
takes multiple iterables (returned by dispatch or broadcast) and delivers the items to multiple functions /-----> _INPUT1 --> double(_INPUT1) --> \ / \ splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT \ / \--> _INPUT3 --> quadruple(_INPUT3) --> / One way to construct such a flow in code would be:: splits = repeat(('bar', 'baz', 'qux'), 3) double = lambda word: word * 2 triple = lambda word: word * 3 quadruple = lambda word: word * 4 _OUTPUT = dispatch(splits, double, triple, quadruple) _OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3)
Below is the the instruction that describes the task: ### Input: takes multiple iterables (returned by dispatch or broadcast) and delivers the items to multiple functions /-----> _INPUT1 --> double(_INPUT1) --> \ / \ splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT \ / \--> _INPUT3 --> quadruple(_INPUT3) --> / One way to construct such a flow in code would be:: splits = repeat(('bar', 'baz', 'qux'), 3) double = lambda word: word * 2 triple = lambda word: word * 3 quadruple = lambda word: word * 4 _OUTPUT = dispatch(splits, double, triple, quadruple) _OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3) ### Response: def dispatch(splits, *funcs, **kwargs): """takes multiple iterables (returned by dispatch or broadcast) and delivers the items to multiple functions /-----> _INPUT1 --> double(_INPUT1) --> \ / \ splits ------> _INPUT2 --> triple(_INPUT2) ---> _OUTPUT \ / \--> _INPUT3 --> quadruple(_INPUT3) --> / One way to construct such a flow in code would be:: splits = repeat(('bar', 'baz', 'qux'), 3) double = lambda word: word * 2 triple = lambda word: word * 3 quadruple = lambda word: word * 4 _OUTPUT = dispatch(splits, double, triple, quadruple) _OUTPUT == repeat(('barbar', 'bazbazbaz', 'quxquxquxqux'), 3) """ map_func = kwargs.get('map_func', _map_func) apply_func = kwargs.get('apply_func', _apply_func) return map_func(partial(apply_func, funcs), splits)
def create_prediction_estimator(hyper_params, model, checkpoint_path=None): """ Create an estimator for prediction purpose only. :param hyper_params: The hyper params file. :param model: The keras model. :param checkpoint_path: (Optional) Path to the specific checkpoint to use. :return: """ if checkpoint_path is None: chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)]) checkpoint_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1] print("Latest found checkpoint: {}".format(checkpoint_path)) estimator_spec = create_tf_estimator_spec(checkpoint_path, model, create_loss=None) # Create the estimator. estimator = tf.estimator.Estimator(estimator_spec, model_dir=checkpoint_path, params=hyper_params) return estimator
Create an estimator for prediction purpose only. :param hyper_params: The hyper params file. :param model: The keras model. :param checkpoint_path: (Optional) Path to the specific checkpoint to use. :return:
Below is the the instruction that describes the task: ### Input: Create an estimator for prediction purpose only. :param hyper_params: The hyper params file. :param model: The keras model. :param checkpoint_path: (Optional) Path to the specific checkpoint to use. :return: ### Response: def create_prediction_estimator(hyper_params, model, checkpoint_path=None): """ Create an estimator for prediction purpose only. :param hyper_params: The hyper params file. :param model: The keras model. :param checkpoint_path: (Optional) Path to the specific checkpoint to use. :return: """ if checkpoint_path is None: chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)]) checkpoint_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1] print("Latest found checkpoint: {}".format(checkpoint_path)) estimator_spec = create_tf_estimator_spec(checkpoint_path, model, create_loss=None) # Create the estimator. estimator = tf.estimator.Estimator(estimator_spec, model_dir=checkpoint_path, params=hyper_params) return estimator
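A possible call pattern, shown as a sketch only: `hyper_params`, `model` and `predict_input_fn` are placeholders for objects the surrounding project provides, and `hyper_params.train.checkpoint_path` must already contain checkpoints from a training run. estimator = create_prediction_estimator(hyper_params, model); predictions = estimator.predict(input_fn=predict_input_fn) # standard tf.estimator predict loop; for p in predictions: print(p)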
def course_or_program_exist(self, course_id, program_uuid): """ Return whether the input course or program exists. """ course_exists = course_id and CourseApiClient().get_course_details(course_id) program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid) return course_exists or program_exists
Return whether the input course or program exists.
Below is the the instruction that describes the task: ### Input: Return whether the input course or program exists. ### Response: def course_or_program_exist(self, course_id, program_uuid): """ Return whether the input course or program exists. """ course_exists = course_id and CourseApiClient().get_course_details(course_id) program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid) return course_exists or program_exists
def get_item(self, address, state = 'fresh'): """Get an item from the cache. :Parameters: - `address`: its address. - `state`: the worst state that is acceptable. :Types: - `address`: any hashable - `state`: `str` :return: the item or `None` if it was not found. :returntype: `CacheItem`""" self._lock.acquire() try: item = self._items.get(address) if not item: return None self.update_item(item) if _state_values[state] >= item.state_value: return item return None finally: self._lock.release()
Get an item from the cache. :Parameters: - `address`: its address. - `state`: the worst state that is acceptable. :Types: - `address`: any hashable - `state`: `str` :return: the item or `None` if it was not found. :returntype: `CacheItem`
Below is the the instruction that describes the task: ### Input: Get an item from the cache. :Parameters: - `address`: its address. - `state`: the worst state that is acceptable. :Types: - `address`: any hashable - `state`: `str` :return: the item or `None` if it was not found. :returntype: `CacheItem` ### Response: def get_item(self, address, state = 'fresh'): """Get an item from the cache. :Parameters: - `address`: its address. - `state`: the worst state that is acceptable. :Types: - `address`: any hashable - `state`: `str` :return: the item or `None` if it was not found. :returntype: `CacheItem`""" self._lock.acquire() try: item = self._items.get(address) if not item: return None self.update_item(item) if _state_values[state] >= item.state_value: return item return None finally: self._lock.release()
def render(self, text, auth=None): """ Renders the specified markdown content and embedded styles. Raises TypeError if text is not a Unicode string. Raises requests.HTTPError if the request fails. """ # Ensure text is Unicode expected = str if sys.version_info[0] >= 3 else unicode # noqa if not isinstance(text, expected): raise TypeError( 'Expected a Unicode string, got {!r}.'.format(text)) if self.user_content: url = '{0}/markdown'.format(self.api_url) data = {'text': text, 'mode': 'gfm'} if self.context: data['context'] = self.context data = json.dumps(data, ensure_ascii=False).encode('utf-8') headers = {'content-type': 'application/json; charset=UTF-8'} else: url = '{0}/markdown/raw'.format(self.api_url) data = text.encode('utf-8') headers = {'content-type': 'text/x-markdown; charset=UTF-8'} r = requests.post(url, headers=headers, data=data, auth=auth) r.raise_for_status() # FUTURE: Remove this once GitHub API properly handles Unicode markdown r.encoding = 'utf-8' return r.text if self.raw else patch(r.text)
Renders the specified markdown content and embedded styles. Raises TypeError if text is not a Unicode string. Raises requests.HTTPError if the request fails.
Below is the the instruction that describes the task: ### Input: Renders the specified markdown content and embedded styles. Raises TypeError if text is not a Unicode string. Raises requests.HTTPError if the request fails. ### Response: def render(self, text, auth=None): """ Renders the specified markdown content and embedded styles. Raises TypeError if text is not a Unicode string. Raises requests.HTTPError if the request fails. """ # Ensure text is Unicode expected = str if sys.version_info[0] >= 3 else unicode # noqa if not isinstance(text, expected): raise TypeError( 'Expected a Unicode string, got {!r}.'.format(text)) if self.user_content: url = '{0}/markdown'.format(self.api_url) data = {'text': text, 'mode': 'gfm'} if self.context: data['context'] = self.context data = json.dumps(data, ensure_ascii=False).encode('utf-8') headers = {'content-type': 'application/json; charset=UTF-8'} else: url = '{0}/markdown/raw'.format(self.api_url) data = text.encode('utf-8') headers = {'content-type': 'text/x-markdown; charset=UTF-8'} r = requests.post(url, headers=headers, data=data, auth=auth) r.raise_for_status() # FUTURE: Remove this once GitHub API properly handles Unicode markdown r.encoding = 'utf-8' return r.text if self.raw else patch(r.text)
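A hedged usage sketch, assuming `renderer` is an instance of the surrounding renderer class (its constructor is defined elsewhere); the optional `auth` argument is passed straight to `requests.post`, so a standard requests auth object works: from requests.auth import HTTPBasicAuth; html = renderer.render(u'# Hello, some *markdown* text.'); html = renderer.render(u'Some user content', auth=HTTPBasicAuth('user', 'api-token')) # authenticated call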
def __parse_json_file(self, file_path): """Process Json file data :@param file_path :@type file_path: string :@throws IOError """ if file_path == '' or os.path.splitext(file_path)[1] != '.json': raise IOError('Invalid Json file') with open(file_path) as json_file: self._raw_data = json.load(json_file) self._json_data = copy.deepcopy(self._raw_data)
Process Json file data :@param file_path :@type file_path: string :@throws IOError
Below is the the instruction that describes the task: ### Input: Process Json file data :@param file_path :@type file_path: string :@throws IOError ### Response: def __parse_json_file(self, file_path): """Process Json file data :@param file_path :@type file_path: string :@throws IOError """ if file_path == '' or os.path.splitext(file_path)[1] != '.json': raise IOError('Invalid Json file') with open(file_path) as json_file: self._raw_data = json.load(json_file) self._json_data = copy.deepcopy(self._raw_data)
def next(self): '''next It generates tuple of data. For example, if :py:meth:`self._variables == ('x', 'y')` This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )` Returns: tuple: tuple of data for mini-batch in numpy.ndarray. ''' if self._use_thread: # Wait for finish previous thread. self._next_thread.join() if self._current_data is None: logger.log(99, 'next() got None retrying.') self._next_thread = threading.Thread(target=self._next) self._next_thread.start() self._next_thread.join() self._current_epoch, data = self._current_data # Start next thread. self._next_thread = threading.Thread(target=self._next) self._next_thread.start() else: self._next() self._current_epoch, data = self._current_data return data
next It generates tuple of data. For example, if :py:meth:`self._variables == ('x', 'y')` This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )` Returns: tuple: tuple of data for mini-batch in numpy.ndarray.
Below is the the instruction that describes the task: ### Input: next It generates tuple of data. For example, if :py:meth:`self._variables == ('x', 'y')` This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )` Returns: tuple: tuple of data for mini-batch in numpy.ndarray. ### Response: def next(self): '''next It generates tuple of data. For example, if :py:meth:`self._variables == ('x', 'y')` This method returns :py:meth:` ( [[X] * batch_size], [[Y] * batch_size] )` Returns: tuple: tuple of data for mini-batch in numpy.ndarray. ''' if self._use_thread: # Wait for finish previous thread. self._next_thread.join() if self._current_data is None: logger.log(99, 'next() got None retrying.') self._next_thread = threading.Thread(target=self._next) self._next_thread.start() self._next_thread.join() self._current_epoch, data = self._current_data # Start next thread. self._next_thread = threading.Thread(target=self._next) self._next_thread.start() else: self._next() self._current_epoch, data = self._current_data return data
def command(self, *cmd): """ Sends a command or sequence of commands through to the I²C address - maximum allowed is 32 bytes in one go. :param cmd: A spread of commands. :type cmd: int :raises luma.core.error.DeviceNotFoundError: I2C device could not be found. """ assert(len(cmd) <= 32) try: self._bus.write_i2c_block_data(self._addr, self._cmd_mode, list(cmd)) except (IOError, OSError) as e: if e.errno in [errno.EREMOTEIO, errno.EIO]: # I/O error raise luma.core.error.DeviceNotFoundError( 'I2C device not found on address: 0x{0:02X}'.format(self._addr)) else: # pragma: no cover raise
Sends a command or sequence of commands through to the I²C address - maximum allowed is 32 bytes in one go. :param cmd: A spread of commands. :type cmd: int :raises luma.core.error.DeviceNotFoundError: I2C device could not be found.
Below is the the instruction that describes the task: ### Input: Sends a command or sequence of commands through to the I²C address - maximum allowed is 32 bytes in one go. :param cmd: A spread of commands. :type cmd: int :raises luma.core.error.DeviceNotFoundError: I2C device could not be found. ### Response: def command(self, *cmd): """ Sends a command or sequence of commands through to the I²C address - maximum allowed is 32 bytes in one go. :param cmd: A spread of commands. :type cmd: int :raises luma.core.error.DeviceNotFoundError: I2C device could not be found. """ assert(len(cmd) <= 32) try: self._bus.write_i2c_block_data(self._addr, self._cmd_mode, list(cmd)) except (IOError, OSError) as e: if e.errno in [errno.EREMOTEIO, errno.EIO]: # I/O error raise luma.core.error.DeviceNotFoundError( 'I2C device not found on address: 0x{0:02X}'.format(self._addr)) else: # pragma: no cover raise
def markov_network(potentials): """Creates a Markov Network from potentials. A Markov Network is also knows as a `Markov Random Field`_ Parameters ---------- potentials : dict[tuple, dict] A dict where the keys are either nodes or edges and the values are a dictionary of potentials. The potential dict should map each possible assignment of the nodes/edges to their energy. Returns ------- MN : :obj:`networkx.Graph` A markov network as a graph where each node/edge stores its potential dict as above. Examples -------- >>> potentials = {('a', 'b'): {(0, 0): -1, ... (0, 1): .5, ... (1, 0): .5, ... (1, 1): 2}} >>> MN = dnx.markov_network(potentials) >>> MN['a']['b']['potential'][(0, 0)] -1 .. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field """ G = nx.Graph() G.name = 'markov_network({!r})'.format(potentials) # we use 'clique' because the keys of potentials can be either nodes or # edges, but in either case they are fully connected. for clique, phis in potentials.items(): num_vars = len(clique) # because this data potentially wont be used for a while, let's do some # input checking now and save some debugging issues later if not isinstance(phis, abc.Mapping): raise TypeError("phis should be a dict") elif not all(config in phis for config in itertools.product((0, 1), repeat=num_vars)): raise ValueError("not all potentials provided for {!r}".format(clique)) if num_vars == 1: u, = clique G.add_node(u, potential=phis) elif num_vars == 2: u, v = clique # in python<=3.5 the edge order might not be consistent so we store # the relevant order of the variables relative to the potentials G.add_edge(u, v, potential=phis, order=(u, v)) else: # developer note: in principle supporting larger cliques can be done # using higher-order, but it would make the use of networkx graphs # far more difficult raise ValueError("Only supports cliques up to size 2") return G
Creates a Markov Network from potentials. A Markov Network is also knows as a `Markov Random Field`_ Parameters ---------- potentials : dict[tuple, dict] A dict where the keys are either nodes or edges and the values are a dictionary of potentials. The potential dict should map each possible assignment of the nodes/edges to their energy. Returns ------- MN : :obj:`networkx.Graph` A markov network as a graph where each node/edge stores its potential dict as above. Examples -------- >>> potentials = {('a', 'b'): {(0, 0): -1, ... (0, 1): .5, ... (1, 0): .5, ... (1, 1): 2}} >>> MN = dnx.markov_network(potentials) >>> MN['a']['b']['potential'][(0, 0)] -1 .. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field
Below is the the instruction that describes the task: ### Input: Creates a Markov Network from potentials. A Markov Network is also knows as a `Markov Random Field`_ Parameters ---------- potentials : dict[tuple, dict] A dict where the keys are either nodes or edges and the values are a dictionary of potentials. The potential dict should map each possible assignment of the nodes/edges to their energy. Returns ------- MN : :obj:`networkx.Graph` A markov network as a graph where each node/edge stores its potential dict as above. Examples -------- >>> potentials = {('a', 'b'): {(0, 0): -1, ... (0, 1): .5, ... (1, 0): .5, ... (1, 1): 2}} >>> MN = dnx.markov_network(potentials) >>> MN['a']['b']['potential'][(0, 0)] -1 .. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field ### Response: def markov_network(potentials): """Creates a Markov Network from potentials. A Markov Network is also knows as a `Markov Random Field`_ Parameters ---------- potentials : dict[tuple, dict] A dict where the keys are either nodes or edges and the values are a dictionary of potentials. The potential dict should map each possible assignment of the nodes/edges to their energy. Returns ------- MN : :obj:`networkx.Graph` A markov network as a graph where each node/edge stores its potential dict as above. Examples -------- >>> potentials = {('a', 'b'): {(0, 0): -1, ... (0, 1): .5, ... (1, 0): .5, ... (1, 1): 2}} >>> MN = dnx.markov_network(potentials) >>> MN['a']['b']['potential'][(0, 0)] -1 .. _Markov Random Field: https://en.wikipedia.org/wiki/Markov_random_field """ G = nx.Graph() G.name = 'markov_network({!r})'.format(potentials) # we use 'clique' because the keys of potentials can be either nodes or # edges, but in either case they are fully connected. for clique, phis in potentials.items(): num_vars = len(clique) # because this data potentially wont be used for a while, let's do some # input checking now and save some debugging issues later if not isinstance(phis, abc.Mapping): raise TypeError("phis should be a dict") elif not all(config in phis for config in itertools.product((0, 1), repeat=num_vars)): raise ValueError("not all potentials provided for {!r}".format(clique)) if num_vars == 1: u, = clique G.add_node(u, potential=phis) elif num_vars == 2: u, v = clique # in python<=3.5 the edge order might not be consistent so we store # the relevant order of the variables relative to the potentials G.add_edge(u, v, potential=phis, order=(u, v)) else: # developer note: in principle supporting larger cliques can be done # using higher-order, but it would make the use of networkx graphs # far more difficult raise ValueError("Only supports cliques up to size 2") return G
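The function also accepts single-node cliques via the `num_vars == 1` branch above; a brief sketch with arbitrary values: >>> unary = {('a',): {(0,): -0.5, (1,): 0.5}} >>> MN = dnx.markov_network(unary) >>> MN.nodes['a']['potential'][(1,)] # use MN.node['a'] on networkx 1.x 0.5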
def _connect_mv_node(network, node, target_obj): """Connects MV node to target object in MV grid If the target object is a node, a new line is created to it. If the target object is a line, the node is connected to a newly created branch tee (using perpendicular projection) on this line. New lines are created using standard equipment. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object node : :class:`~.grid.components.Component` Node to connect (e.g. :class:`~.grid.components.Generator`) Node must be a member of MV grid's graph (network.mv_grid.graph) target_obj : :class:`~.grid.components.Component` Object that node shall be connected to Returns ------- :class:`~.grid.components.Component` or None Node that node was connected to Notes ----- Adapted from `Ding0 <https://github.com/openego/ding0/blob/\ 21a52048f84ec341fe54e0204ac62228a9e8a32a/\ ding0/grid/mv_grid/mv_connect.py#L311>`_. """ # get standard equipment std_line_type = network.equipment_data['mv_cables'].loc[ network.config['grid_expansion_standard_equipment']['mv_line']] std_line_kind = 'cable' target_obj_result = None node_shp = transform(proj2equidistant(network), node.geom) # MV line is nearest connection point if isinstance(target_obj['shp'], LineString): adj_node1 = target_obj['obj']['adj_nodes'][0] adj_node2 = target_obj['obj']['adj_nodes'][1] # find nearest point on MV line conn_point_shp = target_obj['shp'].interpolate(target_obj['shp'].project(node_shp)) conn_point_shp = transform(proj2conformal(network), conn_point_shp) line = network.mv_grid.graph.edge[adj_node1][adj_node2] # target MV line does currently not connect a load area of type aggregated if not line['type'] == 'line_aggr': # create branch tee and add it to grid branch_tee = BranchTee(geom=conn_point_shp, grid=network.mv_grid, in_building=False) network.mv_grid.graph.add_node(branch_tee, type='branch_tee') # split old branch into 2 segments # (delete old branch and create 2 new ones along cable_dist) # ========================================================== # backup kind and type of branch line_kind = line['line'].kind line_type = line['line'].type # remove line from graph network.mv_grid.graph.remove_edge(adj_node1, adj_node2) # delete line from equipment changes if existing _del_cable_from_equipment_changes(network=network, line=line['line']) line_length = calc_geo_dist_vincenty(network=network, node_source=adj_node1, node_target=branch_tee) line = Line(id=random.randint(10 ** 8, 10 ** 9), length=line_length / 1e3, quantity=1, kind=line_kind, type=line_type, grid=network.mv_grid) network.mv_grid.graph.add_edge(adj_node1, branch_tee, line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) line_length = calc_geo_dist_vincenty(network=network, node_source=adj_node2, node_target=branch_tee) line = Line(id=random.randint(10 ** 8, 10 ** 9), length=line_length / 1e3, quantity=1, kind=line_kind, type=line_type, grid=network.mv_grid) network.mv_grid.graph.add_edge(adj_node2, branch_tee, line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) # add new branch for new node (node to branch tee) # ================================================ line_length = calc_geo_dist_vincenty(network=network, node_source=node, node_target=branch_tee) line = Line(id=random.randint(10 ** 8, 10 ** 9), length=line_length / 1e3, quantity=1, kind=std_line_kind, type=std_line_type, 
grid=network.mv_grid) network.mv_grid.graph.add_edge(node, branch_tee, line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) target_obj_result = branch_tee # node ist nearest connection point else: # what kind of node is to be connected? (which type is node of?) # LVStation: Connect to LVStation or BranchTee # Generator: Connect to LVStation, BranchTee or Generator if isinstance(node, LVStation): valid_conn_objects = (LVStation, BranchTee) elif isinstance(node, Generator): valid_conn_objects = (LVStation, BranchTee, Generator) else: raise ValueError('Oops, the node you are trying to connect is not a valid connection object') # if target is generator or Load, check if it is aggregated (=> connection not allowed) if isinstance(target_obj['obj'], (Generator, Load)): target_is_aggregated = any([_ for _ in network.mv_grid.graph.edge[target_obj['obj']].values() if _['type'] == 'line_aggr']) else: target_is_aggregated = False # target node is not a load area of type aggregated if isinstance(target_obj['obj'], valid_conn_objects) and not target_is_aggregated: # add new branch for satellite (station to station) line_length = calc_geo_dist_vincenty(network=network, node_source=node, node_target=target_obj['obj']) line = Line(id=random.randint(10 ** 8, 10 ** 9), type=std_line_type, kind=std_line_kind, quantity=1, length=line_length / 1e3, grid=network.mv_grid) network.mv_grid.graph.add_edge(node, target_obj['obj'], line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) target_obj_result = target_obj['obj'] return target_obj_result
Connects MV node to target object in MV grid If the target object is a node, a new line is created to it. If the target object is a line, the node is connected to a newly created branch tee (using perpendicular projection) on this line. New lines are created using standard equipment. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object node : :class:`~.grid.components.Component` Node to connect (e.g. :class:`~.grid.components.Generator`) Node must be a member of MV grid's graph (network.mv_grid.graph) target_obj : :class:`~.grid.components.Component` Object that node shall be connected to Returns ------- :class:`~.grid.components.Component` or None Node that node was connected to Notes ----- Adapted from `Ding0 <https://github.com/openego/ding0/blob/\ 21a52048f84ec341fe54e0204ac62228a9e8a32a/\ ding0/grid/mv_grid/mv_connect.py#L311>`_.
Below is the the instruction that describes the task: ### Input: Connects MV node to target object in MV grid If the target object is a node, a new line is created to it. If the target object is a line, the node is connected to a newly created branch tee (using perpendicular projection) on this line. New lines are created using standard equipment. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object node : :class:`~.grid.components.Component` Node to connect (e.g. :class:`~.grid.components.Generator`) Node must be a member of MV grid's graph (network.mv_grid.graph) target_obj : :class:`~.grid.components.Component` Object that node shall be connected to Returns ------- :class:`~.grid.components.Component` or None Node that node was connected to Notes ----- Adapted from `Ding0 <https://github.com/openego/ding0/blob/\ 21a52048f84ec341fe54e0204ac62228a9e8a32a/\ ding0/grid/mv_grid/mv_connect.py#L311>`_. ### Response: def _connect_mv_node(network, node, target_obj): """Connects MV node to target object in MV grid If the target object is a node, a new line is created to it. If the target object is a line, the node is connected to a newly created branch tee (using perpendicular projection) on this line. New lines are created using standard equipment. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object node : :class:`~.grid.components.Component` Node to connect (e.g. :class:`~.grid.components.Generator`) Node must be a member of MV grid's graph (network.mv_grid.graph) target_obj : :class:`~.grid.components.Component` Object that node shall be connected to Returns ------- :class:`~.grid.components.Component` or None Node that node was connected to Notes ----- Adapted from `Ding0 <https://github.com/openego/ding0/blob/\ 21a52048f84ec341fe54e0204ac62228a9e8a32a/\ ding0/grid/mv_grid/mv_connect.py#L311>`_. 
""" # get standard equipment std_line_type = network.equipment_data['mv_cables'].loc[ network.config['grid_expansion_standard_equipment']['mv_line']] std_line_kind = 'cable' target_obj_result = None node_shp = transform(proj2equidistant(network), node.geom) # MV line is nearest connection point if isinstance(target_obj['shp'], LineString): adj_node1 = target_obj['obj']['adj_nodes'][0] adj_node2 = target_obj['obj']['adj_nodes'][1] # find nearest point on MV line conn_point_shp = target_obj['shp'].interpolate(target_obj['shp'].project(node_shp)) conn_point_shp = transform(proj2conformal(network), conn_point_shp) line = network.mv_grid.graph.edge[adj_node1][adj_node2] # target MV line does currently not connect a load area of type aggregated if not line['type'] == 'line_aggr': # create branch tee and add it to grid branch_tee = BranchTee(geom=conn_point_shp, grid=network.mv_grid, in_building=False) network.mv_grid.graph.add_node(branch_tee, type='branch_tee') # split old branch into 2 segments # (delete old branch and create 2 new ones along cable_dist) # ========================================================== # backup kind and type of branch line_kind = line['line'].kind line_type = line['line'].type # remove line from graph network.mv_grid.graph.remove_edge(adj_node1, adj_node2) # delete line from equipment changes if existing _del_cable_from_equipment_changes(network=network, line=line['line']) line_length = calc_geo_dist_vincenty(network=network, node_source=adj_node1, node_target=branch_tee) line = Line(id=random.randint(10 ** 8, 10 ** 9), length=line_length / 1e3, quantity=1, kind=line_kind, type=line_type, grid=network.mv_grid) network.mv_grid.graph.add_edge(adj_node1, branch_tee, line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) line_length = calc_geo_dist_vincenty(network=network, node_source=adj_node2, node_target=branch_tee) line = Line(id=random.randint(10 ** 8, 10 ** 9), length=line_length / 1e3, quantity=1, kind=line_kind, type=line_type, grid=network.mv_grid) network.mv_grid.graph.add_edge(adj_node2, branch_tee, line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) # add new branch for new node (node to branch tee) # ================================================ line_length = calc_geo_dist_vincenty(network=network, node_source=node, node_target=branch_tee) line = Line(id=random.randint(10 ** 8, 10 ** 9), length=line_length / 1e3, quantity=1, kind=std_line_kind, type=std_line_type, grid=network.mv_grid) network.mv_grid.graph.add_edge(node, branch_tee, line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) target_obj_result = branch_tee # node ist nearest connection point else: # what kind of node is to be connected? (which type is node of?) 
# LVStation: Connect to LVStation or BranchTee # Generator: Connect to LVStation, BranchTee or Generator if isinstance(node, LVStation): valid_conn_objects = (LVStation, BranchTee) elif isinstance(node, Generator): valid_conn_objects = (LVStation, BranchTee, Generator) else: raise ValueError('Oops, the node you are trying to connect is not a valid connection object') # if target is generator or Load, check if it is aggregated (=> connection not allowed) if isinstance(target_obj['obj'], (Generator, Load)): target_is_aggregated = any([_ for _ in network.mv_grid.graph.edge[target_obj['obj']].values() if _['type'] == 'line_aggr']) else: target_is_aggregated = False # target node is not a load area of type aggregated if isinstance(target_obj['obj'], valid_conn_objects) and not target_is_aggregated: # add new branch for satellite (station to station) line_length = calc_geo_dist_vincenty(network=network, node_source=node, node_target=target_obj['obj']) line = Line(id=random.randint(10 ** 8, 10 ** 9), type=std_line_type, kind=std_line_kind, quantity=1, length=line_length / 1e3, grid=network.mv_grid) network.mv_grid.graph.add_edge(node, target_obj['obj'], line=line, type='line') # add line to equipment changes to track costs _add_cable_to_equipment_changes(network=network, line=line) target_obj_result = target_obj['obj'] return target_obj_result
def default(self): """Default for enum field. Will cause resolution of Enum type and unresolved default value. """ try: return self.__resolved_default except AttributeError: resolved_default = super(EnumField, self).default if isinstance(resolved_default, (six.string_types, six.integer_types)): # pylint:disable=not-callable resolved_default = self.type(resolved_default) self.__resolved_default = resolved_default return self.__resolved_default
Default for enum field. Will cause resolution of Enum type and unresolved default value.
Below is the the instruction that describes the task: ### Input: Default for enum field. Will cause resolution of Enum type and unresolved default value. ### Response: def default(self): """Default for enum field. Will cause resolution of Enum type and unresolved default value. """ try: return self.__resolved_default except AttributeError: resolved_default = super(EnumField, self).default if isinstance(resolved_default, (six.string_types, six.integer_types)): # pylint:disable=not-callable resolved_default = self.type(resolved_default) self.__resolved_default = resolved_default return self.__resolved_default
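The try/except AttributeError above is a cache-on-first-access idiom: the resolved default is computed once and then stored on the instance. A minimal standalone sketch of the same idiom; the Lazy class and its _compute helper are hypothetical stand-ins, not part of the library above.

class Lazy(object):
    def _compute(self):
        # stands in for the enum-type resolution done in default()
        print("resolving once")
        return 42

    @property
    def value(self):
        try:
            return self.__cached             # fast path after the first access
        except AttributeError:
            self.__cached = self._compute()  # AttributeError triggers the one-time computation
            return self.__cached


obj = Lazy()
print(obj.value)   # prints "resolving once", then 42
print(obj.value)   # prints 42 only; the stored attribute is reused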
def resolve_inputs(self, layers): '''Resolve the names of inputs for this layer into shape tuples. Parameters ---------- layers : list of :class:`Layer` A list of the layers that are available for resolving inputs. Raises ------ theanets.util.ConfigurationError : If an input cannot be resolved. ''' resolved = {} for name, shape in self._input_shapes.items(): if shape is None: name, shape = self._resolve_shape(name, layers) resolved[name] = shape self._input_shapes = resolved
Resolve the names of inputs for this layer into shape tuples. Parameters ---------- layers : list of :class:`Layer` A list of the layers that are available for resolving inputs. Raises ------ theanets.util.ConfigurationError : If an input cannot be resolved.
Below is the the instruction that describes the task: ### Input: Resolve the names of inputs for this layer into shape tuples. Parameters ---------- layers : list of :class:`Layer` A list of the layers that are available for resolving inputs. Raises ------ theanets.util.ConfigurationError : If an input cannot be resolved. ### Response: def resolve_inputs(self, layers): '''Resolve the names of inputs for this layer into shape tuples. Parameters ---------- layers : list of :class:`Layer` A list of the layers that are available for resolving inputs. Raises ------ theanets.util.ConfigurationError : If an input cannot be resolved. ''' resolved = {} for name, shape in self._input_shapes.items(): if shape is None: name, shape = self._resolve_shape(name, layers) resolved[name] = shape self._input_shapes = resolved
def refresh(self):
    """Remove editors that are no longer open."""
    self._update_id_list()
    for _id in self.history[:]:
        if _id not in self.id_list:
            self.history.remove(_id)
Remove editors that are no longer open.
Below is the the instruction that describes the task:
### Input:
Remove editors that are no longer open.
### Response:
def refresh(self):
    """Remove editors that are no longer open."""
    self._update_id_list()
    for _id in self.history[:]:
        if _id not in self.id_list:
            self.history.remove(_id)
def get_spark_session(enable_hive=False, app_name='marvin-engine', configs=[]):
    """Return a Spark Session object"""

    # Prepare spark context to be used
    import findspark
    findspark.init()

    from pyspark.sql import SparkSession

    # prepare spark session to be returned
    spark = SparkSession.builder
    spark = spark.appName(app_name)
    spark = spark.enableHiveSupport() if enable_hive else spark

    # apply additional configs, if any
    for config in configs:
        spark = spark.config(config)

    return spark.getOrCreate()
Return a Spark Session object
Below is the the instruction that describes the task:
### Input:
Return a Spark Session object
### Response:
def get_spark_session(enable_hive=False, app_name='marvin-engine', configs=[]):
    """Return a Spark Session object"""

    # Prepare spark context to be used
    import findspark
    findspark.init()

    from pyspark.sql import SparkSession

    # prepare spark session to be returned
    spark = SparkSession.builder
    spark = spark.appName(app_name)
    spark = spark.enableHiveSupport() if enable_hive else spark

    # apply additional configs, if any
    for config in configs:
        spark = spark.config(config)

    return spark.getOrCreate()
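A hedged usage sketch of the helper above, assuming pyspark and findspark are installed and a local Spark installation is discoverable; the sample rows are invented for illustration.

# build (or reuse) a session without Hive support
spark = get_spark_session(enable_hive=False, app_name='example-app')

df = spark.createDataFrame([(1, 'a'), (2, 'b')], ['id', 'label'])
df.show()

spark.stop()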
def find_name(self, template_name, search_dirs): """ Return the path to a template with the given name. Arguments: template_name: the name of the template. search_dirs: the list of directories in which to search. """ file_name = self.make_file_name(template_name) return self._find_path_required(search_dirs, file_name)
Return the path to a template with the given name. Arguments: template_name: the name of the template. search_dirs: the list of directories in which to search.
Below is the the instruction that describes the task: ### Input: Return the path to a template with the given name. Arguments: template_name: the name of the template. search_dirs: the list of directories in which to search. ### Response: def find_name(self, template_name, search_dirs): """ Return the path to a template with the given name. Arguments: template_name: the name of the template. search_dirs: the list of directories in which to search. """ file_name = self.make_file_name(template_name) return self._find_path_required(search_dirs, file_name)
def run(self, conn, tmp, module_name, module_args, inject): ''' handler for file transfer operations ''' tokens = shlex.split(module_args) source = tokens[0] # FIXME: error handling args = " ".join(tokens[1:]) source = utils.template(self.runner.basedir, source, inject) source = utils.path_dwim(self.runner.basedir, source) # transfer the file to a remote tmp location source = source.replace('\x00','') # why does this happen here? args = args.replace('\x00','') # why does this happen here? tmp_src = os.path.join(tmp, os.path.basename(source)) tmp_src = tmp_src.replace('\x00', '') conn.put_file(source, tmp_src) # fix file permissions when the copy is done as a different user if self.runner.sudo and self.runner.sudo_user != 'root': prepcmd = 'chmod a+rx %s' % tmp_src else: prepcmd = 'chmod +x %s' % tmp_src # add preparation steps to one ssh roundtrip executing the script module_args = prepcmd + '; ' + tmp_src + ' ' + args handler = utils.plugins.action_loader.get('raw', self.runner) result = handler.run(conn, tmp, 'raw', module_args, inject) # clean up after if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1': self.runner._low_level_exec_command(conn, 'rm -rf %s >/dev/null 2>&1' % tmp, tmp) return result
handler for file transfer operations
Below is the the instruction that describes the task: ### Input: handler for file transfer operations ### Response: def run(self, conn, tmp, module_name, module_args, inject): ''' handler for file transfer operations ''' tokens = shlex.split(module_args) source = tokens[0] # FIXME: error handling args = " ".join(tokens[1:]) source = utils.template(self.runner.basedir, source, inject) source = utils.path_dwim(self.runner.basedir, source) # transfer the file to a remote tmp location source = source.replace('\x00','') # why does this happen here? args = args.replace('\x00','') # why does this happen here? tmp_src = os.path.join(tmp, os.path.basename(source)) tmp_src = tmp_src.replace('\x00', '') conn.put_file(source, tmp_src) # fix file permissions when the copy is done as a different user if self.runner.sudo and self.runner.sudo_user != 'root': prepcmd = 'chmod a+rx %s' % tmp_src else: prepcmd = 'chmod +x %s' % tmp_src # add preparation steps to one ssh roundtrip executing the script module_args = prepcmd + '; ' + tmp_src + ' ' + args handler = utils.plugins.action_loader.get('raw', self.runner) result = handler.run(conn, tmp, 'raw', module_args, inject) # clean up after if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1': self.runner._low_level_exec_command(conn, 'rm -rf %s >/dev/null 2>&1' % tmp, tmp) return result
def expand_details(df, detailCol='detail'): """Expands the details column of the given dataframe and returns the resulting DataFrame. :df: The input DataFrame. :detailCol: The detail column name. :returns: Returns DataFrame with new columns from pbp parsing. """ df = copy.deepcopy(df) df['detail'] = df[detailCol] dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df['detail'].values] # clean up unmatched details cols = {c for d in dicts if d for c in d.keys()} blankEntry = {c: np.nan for c in cols} newDicts = [d if d else blankEntry for d in dicts] # get details DataFrame and merge it with original to create main DataFrame details = pd.DataFrame(newDicts) df = pd.merge(df, details, left_index=True, right_index=True) # add isError column errors = [i for i, d in enumerate(dicts) if d is None] df['isError'] = False df.loc[errors, 'isError'] = True # fill in some NaN's necessary for _clean_features df.loc[0, 'qtr_time_remain'] = '15:00' df.qtr_time_remain.fillna(method='bfill', inplace=True) df.qtr_time_remain.fillna( pd.Series(np.where(df.quarter == 4, '0:00', '15:00')), inplace=True ) # use _clean_features to clean up and add columns new_df = df.apply(_clean_features, axis=1) return new_df
Expands the details column of the given dataframe and returns the resulting DataFrame. :df: The input DataFrame. :detailCol: The detail column name. :returns: Returns DataFrame with new columns from pbp parsing.
Below is the the instruction that describes the task: ### Input: Expands the details column of the given dataframe and returns the resulting DataFrame. :df: The input DataFrame. :detailCol: The detail column name. :returns: Returns DataFrame with new columns from pbp parsing. ### Response: def expand_details(df, detailCol='detail'): """Expands the details column of the given dataframe and returns the resulting DataFrame. :df: The input DataFrame. :detailCol: The detail column name. :returns: Returns DataFrame with new columns from pbp parsing. """ df = copy.deepcopy(df) df['detail'] = df[detailCol] dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df['detail'].values] # clean up unmatched details cols = {c for d in dicts if d for c in d.keys()} blankEntry = {c: np.nan for c in cols} newDicts = [d if d else blankEntry for d in dicts] # get details DataFrame and merge it with original to create main DataFrame details = pd.DataFrame(newDicts) df = pd.merge(df, details, left_index=True, right_index=True) # add isError column errors = [i for i, d in enumerate(dicts) if d is None] df['isError'] = False df.loc[errors, 'isError'] = True # fill in some NaN's necessary for _clean_features df.loc[0, 'qtr_time_remain'] = '15:00' df.qtr_time_remain.fillna(method='bfill', inplace=True) df.qtr_time_remain.fillna( pd.Series(np.where(df.quarter == 4, '0:00', '15:00')), inplace=True ) # use _clean_features to clean up and add columns new_df = df.apply(_clean_features, axis=1) return new_df
def subtract( self, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, microseconds=0, ): """ Remove duration from the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :param hours: The number of hours :type hours: int :param minutes: The number of minutes :type minutes: int :param seconds: The number of seconds :type seconds: int :param microseconds: The number of microseconds :type microseconds: int :rtype: DateTime """ return self.add( years=-years, months=-months, weeks=-weeks, days=-days, hours=-hours, minutes=-minutes, seconds=-seconds, microseconds=-microseconds, )
Remove duration from the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :param hours: The number of hours :type hours: int :param minutes: The number of minutes :type minutes: int :param seconds: The number of seconds :type seconds: int :param microseconds: The number of microseconds :type microseconds: int :rtype: DateTime
Below is the the instruction that describes the task: ### Input: Remove duration from the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :param hours: The number of hours :type hours: int :param minutes: The number of minutes :type minutes: int :param seconds: The number of seconds :type seconds: int :param microseconds: The number of microseconds :type microseconds: int :rtype: DateTime ### Response: def subtract( self, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, microseconds=0, ): """ Remove duration from the instance. :param years: The number of years :type years: int :param months: The number of months :type months: int :param weeks: The number of weeks :type weeks: int :param days: The number of days :type days: int :param hours: The number of hours :type hours: int :param minutes: The number of minutes :type minutes: int :param seconds: The number of seconds :type seconds: int :param microseconds: The number of microseconds :type microseconds: int :rtype: DateTime """ return self.add( years=-years, months=-months, weeks=-weeks, days=-days, hours=-hours, minutes=-minutes, seconds=-seconds, microseconds=-microseconds, )
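The method above negates every argument and delegates to add, so only one piece of date arithmetic has to be correct. A self-contained sketch of the same delegation pattern using only the standard library; MiniDate is a hypothetical stand-in, not the class the method belongs to.

import datetime

class MiniDate(object):
    def __init__(self, dt):
        self.dt = dt

    def add(self, days=0, hours=0, minutes=0):
        return MiniDate(self.dt + datetime.timedelta(
            days=days, hours=hours, minutes=minutes))

    def subtract(self, days=0, hours=0, minutes=0):
        # same trick as above: negate and reuse add()
        return self.add(days=-days, hours=-hours, minutes=-minutes)


d = MiniDate(datetime.datetime(2024, 3, 15, 12, 0))
print(d.subtract(days=10, hours=3).dt)   # 2024-03-05 09:00:00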
def get_float(self, key, optional=False): """ Tries to fetch a variable from the config and expects it to be strictly a float :param key: Variable to look for :param optional: Whether to raise ConfigKeyNotFoundError if key was not found :return: float """ return self._get_typed_value(key, float, lambda x: float(x), optional)
Tries to fetch a variable from the config and expects it to be strictly a float :param key: Variable to look for :param optional: Whether to raise ConfigKeyNotFoundError if key was not found :return: float
Below is the the instruction that describes the task: ### Input: Tries to fetch a variable from the config and expects it to be strictly a float :param key: Variable to look for :param optional: Whether to raise ConfigKeyNotFoundError if key was not found :return: float ### Response: def get_float(self, key, optional=False): """ Tries to fetch a variable from the config and expects it to be strictly a float :param key: Variable to look for :param optional: Whether to raise ConfigKeyNotFoundError if key was not found :return: float """ return self._get_typed_value(key, float, lambda x: float(x), optional)
def save_package_contents(self, root, team, owner, pkgname): """ Saves the in-memory contents to a file in the local package repository. """ assert isinstance(root, RootNode) instance_hash = hash_contents(root) pkg_path = self.package_path(team, owner, pkgname) if not os.path.isdir(pkg_path): os.makedirs(pkg_path) os.mkdir(os.path.join(pkg_path, self.CONTENTS_DIR)) os.mkdir(os.path.join(pkg_path, self.TAGS_DIR)) os.mkdir(os.path.join(pkg_path, self.VERSIONS_DIR)) dest = os.path.join(pkg_path, self.CONTENTS_DIR, instance_hash) with open(dest, 'w') as contents_file: json.dump(root, contents_file, default=encode_node, indent=2, sort_keys=True) tag_dir = os.path.join(pkg_path, self.TAGS_DIR) if not os.path.isdir(tag_dir): os.mkdir(tag_dir) latest_tag = os.path.join(pkg_path, self.TAGS_DIR, self.LATEST) with open (latest_tag, 'w') as tagfile: tagfile.write("{hsh}".format(hsh=instance_hash))
Saves the in-memory contents to a file in the local package repository.
Below is the the instruction that describes the task: ### Input: Saves the in-memory contents to a file in the local package repository. ### Response: def save_package_contents(self, root, team, owner, pkgname): """ Saves the in-memory contents to a file in the local package repository. """ assert isinstance(root, RootNode) instance_hash = hash_contents(root) pkg_path = self.package_path(team, owner, pkgname) if not os.path.isdir(pkg_path): os.makedirs(pkg_path) os.mkdir(os.path.join(pkg_path, self.CONTENTS_DIR)) os.mkdir(os.path.join(pkg_path, self.TAGS_DIR)) os.mkdir(os.path.join(pkg_path, self.VERSIONS_DIR)) dest = os.path.join(pkg_path, self.CONTENTS_DIR, instance_hash) with open(dest, 'w') as contents_file: json.dump(root, contents_file, default=encode_node, indent=2, sort_keys=True) tag_dir = os.path.join(pkg_path, self.TAGS_DIR) if not os.path.isdir(tag_dir): os.mkdir(tag_dir) latest_tag = os.path.join(pkg_path, self.TAGS_DIR, self.LATEST) with open (latest_tag, 'w') as tagfile: tagfile.write("{hsh}".format(hsh=instance_hash))
def _gitignore(root): """ Parses a .gitignore file and returns patterns to match dirs and files. Only basic gitignore patterns are supported. Pattern negation, ** wildcards and anchored patterns are not currently implemented. :param root: A unicode string of the path to the git repository :return: A 2-element tuple: - 0: a list of unicode strings to match against dirs - 1: a list of unicode strings to match against dirs and files """ gitignore_path = os.path.join(root, '.gitignore') dir_patterns = ['.git'] file_patterns = [] if not os.path.exists(gitignore_path): return (dir_patterns, file_patterns) with open(gitignore_path, 'r', encoding='utf-8') as f: for line in f.readlines(): line = line.strip() if not line: continue if line.startswith('#'): continue if '**' in line: raise NotImplementedError('gitignore ** wildcards are not implemented') if line.startswith('!'): raise NotImplementedError('gitignore pattern negation is not implemented') if line.startswith('/'): raise NotImplementedError('gitignore anchored patterns are not implemented') if line.startswith('\\#'): line = '#' + line[2:] if line.startswith('\\!'): line = '!' + line[2:] if line.endswith('/'): dir_patterns.append(line[:-1]) else: file_patterns.append(line) return (dir_patterns, file_patterns)
Parses a .gitignore file and returns patterns to match dirs and files. Only basic gitignore patterns are supported. Pattern negation, ** wildcards and anchored patterns are not currently implemented. :param root: A unicode string of the path to the git repository :return: A 2-element tuple: - 0: a list of unicode strings to match against dirs - 1: a list of unicode strings to match against dirs and files
Below is the the instruction that describes the task: ### Input: Parses a .gitignore file and returns patterns to match dirs and files. Only basic gitignore patterns are supported. Pattern negation, ** wildcards and anchored patterns are not currently implemented. :param root: A unicode string of the path to the git repository :return: A 2-element tuple: - 0: a list of unicode strings to match against dirs - 1: a list of unicode strings to match against dirs and files ### Response: def _gitignore(root): """ Parses a .gitignore file and returns patterns to match dirs and files. Only basic gitignore patterns are supported. Pattern negation, ** wildcards and anchored patterns are not currently implemented. :param root: A unicode string of the path to the git repository :return: A 2-element tuple: - 0: a list of unicode strings to match against dirs - 1: a list of unicode strings to match against dirs and files """ gitignore_path = os.path.join(root, '.gitignore') dir_patterns = ['.git'] file_patterns = [] if not os.path.exists(gitignore_path): return (dir_patterns, file_patterns) with open(gitignore_path, 'r', encoding='utf-8') as f: for line in f.readlines(): line = line.strip() if not line: continue if line.startswith('#'): continue if '**' in line: raise NotImplementedError('gitignore ** wildcards are not implemented') if line.startswith('!'): raise NotImplementedError('gitignore pattern negation is not implemented') if line.startswith('/'): raise NotImplementedError('gitignore anchored patterns are not implemented') if line.startswith('\\#'): line = '#' + line[2:] if line.startswith('\\!'): line = '!' + line[2:] if line.endswith('/'): dir_patterns.append(line[:-1]) else: file_patterns.append(line) return (dir_patterns, file_patterns)
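Per the docstring, the first returned list is matched only against directory names and the second against both directories and files. A small illustration with fnmatch; the pattern lists are made up in place of a real _gitignore() result, and is_ignored is a hypothetical helper.

import fnmatch

dir_patterns = ['.git', 'build']          # example first return element
file_patterns = ['*.pyc', 'secret.txt']   # example second return element

def is_ignored(name, is_dir):
    patterns = dir_patterns + file_patterns if is_dir else file_patterns
    return any(fnmatch.fnmatch(name, pattern) for pattern in patterns)

print(is_ignored('build', True))         # True  (directory-only pattern)
print(is_ignored('module.pyc', False))   # True  (file pattern)
print(is_ignored('main.py', False))      # False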
def _get_default_locs(self, vmin, vmax): "Returns the default locations of ticks." if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) locator = self.plot_obj.date_axis_info if self.isminor: return np.compress(locator['min'], locator['val']) return np.compress(locator['maj'], locator['val'])
Returns the default locations of ticks.
Below is the the instruction that describes the task: ### Input: Returns the default locations of ticks. ### Response: def _get_default_locs(self, vmin, vmax): "Returns the default locations of ticks." if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) locator = self.plot_obj.date_axis_info if self.isminor: return np.compress(locator['min'], locator['val']) return np.compress(locator['maj'], locator['val'])
def decode(encoded_histogram, b64_wrap=True): '''Decode an encoded histogram and return a new histogram instance that has been initialized with the decoded content Return: a new histogram instance representing the decoded content Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class zlib.error: in case of zlib decompression error ''' hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap) payload = hdr_payload.payload histogram = HdrHistogram(payload.lowest_trackable_value, payload.highest_trackable_value, payload.significant_figures, hdr_payload=hdr_payload) return histogram
Decode an encoded histogram and return a new histogram instance that has been initialized with the decoded content Return: a new histogram instance representing the decoded content Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class zlib.error: in case of zlib decompression error
Below is the the instruction that describes the task: ### Input: Decode an encoded histogram and return a new histogram instance that has been initialized with the decoded content Return: a new histogram instance representing the decoded content Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class zlib.error: in case of zlib decompression error ### Response: def decode(encoded_histogram, b64_wrap=True): '''Decode an encoded histogram and return a new histogram instance that has been initialized with the decoded content Return: a new histogram instance representing the decoded content Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class zlib.error: in case of zlib decompression error ''' hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap) payload = hdr_payload.payload histogram = HdrHistogram(payload.lowest_trackable_value, payload.highest_trackable_value, payload.significant_figures, hdr_payload=hdr_payload) return histogram
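A hypothetical round-trip, assuming the Python HdrHistogram package (hdrh) this decoder appears to belong to; record_value, encode and get_value_at_percentile are assumed to exist alongside the decode shown above.

from hdrh.histogram import HdrHistogram

hist = HdrHistogram(1, 60 * 60 * 1000, 3)   # track 1..3,600,000 with 3 significant figures
for value in (12, 250, 1000):
    hist.record_value(value)

encoded = hist.encode()                     # base64-wrapped, compressed payload
restored = HdrHistogram.decode(encoded)
print(restored.get_value_at_percentile(99))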
def from_shapefile(output, input_shp_files, validate): """ Convert multiple ESRI Shapefile(s) into a single NRML source model file. """ input_parser = shapefileparser.ShapefileParser() source_model = input_parser.read(input_shp_files[0], validate) for f in input_shp_files[1:]: source_model.sources.extend(input_parser.read(f, validate).sources) if not output: output = os.path.splitext(input_shp_files[0])[0] shapefileparser.SourceModelParser().write(output + '.xml', source_model)
Convert multiple ESRI Shapefile(s) into a single NRML source model file.
Below is the the instruction that describes the task: ### Input: Convert multiple ESRI Shapefile(s) into a single NRML source model file. ### Response: def from_shapefile(output, input_shp_files, validate): """ Convert multiple ESRI Shapefile(s) into a single NRML source model file. """ input_parser = shapefileparser.ShapefileParser() source_model = input_parser.read(input_shp_files[0], validate) for f in input_shp_files[1:]: source_model.sources.extend(input_parser.read(f, validate).sources) if not output: output = os.path.splitext(input_shp_files[0])[0] shapefileparser.SourceModelParser().write(output + '.xml', source_model)
def p_InSwitchDefList(p): ''' InSwitchDefList : InSwitchDef | InSwitchDefList InSwitchDef ''' if len(p) <= 2: p[0] = InSwitchDefList(None, p[1]) else: p[0] = InSwitchDefList(p[1], p[2])
InSwitchDefList : InSwitchDef | InSwitchDefList InSwitchDef
Below is the the instruction that describes the task: ### Input: InSwitchDefList : InSwitchDef | InSwitchDefList InSwitchDef ### Response: def p_InSwitchDefList(p): ''' InSwitchDefList : InSwitchDef | InSwitchDefList InSwitchDef ''' if len(p) <= 2: p[0] = InSwitchDefList(None, p[1]) else: p[0] = InSwitchDefList(p[1], p[2])
def reminders_list(self, **kwargs) -> SlackResponse: """Lists all reminders created by or for a given user.""" self._validate_xoxp_token() return self.api_call("reminders.list", http_verb="GET", params=kwargs)
Lists all reminders created by or for a given user.
Below is the the instruction that describes the task: ### Input: Lists all reminders created by or for a given user. ### Response: def reminders_list(self, **kwargs) -> SlackResponse: """Lists all reminders created by or for a given user.""" self._validate_xoxp_token() return self.api_call("reminders.list", http_verb="GET", params=kwargs)
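A hedged usage sketch; it assumes a Slack WebClient exposing this method and a user (xoxp) token, as required by the _validate_xoxp_token check, and the response fields follow the public reminders.list API.

from slack_sdk import WebClient   # successor package exposing the same method name

client = WebClient(token="xoxp-your-user-token")
response = client.reminders_list()
for reminder in response["reminders"]:
    print(reminder["id"], reminder["text"])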
def _init_externals(): """Initialize external projects by putting them into the path""" if __version__ == 'git': sys.path.insert(0, osp.join(osp.dirname(__file__), 'ext', 'gitdb')) try: import gitdb except ImportError: raise ImportError("'gitdb' could not be found in your PYTHONPATH")
Initialize external projects by putting them into the path
Below is the the instruction that describes the task: ### Input: Initialize external projects by putting them into the path ### Response: def _init_externals(): """Initialize external projects by putting them into the path""" if __version__ == 'git': sys.path.insert(0, osp.join(osp.dirname(__file__), 'ext', 'gitdb')) try: import gitdb except ImportError: raise ImportError("'gitdb' could not be found in your PYTHONPATH")
def start(docker_url='unix://var/run/docker.sock', timeout=CLIENT_TIMEOUT, tag='salt/engines/docker_events', filters=None): ''' Scan for Docker events and fire events Example Config .. code-block:: yaml engines: - docker_events: docker_url: unix://var/run/docker.sock filters: event: - start - stop - die - oom The config above sets up engines to listen for events from the Docker daemon and publish them to the Salt event bus. For filter reference, see https://docs.docker.com/engine/reference/commandline/events/ ''' if __opts__.get('__role') == 'master': fire_master = salt.utils.event.get_master_event( __opts__, __opts__['sock_dir']).fire_event else: fire_master = None def fire(tag, msg): ''' How to fire the event ''' if fire_master: fire_master(msg, tag) else: __salt__['event.send'](tag, msg) try: # docker-py 2.0 renamed this client attribute client = docker.APIClient(base_url=docker_url, timeout=timeout) except AttributeError: client = docker.Client(base_url=docker_url, timeout=timeout) try: events = client.events(filters=filters) for event in events: data = salt.utils.json.loads(event.decode(__salt_system_encoding__, errors='replace')) # https://github.com/docker/cli/blob/master/cli/command/system/events.go#L109 # https://github.com/docker/engine-api/blob/master/types/events/events.go # Each output includes the event type, actor id, name and action. # status field can be ommited if data['Action']: fire('{0}/{1}'.format(tag, data['Action']), data) else: fire('{0}/{1}'.format(tag, data['status']), data) except Exception: traceback.print_exc()
Scan for Docker events and fire events Example Config .. code-block:: yaml engines: - docker_events: docker_url: unix://var/run/docker.sock filters: event: - start - stop - die - oom The config above sets up engines to listen for events from the Docker daemon and publish them to the Salt event bus. For filter reference, see https://docs.docker.com/engine/reference/commandline/events/
Below is the the instruction that describes the task: ### Input: Scan for Docker events and fire events Example Config .. code-block:: yaml engines: - docker_events: docker_url: unix://var/run/docker.sock filters: event: - start - stop - die - oom The config above sets up engines to listen for events from the Docker daemon and publish them to the Salt event bus. For filter reference, see https://docs.docker.com/engine/reference/commandline/events/ ### Response: def start(docker_url='unix://var/run/docker.sock', timeout=CLIENT_TIMEOUT, tag='salt/engines/docker_events', filters=None): ''' Scan for Docker events and fire events Example Config .. code-block:: yaml engines: - docker_events: docker_url: unix://var/run/docker.sock filters: event: - start - stop - die - oom The config above sets up engines to listen for events from the Docker daemon and publish them to the Salt event bus. For filter reference, see https://docs.docker.com/engine/reference/commandline/events/ ''' if __opts__.get('__role') == 'master': fire_master = salt.utils.event.get_master_event( __opts__, __opts__['sock_dir']).fire_event else: fire_master = None def fire(tag, msg): ''' How to fire the event ''' if fire_master: fire_master(msg, tag) else: __salt__['event.send'](tag, msg) try: # docker-py 2.0 renamed this client attribute client = docker.APIClient(base_url=docker_url, timeout=timeout) except AttributeError: client = docker.Client(base_url=docker_url, timeout=timeout) try: events = client.events(filters=filters) for event in events: data = salt.utils.json.loads(event.decode(__salt_system_encoding__, errors='replace')) # https://github.com/docker/cli/blob/master/cli/command/system/events.go#L109 # https://github.com/docker/engine-api/blob/master/types/events/events.go # Each output includes the event type, actor id, name and action. # status field can be ommited if data['Action']: fire('{0}/{1}'.format(tag, data['Action']), data) else: fire('{0}/{1}'.format(tag, data['status']), data) except Exception: traceback.print_exc()
def favorite_remove(self, post_id): """Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id. """ return self._get('favorites/{0}.json'.format(post_id), method='DELETE', auth=True)
Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id.
Below is the the instruction that describes the task: ### Input: Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id. ### Response: def favorite_remove(self, post_id): """Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id. """ return self._get('favorites/{0}.json'.format(post_id), method='DELETE', auth=True)
def find_class(self): """Return a class related with this type.""" if self.value <= 1: return InstructionsProperty elif self.value <= 3: return NextTablesProperty elif self.value <= 7: return ActionsProperty return OxmProperty
Return a class related with this type.
Below is the the instruction that describes the task: ### Input: Return a class related with this type. ### Response: def find_class(self): """Return a class related with this type.""" if self.value <= 1: return InstructionsProperty elif self.value <= 3: return NextTablesProperty elif self.value <= 7: return ActionsProperty return OxmProperty
def _ReadFloatingPointDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a floating-point data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: FloatingPointDefinition: floating-point data type definition. """ return self._ReadFixedSizeDataTypeDefinition( definitions_registry, definition_values, data_types.FloatingPointDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, is_member=is_member, supported_size_values=(4, 8))
Reads a floating-point data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: FloatingPointDefinition: floating-point data type definition.
Below is the the instruction that describes the task: ### Input: Reads a floating-point data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: FloatingPointDefinition: floating-point data type definition. ### Response: def _ReadFloatingPointDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads a floating-point data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: FloatingPointDefinition: floating-point data type definition. """ return self._ReadFixedSizeDataTypeDefinition( definitions_registry, definition_values, data_types.FloatingPointDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, is_member=is_member, supported_size_values=(4, 8))
def set_rendering_intent(self, rendering_intent):
    """Set rendering intent variant for sRGB chunk"""
    if rendering_intent not in (None, PERCEPTUAL,
                                RELATIVE_COLORIMETRIC,
                                SATURATION,
                                ABSOLUTE_COLORIMETRIC):
        raise FormatError('Unknown rendering intent')
    self.rendering_intent = rendering_intent
Set rendering intent variant for sRGB chunk
Below is the the instruction that describes the task:
### Input:
Set rendering intent variant for sRGB chunk
### Response:
def set_rendering_intent(self, rendering_intent):
    """Set rendering intent variant for sRGB chunk"""
    if rendering_intent not in (None, PERCEPTUAL,
                                RELATIVE_COLORIMETRIC,
                                SATURATION,
                                ABSOLUTE_COLORIMETRIC):
        raise FormatError('Unknown rendering intent')
    self.rendering_intent = rendering_intent
def load(self, name): """ If not yet in the cache, load the named template and compiles it, placing it into the cache. If in cache, return the cached template. """ if self.reload: self._maybe_purge_cache() template = self.cache.get(name) if template: return template path = self.resolve(name) if not path: raise OSError(errno.ENOENT, "File not found: %s" % name) with codecs.open(path, 'r', encoding='UTF-8') as f: contents = f.read() mtime = os.fstat(f.fileno()).st_mtime template = self.load_string(contents, filename=path) template.mtime = mtime template.path = path self.cache[name] = template return template
If not yet in the cache, load the named template and compiles it, placing it into the cache. If in cache, return the cached template.
Below is the the instruction that describes the task: ### Input: If not yet in the cache, load the named template and compiles it, placing it into the cache. If in cache, return the cached template. ### Response: def load(self, name): """ If not yet in the cache, load the named template and compiles it, placing it into the cache. If in cache, return the cached template. """ if self.reload: self._maybe_purge_cache() template = self.cache.get(name) if template: return template path = self.resolve(name) if not path: raise OSError(errno.ENOENT, "File not found: %s" % name) with codecs.open(path, 'r', encoding='UTF-8') as f: contents = f.read() mtime = os.fstat(f.fileno()).st_mtime template = self.load_string(contents, filename=path) template.mtime = mtime template.path = path self.cache[name] = template return template
def _handle_comparison(self, truism): """ Handles all comparisons. """ # print("COMP:", truism) is_lt, is_equal, is_unsigned = self.comparison_info[truism.op] size = len(truism.args[0]) int_max = 2**size-1 if is_unsigned else 2**(size-1)-1 int_min = -2**(size-1) left_min = self._min(truism.args[0], signed=not is_unsigned) left_max = self._max(truism.args[0], signed=not is_unsigned) right_min = self._min(truism.args[1], signed=not is_unsigned) right_max = self._max(truism.args[1], signed=not is_unsigned) bound_max = right_max if is_equal else (right_max-1 if is_lt else right_max+1) bound_min = right_min if is_equal else (right_min-1 if is_lt else right_min+1) if is_lt and bound_max < int_min: # if the bound max is negative and we're unsigned less than, we're fucked raise ClaripyBalancerUnsatError() elif not is_lt and bound_min > int_max: # if the bound min is too big, we're fucked raise ClaripyBalancerUnsatError() current_min = int_min current_max = int_max if is_lt: current_max = min(int_max, left_max, bound_max) self._add_upper_bound(truism.args[0], current_max) else: current_min = max(int_min, left_min, bound_min) self._add_lower_bound(truism.args[0], current_min)
Handles all comparisons.
Below is the the instruction that describes the task: ### Input: Handles all comparisons. ### Response: def _handle_comparison(self, truism): """ Handles all comparisons. """ # print("COMP:", truism) is_lt, is_equal, is_unsigned = self.comparison_info[truism.op] size = len(truism.args[0]) int_max = 2**size-1 if is_unsigned else 2**(size-1)-1 int_min = -2**(size-1) left_min = self._min(truism.args[0], signed=not is_unsigned) left_max = self._max(truism.args[0], signed=not is_unsigned) right_min = self._min(truism.args[1], signed=not is_unsigned) right_max = self._max(truism.args[1], signed=not is_unsigned) bound_max = right_max if is_equal else (right_max-1 if is_lt else right_max+1) bound_min = right_min if is_equal else (right_min-1 if is_lt else right_min+1) if is_lt and bound_max < int_min: # if the bound max is negative and we're unsigned less than, we're fucked raise ClaripyBalancerUnsatError() elif not is_lt and bound_min > int_max: # if the bound min is too big, we're fucked raise ClaripyBalancerUnsatError() current_min = int_min current_max = int_max if is_lt: current_max = min(int_max, left_max, bound_max) self._add_upper_bound(truism.args[0], current_max) else: current_min = max(int_min, left_min, bound_min) self._add_lower_bound(truism.args[0], current_min)
def list(self): """ Get all current hooks :return: All hooks :rtype: list of dict :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries """ return self._post( request=ApiActions.LIST.value, uri=ApiUri.HOOKS.value, ).get('hooks')
Get all current hooks :return: All hooks :rtype: list of dict :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries
Below is the the instruction that describes the task: ### Input: Get all current hooks :return: All hooks :rtype: list of dict :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries ### Response: def list(self): """ Get all current hooks :return: All hooks :rtype: list of dict :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries """ return self._post( request=ApiActions.LIST.value, uri=ApiUri.HOOKS.value, ).get('hooks')
def publish_server_closed(self, server_address, topology_id): """Publish a ServerClosedEvent to all server listeners. :Parameters: - `server_address`: The address (host/port pair) of the server. - `topology_id`: A unique identifier for the topology this server is a part of. """ event = ServerClosedEvent(server_address, topology_id) for subscriber in self.__server_listeners: try: subscriber.closed(event) except Exception: _handle_exception()
Publish a ServerClosedEvent to all server listeners. :Parameters: - `server_address`: The address (host/port pair) of the server. - `topology_id`: A unique identifier for the topology this server is a part of.
Below is the the instruction that describes the task: ### Input: Publish a ServerClosedEvent to all server listeners. :Parameters: - `server_address`: The address (host/port pair) of the server. - `topology_id`: A unique identifier for the topology this server is a part of. ### Response: def publish_server_closed(self, server_address, topology_id): """Publish a ServerClosedEvent to all server listeners. :Parameters: - `server_address`: The address (host/port pair) of the server. - `topology_id`: A unique identifier for the topology this server is a part of. """ event = ServerClosedEvent(server_address, topology_id) for subscriber in self.__server_listeners: try: subscriber.closed(event) except Exception: _handle_exception()
def load(self, read_tuple_name): """Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from. """ self.prefix_width = 0 self.read_tuple_id_width = 0 self.genome_id_width = 0 self.chr_id_width = 0 self.coor_width = 0 parts = read_tuple_name.split("__") self.prefix_width = len(parts[0]) self.read_tuple_id_width = len(parts[1]) segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) self.genome_id_width = max(self.genome_id_width, int_widths[0]) self.chr_id_width = max(self.chr_id_width, int_widths[1]) self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])
Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from.
Below is the the instruction that describes the task: ### Input: Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from. ### Response: def load(self, read_tuple_name): """Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from. """ self.prefix_width = 0 self.read_tuple_id_width = 0 self.genome_id_width = 0 self.chr_id_width = 0 self.coor_width = 0 parts = read_tuple_name.split("__") self.prefix_width = len(parts[0]) self.read_tuple_id_width = len(parts[1]) segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) self.genome_id_width = max(self.genome_id_width, int_widths[0]) self.chr_id_width = max(self.chr_id_width, int_widths[1]) self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])
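A standalone walk-through of the same width computation for one made-up RNF-style read tuple name; the name itself is illustrative, not taken from the source.

name = "sim__0042__(01,2,0001000,0001099),(01,2,0002000,0002099)__[single-end]"

parts = name.split("__")
prefix_width = len(parts[0])            # 3
read_tuple_id_width = len(parts[1])     # 4

genome_w = chr_w = coor_w = 0
for segment in parts[2][1:-1].split("),("):
    widths = list(map(len, segment.split(",")))
    genome_w = max(genome_w, widths[0])
    chr_w = max(chr_w, widths[1])
    coor_w = max(coor_w, widths[2], widths[3])

print(prefix_width, read_tuple_id_width, genome_w, chr_w, coor_w)   # 3 4 2 1 7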
def sid(tnet, communities, axis=0, calc='global', decay=0): r""" Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_ Parameters ---------- tnet: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communities : array a Nx1 vector or NxT array of community assignment. axis : int Dimension that is returned 0 or 1 (default 0). Note, only relevant for directed networks. i.e. if 0, node i has Aijt summed over j and t. and if 1, node j has Aijt summed over i and t. calc : str 'global' returns temporal degree centrality (a 1xnode vector) (default); 'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing; 'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities. decay: int if calc = 'time', then decay is possible where the centrality of the previous time point is carried over to the next time point but decays at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. Returns ------- sid: array segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time) Notes ------ SID tries to quantify if there is more segergation or intgration compared to other time-points. If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual. There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'), the third is a value for each community-community pairing (calc='community_pairs'). First we calculate the temporal strength for each edge. This is calculate by .. math:: S_{i,t} = \sum_j G_{i,j,t} The pairwise SID, when the network is undirected, is calculated by .. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_B}}) S_{A,B,t}) Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A. When calculating the SID for a community, it is calculated byL .. math:: SID_{A,t} = \sum_b^C({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_b}}) S_{A,b,t}) Where C is the number of communities. When calculating the SID globally, it is calculated byL .. math:: SID_{t} = \sum_a^C\sum_b^C({2 \over {N_a (N_a - 1)}}) S_{A,t} - ({{1} \over {N_a * N_b}}) S_{a,b,t}) References ----------- .. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. 
[`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] """ tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN']) D = temporal_degree_centrality( tnet, calc='time', communities=communities, decay=decay) # Check network output (order of communitiesworks) network_ids = np.unique(communities) communities_size = np.array([sum(communities == n) for n in network_ids]) sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]]) for n in network_ids: for m in network_ids: betweenmodulescaling = 1/(communities_size[n]*communities_size[m]) if netinfo['nettype'][1] == 'd': withinmodulescaling = 1 / \ (communities_size[n]*communities_size[n]) elif netinfo['nettype'][1] == 'u': withinmodulescaling = 2 / \ (communities_size[n]*(communities_size[n]-1)) if n == m: betweenmodulescaling = withinmodulescaling sid[n, m, :] = withinmodulescaling * \ D[n, n, :] - betweenmodulescaling * D[n, m, :] # If nans emerge than there is no connection between networks at time point, so make these 0. sid[np.isnan(sid)] = 0 if calc == 'global': return np.sum(np.sum(sid, axis=1), axis=0) elif calc == 'communities_avg': return np.sum(sid, axis=axis) else: return sid
r""" Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_ Parameters ---------- tnet: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communities : array a Nx1 vector or NxT array of community assignment. axis : int Dimension that is returned 0 or 1 (default 0). Note, only relevant for directed networks. i.e. if 0, node i has Aijt summed over j and t. and if 1, node j has Aijt summed over i and t. calc : str 'global' returns temporal degree centrality (a 1xnode vector) (default); 'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing; 'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities. decay: int if calc = 'time', then decay is possible where the centrality of the previous time point is carried over to the next time point but decays at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. Returns ------- sid: array segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time) Notes ------ SID tries to quantify if there is more segergation or intgration compared to other time-points. If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual. There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'), the third is a value for each community-community pairing (calc='community_pairs'). First we calculate the temporal strength for each edge. This is calculate by .. math:: S_{i,t} = \sum_j G_{i,j,t} The pairwise SID, when the network is undirected, is calculated by .. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_B}}) S_{A,B,t}) Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A. When calculating the SID for a community, it is calculated byL .. math:: SID_{A,t} = \sum_b^C({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_b}}) S_{A,b,t}) Where C is the number of communities. When calculating the SID globally, it is calculated byL .. math:: SID_{t} = \sum_a^C\sum_b^C({2 \over {N_a (N_a - 1)}}) S_{A,t} - ({{1} \over {N_a * N_b}}) S_{a,b,t}) References ----------- .. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_]
Below is the the instruction that describes the task: ### Input: r""" Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_ Parameters ---------- tnet: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communities : array a Nx1 vector or NxT array of community assignment. axis : int Dimension that is returned 0 or 1 (default 0). Note, only relevant for directed networks. i.e. if 0, node i has Aijt summed over j and t. and if 1, node j has Aijt summed over i and t. calc : str 'global' returns temporal degree centrality (a 1xnode vector) (default); 'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing; 'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities. decay: int if calc = 'time', then decay is possible where the centrality of the previous time point is carried over to the next time point but decays at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. Returns ------- sid: array segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time) Notes ------ SID tries to quantify if there is more segergation or intgration compared to other time-points. If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual. There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'), the third is a value for each community-community pairing (calc='community_pairs'). First we calculate the temporal strength for each edge. This is calculate by .. math:: S_{i,t} = \sum_j G_{i,j,t} The pairwise SID, when the network is undirected, is calculated by .. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_B}}) S_{A,B,t}) Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A. When calculating the SID for a community, it is calculated byL .. math:: SID_{A,t} = \sum_b^C({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_b}}) S_{A,b,t}) Where C is the number of communities. When calculating the SID globally, it is calculated byL .. math:: SID_{t} = \sum_a^C\sum_b^C({2 \over {N_a (N_a - 1)}}) S_{A,t} - ({{1} \over {N_a * N_b}}) S_{a,b,t}) References ----------- .. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] ### Response: def sid(tnet, communities, axis=0, calc='global', decay=0): r""" Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_ Parameters ---------- tnet: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communities : array a Nx1 vector or NxT array of community assignment. axis : int Dimension that is returned 0 or 1 (default 0). Note, only relevant for directed networks. i.e. if 0, node i has Aijt summed over j and t. and if 1, node j has Aijt summed over i and t. 
calc : str 'global' returns temporal degree centrality (a 1xnode vector) (default); 'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing; 'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities. decay: int if calc = 'time', then decay is possible where the centrality of the previous time point is carried over to the next time point but decays at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. Returns ------- sid: array segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time) Notes ------ SID tries to quantify if there is more segergation or intgration compared to other time-points. If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual. There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'), the third is a value for each community-community pairing (calc='community_pairs'). First we calculate the temporal strength for each edge. This is calculate by .. math:: S_{i,t} = \sum_j G_{i,j,t} The pairwise SID, when the network is undirected, is calculated by .. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_B}}) S_{A,B,t}) Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A. When calculating the SID for a community, it is calculated byL .. math:: SID_{A,t} = \sum_b^C({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_b}}) S_{A,b,t}) Where C is the number of communities. When calculating the SID globally, it is calculated byL .. math:: SID_{t} = \sum_a^C\sum_b^C({2 \over {N_a (N_a - 1)}}) S_{A,t} - ({{1} \over {N_a * N_b}}) S_{a,b,t}) References ----------- .. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] """ tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN']) D = temporal_degree_centrality( tnet, calc='time', communities=communities, decay=decay) # Check network output (order of communitiesworks) network_ids = np.unique(communities) communities_size = np.array([sum(communities == n) for n in network_ids]) sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]]) for n in network_ids: for m in network_ids: betweenmodulescaling = 1/(communities_size[n]*communities_size[m]) if netinfo['nettype'][1] == 'd': withinmodulescaling = 1 / \ (communities_size[n]*communities_size[n]) elif netinfo['nettype'][1] == 'u': withinmodulescaling = 2 / \ (communities_size[n]*(communities_size[n]-1)) if n == m: betweenmodulescaling = withinmodulescaling sid[n, m, :] = withinmodulescaling * \ D[n, n, :] - betweenmodulescaling * D[n, m, :] # If nans emerge than there is no connection between networks at time point, so make these 0. sid[np.isnan(sid)] = 0 if calc == 'global': return np.sum(np.sum(sid, axis=1), axis=0) elif calc == 'communities_avg': return np.sum(sid, axis=axis) else: return sid
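The pairwise formula above can be sanity-checked on a toy network without the rest of the package machinery. The sketch below is only an illustration: the 4-node binary undirected network, the two communities, and the way within-community strength is counted are assumptions made for the example, not part of the original function (whose exact counting convention is delegated to temporal_degree_centrality).

import numpy as np

# Toy example (all values hypothetical): 4 nodes, 3 time points,
# nodes 0-1 form community A and nodes 2-3 form community B.
G = np.zeros((4, 4, 3))
G[0, 1, :] = [1, 1, 0]        # edge within A
G[2, 3, :] = [1, 0, 0]        # edge within B
G[1, 2, :] = [0, 1, 1]        # edge between A and B
G = G + G.transpose(1, 0, 2)  # make the network undirected (symmetric)

A, B = [0, 1], [2, 3]
N_A, N_B = len(A), len(B)

# Summed temporal strength within A and between A and B at each time point.
# Here each within-community edge is counted twice (i->j and j->i); the real
# function leaves this bookkeeping to temporal_degree_centrality.
S_A = G[np.ix_(A, A)].sum(axis=(0, 1))
S_AB = G[np.ix_(A, B)].sum(axis=(0, 1))

# Pairwise SID_{A,B,t}, following the formula in the Notes section
sid_AB = (2 / (N_A * (N_A - 1))) * S_A - (1 / (N_A * N_B)) * S_AB
print(sid_AB)   # one value per time point; positive = more segregated in this example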
def config_dir_setup(filename): """ sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return: """ path = os.path.dirname(filename) if not os.path.isdir(path): Shell.mkdir(path)
sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return:
Below is the the instruction that describes the task: ### Input: sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return: ### Response: def config_dir_setup(filename): """ sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return: """ path = os.path.dirname(filename) if not os.path.isdir(path): Shell.mkdir(path)
def _get_default(self, obj): ''' Internal implementation of instance attribute access for default values. Handles bookeeping around |PropertyContainer| value, etc. ''' if self.name in obj._property_values: # this shouldn't happen because we should have checked before _get_default() raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values") is_themed = obj.themed_values() is not None and self.name in obj.themed_values() default = self.instance_default(obj) if is_themed: unstable_dict = obj._unstable_themed_values else: unstable_dict = obj._unstable_default_values if self.name in unstable_dict: return unstable_dict[self.name] if self.property._may_have_unstable_default(): if isinstance(default, PropertyValueContainer): default._register_owner(obj, self) unstable_dict[self.name] = default return default
Internal implementation of instance attribute access for default values. Handles bookkeeping around |PropertyContainer| values, etc.
Below is the the instruction that describes the task: ### Input: Internal implementation of instance attribute access for default values. Handles bookeeping around |PropertyContainer| value, etc. ### Response: def _get_default(self, obj): ''' Internal implementation of instance attribute access for default values. Handles bookeeping around |PropertyContainer| value, etc. ''' if self.name in obj._property_values: # this shouldn't happen because we should have checked before _get_default() raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values") is_themed = obj.themed_values() is not None and self.name in obj.themed_values() default = self.instance_default(obj) if is_themed: unstable_dict = obj._unstable_themed_values else: unstable_dict = obj._unstable_default_values if self.name in unstable_dict: return unstable_dict[self.name] if self.property._may_have_unstable_default(): if isinstance(default, PropertyValueContainer): default._register_owner(obj, self) unstable_dict[self.name] = default return default
def remove_labels(self, labels, relabel=False): """ Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]]) """ self.check_labels(labels) self.reassign_label(labels, new_label=0) if relabel: self.relabel_consecutive()
Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]])
Below is the the instruction that describes the task: ### Input: Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]]) ### Response: def remove_labels(self, labels, relabel=False): """ Remove one or more labels. Removed labels are assigned a value of zero (i.e., background). Parameters ---------- labels : int, array-like (1D, int) The label number(s) to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3]) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 0, 0, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_labels(labels=[5, 3], relabel=True) >>> segm.data array([[1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0], [3, 3, 0, 0, 0, 0]]) """ self.check_labels(labels) self.reassign_label(labels, new_label=0) if relabel: self.relabel_consecutive()
def full_data(self): """ Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added. """ data = [ self.chat.title, self._username(), self._type(), self._id() ] return " ".join(filter(None, data))
Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added.
Below is the the instruction that describes the task: ### Input: Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added. ### Response: def full_data(self): """ Returns all the info available for the chat in the following format: title [username] (type) <id> If any data is not available, it is not added. """ data = [ self.chat.title, self._username(), self._type(), self._id() ] return " ".join(filter(None, data))
def _validate_backend(self): """ ensure backend string representation is correct """ try: self.backend_class # if we get an import error the specified path is wrong except (ImportError, AttributeError) as e: raise ValidationError(_('No valid backend found, got the following python exception: "%s"') % e)
ensure backend string representation is correct
Below is the the instruction that describes the task: ### Input: ensure backend string representation is correct ### Response: def _validate_backend(self): """ ensure backend string representation is correct """ try: self.backend_class # if we get an import error the specified path is wrong except (ImportError, AttributeError) as e: raise ValidationError(_('No valid backend found, got the following python exception: "%s"') % e)
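The pattern here, resolving a dotted "module.Class" path and surfacing import failures as a validation error, is easy to reproduce outside the original project. The sketch below is a generic, hypothetical version (none of these names come from the source); the real `backend_class` property presumably performs a similar dotted-path import.

import importlib

class BackendValidationError(ValueError):
    """Stand-in for the framework's ValidationError (illustrative only)."""

def load_backend_class(dotted_path):
    # Split "package.module.ClassName" into module path and class name,
    # import the module and fetch the attribute.
    module_path, _, class_name = dotted_path.rpartition('.')
    module = importlib.import_module(module_path)   # may raise ImportError
    return getattr(module, class_name)              # may raise AttributeError

def validate_backend(dotted_path):
    try:
        load_backend_class(dotted_path)
    except (ImportError, AttributeError) as e:
        raise BackendValidationError(
            'No valid backend found, got the following python exception: "%s"' % e)

validate_backend('collections.OrderedDict')     # resolves fine
# validate_backend('collections.DoesNotExist')  # would raise BackendValidationError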
def project_move(object_id, input_params={}, always_retry=False, **kwargs): """ Invokes the /project-xxxx/move API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove """ return DXHTTPRequest('/%s/move' % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /project-xxxx/move API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove
Below is the the instruction that describes the task: ### Input: Invokes the /project-xxxx/move API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove ### Response: def project_move(object_id, input_params={}, always_retry=False, **kwargs): """ Invokes the /project-xxxx/move API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove """ return DXHTTPRequest('/%s/move' % object_id, input_params, always_retry=always_retry, **kwargs)
def Execute(self, message): """This function parses the RDFValue from the server. The Run method will be called with the specified RDFValue. Args: message: The GrrMessage that we are called to process. Returns: Upon return a callback will be called on the server to register the end of the function and pass back exceptions. Raises: RuntimeError: The arguments from the server do not match the expected rdf type. """ self.message = message if message: self.require_fastpoll = message.require_fastpoll args = None try: if self.message.args_rdf_name: if not self.in_rdfvalue: raise RuntimeError("Did not expect arguments, got %s." % self.message.args_rdf_name) if self.in_rdfvalue.__name__ != self.message.args_rdf_name: raise RuntimeError( "Unexpected arg type %s != %s." % (self.message.args_rdf_name, self.in_rdfvalue.__name__)) args = self.message.payload # Only allow authenticated messages in the client if self._authentication_required and ( self.message.auth_state != rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED): raise RuntimeError("Message for %s was not Authenticated." % self.message.name) self.cpu_start = self.proc.cpu_times() self.cpu_limit = self.message.cpu_limit if getattr(flags.FLAGS, "debug_client_actions", False): pdb.set_trace() try: self.Run(args) # Ensure we always add CPU usage even if an exception occurred. finally: used = self.proc.cpu_times() self.cpu_used = (used.user - self.cpu_start.user, used.system - self.cpu_start.system) except NetworkBytesExceededError as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED, "%r: %s" % (e, e), traceback.format_exc()) # We want to report back all errors and map Python exceptions to # Grr Errors. except Exception as e: # pylint: disable=broad-except self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, "%r: %s" % (e, e), traceback.format_exc()) if flags.FLAGS.pdb_post_mortem: self.DisableNanny() pdb.post_mortem() if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK: logging.info("Job Error (%s): %s", self.__class__.__name__, self.status.error_message) if self.status.backtrace: logging.debug(self.status.backtrace) if self.cpu_used: self.status.cpu_time_used.user_cpu_time = self.cpu_used[0] self.status.cpu_time_used.system_cpu_time = self.cpu_used[1] # This returns the error status of the Actions to the flow. self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS) self._RunGC()
This function parses the RDFValue from the server. The Run method will be called with the specified RDFValue. Args: message: The GrrMessage that we are called to process. Returns: Upon return a callback will be called on the server to register the end of the function and pass back exceptions. Raises: RuntimeError: The arguments from the server do not match the expected rdf type.
Below is the the instruction that describes the task: ### Input: This function parses the RDFValue from the server. The Run method will be called with the specified RDFValue. Args: message: The GrrMessage that we are called to process. Returns: Upon return a callback will be called on the server to register the end of the function and pass back exceptions. Raises: RuntimeError: The arguments from the server do not match the expected rdf type. ### Response: def Execute(self, message): """This function parses the RDFValue from the server. The Run method will be called with the specified RDFValue. Args: message: The GrrMessage that we are called to process. Returns: Upon return a callback will be called on the server to register the end of the function and pass back exceptions. Raises: RuntimeError: The arguments from the server do not match the expected rdf type. """ self.message = message if message: self.require_fastpoll = message.require_fastpoll args = None try: if self.message.args_rdf_name: if not self.in_rdfvalue: raise RuntimeError("Did not expect arguments, got %s." % self.message.args_rdf_name) if self.in_rdfvalue.__name__ != self.message.args_rdf_name: raise RuntimeError( "Unexpected arg type %s != %s." % (self.message.args_rdf_name, self.in_rdfvalue.__name__)) args = self.message.payload # Only allow authenticated messages in the client if self._authentication_required and ( self.message.auth_state != rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED): raise RuntimeError("Message for %s was not Authenticated." % self.message.name) self.cpu_start = self.proc.cpu_times() self.cpu_limit = self.message.cpu_limit if getattr(flags.FLAGS, "debug_client_actions", False): pdb.set_trace() try: self.Run(args) # Ensure we always add CPU usage even if an exception occurred. finally: used = self.proc.cpu_times() self.cpu_used = (used.user - self.cpu_start.user, used.system - self.cpu_start.system) except NetworkBytesExceededError as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED, "%r: %s" % (e, e), traceback.format_exc()) # We want to report back all errors and map Python exceptions to # Grr Errors. except Exception as e: # pylint: disable=broad-except self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, "%r: %s" % (e, e), traceback.format_exc()) if flags.FLAGS.pdb_post_mortem: self.DisableNanny() pdb.post_mortem() if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK: logging.info("Job Error (%s): %s", self.__class__.__name__, self.status.error_message) if self.status.backtrace: logging.debug(self.status.backtrace) if self.cpu_used: self.status.cpu_time_used.user_cpu_time = self.cpu_used[0] self.status.cpu_time_used.system_cpu_time = self.cpu_used[1] # This returns the error status of the Actions to the flow. self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS) self._RunGC()
def _shuffle_items(items, bucket_key=None, disable=None, seed=None, session=None): """ Shuffles a list of `items` in place. If `bucket_key` is None, items are shuffled across the entire list. `bucket_key` is an optional function called for each item in `items` to calculate the key of bucket in which the item falls. Bucket defines the boundaries across which items will not be shuffled. `disable` is a function that takes an item and returns a falsey value if this item is ok to be shuffled. It returns a truthy value otherwise and the truthy value is used as part of the item's key when determining the bucket it belongs to. """ if seed is not None: random.seed(seed) # If `bucket_key` is falsey, shuffle is global. if not bucket_key and not disable: random.shuffle(items) return def get_full_bucket_key(item): assert bucket_key or disable if bucket_key and disable: return ItemKey(bucket=bucket_key(item, session), disabled=disable(item, session)) elif disable: return ItemKey(disabled=disable(item, session)) else: return ItemKey(bucket=bucket_key(item, session)) # For a sequence of items A1, A2, B1, B2, C1, C2, # where key(A1) == key(A2) == key(C1) == key(C2), # items A1, A2, C1, and C2 will end up in the same bucket. buckets = OrderedDict() for item in items: full_bucket_key = get_full_bucket_key(item) if full_bucket_key not in buckets: buckets[full_bucket_key] = [] buckets[full_bucket_key].append(item) # Shuffle inside a bucket bucket_keys = list(buckets.keys()) for full_bucket_key in buckets.keys(): if full_bucket_key.bucket == FAILED_FIRST_LAST_FAILED_BUCKET_KEY: # Do not shuffle the last failed bucket continue if not full_bucket_key.disabled: random.shuffle(buckets[full_bucket_key]) # Shuffle buckets # Only the first bucket can be FAILED_FIRST_LAST_FAILED_BUCKET_KEY if bucket_keys and bucket_keys[0].bucket == FAILED_FIRST_LAST_FAILED_BUCKET_KEY: new_bucket_keys = list(buckets.keys())[1:] random.shuffle(new_bucket_keys) new_bucket_keys.insert(0, bucket_keys[0]) else: new_bucket_keys = list(buckets.keys()) random.shuffle(new_bucket_keys) items[:] = [item for bk in new_bucket_keys for item in buckets[bk]] return
Shuffles a list of `items` in place. If `bucket_key` is None, items are shuffled across the entire list. `bucket_key` is an optional function called for each item in `items` to calculate the key of bucket in which the item falls. Bucket defines the boundaries across which items will not be shuffled. `disable` is a function that takes an item and returns a falsey value if this item is ok to be shuffled. It returns a truthy value otherwise and the truthy value is used as part of the item's key when determining the bucket it belongs to.
Below is the the instruction that describes the task: ### Input: Shuffles a list of `items` in place. If `bucket_key` is None, items are shuffled across the entire list. `bucket_key` is an optional function called for each item in `items` to calculate the key of bucket in which the item falls. Bucket defines the boundaries across which items will not be shuffled. `disable` is a function that takes an item and returns a falsey value if this item is ok to be shuffled. It returns a truthy value otherwise and the truthy value is used as part of the item's key when determining the bucket it belongs to. ### Response: def _shuffle_items(items, bucket_key=None, disable=None, seed=None, session=None): """ Shuffles a list of `items` in place. If `bucket_key` is None, items are shuffled across the entire list. `bucket_key` is an optional function called for each item in `items` to calculate the key of bucket in which the item falls. Bucket defines the boundaries across which items will not be shuffled. `disable` is a function that takes an item and returns a falsey value if this item is ok to be shuffled. It returns a truthy value otherwise and the truthy value is used as part of the item's key when determining the bucket it belongs to. """ if seed is not None: random.seed(seed) # If `bucket_key` is falsey, shuffle is global. if not bucket_key and not disable: random.shuffle(items) return def get_full_bucket_key(item): assert bucket_key or disable if bucket_key and disable: return ItemKey(bucket=bucket_key(item, session), disabled=disable(item, session)) elif disable: return ItemKey(disabled=disable(item, session)) else: return ItemKey(bucket=bucket_key(item, session)) # For a sequence of items A1, A2, B1, B2, C1, C2, # where key(A1) == key(A2) == key(C1) == key(C2), # items A1, A2, C1, and C2 will end up in the same bucket. buckets = OrderedDict() for item in items: full_bucket_key = get_full_bucket_key(item) if full_bucket_key not in buckets: buckets[full_bucket_key] = [] buckets[full_bucket_key].append(item) # Shuffle inside a bucket bucket_keys = list(buckets.keys()) for full_bucket_key in buckets.keys(): if full_bucket_key.bucket == FAILED_FIRST_LAST_FAILED_BUCKET_KEY: # Do not shuffle the last failed bucket continue if not full_bucket_key.disabled: random.shuffle(buckets[full_bucket_key]) # Shuffle buckets # Only the first bucket can be FAILED_FIRST_LAST_FAILED_BUCKET_KEY if bucket_keys and bucket_keys[0].bucket == FAILED_FIRST_LAST_FAILED_BUCKET_KEY: new_bucket_keys = list(buckets.keys())[1:] random.shuffle(new_bucket_keys) new_bucket_keys.insert(0, bucket_keys[0]) else: new_bucket_keys = list(buckets.keys()) random.shuffle(new_bucket_keys) items[:] = [item for bk in new_bucket_keys for item in buckets[bk]] return
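A stripped-down version of the same idea, without the pytest-specific ItemKey/session plumbing or the failed-first special case, may make the bucketing behaviour easier to see. Everything below (the function name and the test-id strings) is illustrative only.

import random
from collections import OrderedDict

def bucketed_shuffle(items, bucket_key, seed=None):
    rng = random.Random(seed)
    # Group items by bucket key, preserving first-seen bucket order.
    buckets = OrderedDict()
    for item in items:
        buckets.setdefault(bucket_key(item), []).append(item)
    # Shuffle inside each bucket, then shuffle the bucket order itself.
    for bucket in buckets.values():
        rng.shuffle(bucket)
    keys = list(buckets)
    rng.shuffle(keys)
    # Rewrite the list in place, keeping items of a bucket adjacent.
    items[:] = [item for key in keys for item in buckets[key]]

tests = ["a/test_x.py::t1", "a/test_x.py::t2", "b/test_y.py::t1", "b/test_y.py::t2"]
bucketed_shuffle(tests, bucket_key=lambda t: t.split("::")[0], seed=0)
print(tests)   # tests from the same file stay next to each other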
def MCMC_pdf_samples(self, fNew, num_samples=1000, starting_loc=None, stepsize=0.1, burn_in=1000, Y_metadata=None): """ Simple implementation of Metropolis sampling algorithm Will run a parallel chain for each input dimension (treats each f independently) Thus assumes f*_1 independant of f*_2 etc. :param num_samples: Number of samples to take :param fNew: f at which to sample around :param starting_loc: Starting locations of the independant chains (usually will be conditional_mean of likelihood), often link_f :param stepsize: Stepsize for the normal proposal distribution (will need modifying) :param burnin: number of samples to use for burnin (will need modifying) :param Y_metadata: Y_metadata for pdf """ print("Warning, using MCMC for sampling y*, needs to be tuned!") if starting_loc is None: starting_loc = fNew from functools import partial logpdf = partial(self.logpdf, f=fNew, Y_metadata=Y_metadata) pdf = lambda y_star: np.exp(logpdf(y=y_star[:, None])) #Should be the link function of f is a good starting point #(i.e. the point before you corrupt it with the likelihood) par_chains = starting_loc.shape[0] chain_values = np.zeros((par_chains, num_samples)) chain_values[:, 0][:,None] = starting_loc #Use same stepsize for all par_chains stepsize = np.ones(par_chains)*stepsize accepted = np.zeros((par_chains, num_samples+burn_in)) accept_ratio = np.zeros(num_samples+burn_in) #Whilst burning in, only need to keep the previous lot burnin_cache = np.zeros(par_chains) burnin_cache[:] = starting_loc.flatten() burning_in = True for i in range(burn_in+num_samples): next_ind = i-burn_in if burning_in: old_y = burnin_cache else: old_y = chain_values[:,next_ind-1] old_lik = pdf(old_y) #Propose new y from Gaussian proposal new_y = np.random.normal(loc=old_y, scale=stepsize) new_lik = pdf(new_y) #Accept using Metropolis (not hastings) acceptance #Always accepts if new_lik > old_lik accept_probability = np.minimum(1, new_lik/old_lik) u = np.random.uniform(0,1,par_chains) #print "Accept prob: ", accept_probability accepts = u < accept_probability if burning_in: burnin_cache[accepts] = new_y[accepts] burnin_cache[~accepts] = old_y[~accepts] if i == burn_in: burning_in = False chain_values[:,0] = burnin_cache else: #If it was accepted then new_y becomes the latest sample chain_values[accepts, next_ind] = new_y[accepts] #Otherwise use old y as the sample chain_values[~accepts, next_ind] = old_y[~accepts] accepted[~accepts, i] = 0 accepted[accepts, i] = 1 accept_ratio[i] = np.sum(accepted[:,i])/float(par_chains) #Show progress if i % int((burn_in+num_samples)*0.1) == 0: print("{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i)) print("Last run accept ratio: ", accept_ratio[i]) print("Average accept ratio: ", np.mean(accept_ratio)) return chain_values
Simple implementation of the Metropolis sampling algorithm.

Will run a parallel chain for each input dimension (treats each f independently).
Thus assumes f*_1 independent of f*_2 etc.

:param num_samples: Number of samples to take
:param fNew: values of f to sample around
:param starting_loc: Starting locations of the independent chains (usually will be conditional_mean of likelihood), often link_f
:param stepsize: Stepsize for the normal proposal distribution (will need modifying)
:param burn_in: number of samples to use for burn-in (will need modifying)
:param Y_metadata: Y_metadata for pdf
Below is the the instruction that describes the task: ### Input: Simple implementation of Metropolis sampling algorithm Will run a parallel chain for each input dimension (treats each f independently) Thus assumes f*_1 independant of f*_2 etc. :param num_samples: Number of samples to take :param fNew: f at which to sample around :param starting_loc: Starting locations of the independant chains (usually will be conditional_mean of likelihood), often link_f :param stepsize: Stepsize for the normal proposal distribution (will need modifying) :param burnin: number of samples to use for burnin (will need modifying) :param Y_metadata: Y_metadata for pdf ### Response: def MCMC_pdf_samples(self, fNew, num_samples=1000, starting_loc=None, stepsize=0.1, burn_in=1000, Y_metadata=None): """ Simple implementation of Metropolis sampling algorithm Will run a parallel chain for each input dimension (treats each f independently) Thus assumes f*_1 independant of f*_2 etc. :param num_samples: Number of samples to take :param fNew: f at which to sample around :param starting_loc: Starting locations of the independant chains (usually will be conditional_mean of likelihood), often link_f :param stepsize: Stepsize for the normal proposal distribution (will need modifying) :param burnin: number of samples to use for burnin (will need modifying) :param Y_metadata: Y_metadata for pdf """ print("Warning, using MCMC for sampling y*, needs to be tuned!") if starting_loc is None: starting_loc = fNew from functools import partial logpdf = partial(self.logpdf, f=fNew, Y_metadata=Y_metadata) pdf = lambda y_star: np.exp(logpdf(y=y_star[:, None])) #Should be the link function of f is a good starting point #(i.e. the point before you corrupt it with the likelihood) par_chains = starting_loc.shape[0] chain_values = np.zeros((par_chains, num_samples)) chain_values[:, 0][:,None] = starting_loc #Use same stepsize for all par_chains stepsize = np.ones(par_chains)*stepsize accepted = np.zeros((par_chains, num_samples+burn_in)) accept_ratio = np.zeros(num_samples+burn_in) #Whilst burning in, only need to keep the previous lot burnin_cache = np.zeros(par_chains) burnin_cache[:] = starting_loc.flatten() burning_in = True for i in range(burn_in+num_samples): next_ind = i-burn_in if burning_in: old_y = burnin_cache else: old_y = chain_values[:,next_ind-1] old_lik = pdf(old_y) #Propose new y from Gaussian proposal new_y = np.random.normal(loc=old_y, scale=stepsize) new_lik = pdf(new_y) #Accept using Metropolis (not hastings) acceptance #Always accepts if new_lik > old_lik accept_probability = np.minimum(1, new_lik/old_lik) u = np.random.uniform(0,1,par_chains) #print "Accept prob: ", accept_probability accepts = u < accept_probability if burning_in: burnin_cache[accepts] = new_y[accepts] burnin_cache[~accepts] = old_y[~accepts] if i == burn_in: burning_in = False chain_values[:,0] = burnin_cache else: #If it was accepted then new_y becomes the latest sample chain_values[accepts, next_ind] = new_y[accepts] #Otherwise use old y as the sample chain_values[~accepts, next_ind] = old_y[~accepts] accepted[~accepts, i] = 0 accepted[accepts, i] = 1 accept_ratio[i] = np.sum(accepted[:,i])/float(par_chains) #Show progress if i % int((burn_in+num_samples)*0.1) == 0: print("{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i)) print("Last run accept ratio: ", accept_ratio[i]) print("Average accept ratio: ", np.mean(accept_ratio)) return chain_values
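For readers who want the bare algorithm without the likelihood plumbing, a minimal random-walk Metropolis sampler for a one-dimensional unnormalised log-density looks like the sketch below. This is a generic illustration, not the method above: it works in log space and keeps a single chain.

import numpy as np

def metropolis(logpdf, x0, num_samples=2000, burn_in=1000, stepsize=0.5, seed=0):
    rng = np.random.default_rng(seed)
    x = x0
    samples = np.empty(num_samples)
    for i in range(burn_in + num_samples):
        proposal = x + stepsize * rng.standard_normal()
        # Accept with probability min(1, p(proposal)/p(x)), evaluated in log space.
        if np.log(rng.uniform()) < logpdf(proposal) - logpdf(x):
            x = proposal
        if i >= burn_in:
            samples[i - burn_in] = x
    return samples

# Example target: a standard normal (log-density up to a constant).
draws = metropolis(lambda x: -0.5 * x ** 2, x0=0.0)
print(draws.mean(), draws.std())   # should be roughly 0 and 1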
def HumanReadableStartType(self): """Return a human readable string describing the start type value. Returns: str: human readable description of the start type value. """ if isinstance(self.start_type, py2to3.STRING_TYPES): return self.start_type return human_readable_service_enums.SERVICE_ENUMS['Start'].get( self.start_type, '{0:d}'.format(self.start_type))
Return a human readable string describing the start type value. Returns: str: human readable description of the start type value.
Below is the the instruction that describes the task: ### Input: Return a human readable string describing the start type value. Returns: str: human readable description of the start type value. ### Response: def HumanReadableStartType(self): """Return a human readable string describing the start type value. Returns: str: human readable description of the start type value. """ if isinstance(self.start_type, py2to3.STRING_TYPES): return self.start_type return human_readable_service_enums.SERVICE_ENUMS['Start'].get( self.start_type, '{0:d}'.format(self.start_type))
def get_subgraph_peripheral_nodes(graph: BELGraph, subgraph: Iterable[BaseEntity], node_predicates: NodePredicates = None, edge_predicates: EdgePredicates = None, ): """Get a summary dictionary of all peripheral nodes to a given sub-graph. :return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)}, 'predecessor': {internal node: list of (key, dict)}}} :rtype: dict For example, it might be useful to quantify the number of predecessors and successors: >>> from pybel.struct.filters import exclude_pathology_filter >>> value = 'Blood vessel dilation subgraph' >>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value) >>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter) >>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True): >>> if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']): >>> continue >>> print(node, >>> len(p[node]['successor']), >>> len(p[node]['predecessor']), >>> len(set(p[node]['successor']) | set(p[node]['predecessor']))) """ node_filter = concatenate_node_predicates(node_predicates=node_predicates) edge_filter = and_edge_predicates(edge_predicates=edge_predicates) result = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for u, v, k, d in get_peripheral_successor_edges(graph, subgraph): if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k): continue result[v]['predecessor'][u].append((k, d)) for u, v, k, d in get_peripheral_predecessor_edges(graph, subgraph): if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k): continue result[u]['successor'][v].append((k, d)) return result
Get a summary dictionary of all peripheral nodes to a given sub-graph. :return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)}, 'predecessor': {internal node: list of (key, dict)}}} :rtype: dict For example, it might be useful to quantify the number of predecessors and successors: >>> from pybel.struct.filters import exclude_pathology_filter >>> value = 'Blood vessel dilation subgraph' >>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value) >>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter) >>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True): >>> if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']): >>> continue >>> print(node, >>> len(p[node]['successor']), >>> len(p[node]['predecessor']), >>> len(set(p[node]['successor']) | set(p[node]['predecessor'])))
Below is the the instruction that describes the task: ### Input: Get a summary dictionary of all peripheral nodes to a given sub-graph. :return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)}, 'predecessor': {internal node: list of (key, dict)}}} :rtype: dict For example, it might be useful to quantify the number of predecessors and successors: >>> from pybel.struct.filters import exclude_pathology_filter >>> value = 'Blood vessel dilation subgraph' >>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value) >>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter) >>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True): >>> if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']): >>> continue >>> print(node, >>> len(p[node]['successor']), >>> len(p[node]['predecessor']), >>> len(set(p[node]['successor']) | set(p[node]['predecessor']))) ### Response: def get_subgraph_peripheral_nodes(graph: BELGraph, subgraph: Iterable[BaseEntity], node_predicates: NodePredicates = None, edge_predicates: EdgePredicates = None, ): """Get a summary dictionary of all peripheral nodes to a given sub-graph. :return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)}, 'predecessor': {internal node: list of (key, dict)}}} :rtype: dict For example, it might be useful to quantify the number of predecessors and successors: >>> from pybel.struct.filters import exclude_pathology_filter >>> value = 'Blood vessel dilation subgraph' >>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value) >>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter) >>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True): >>> if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']): >>> continue >>> print(node, >>> len(p[node]['successor']), >>> len(p[node]['predecessor']), >>> len(set(p[node]['successor']) | set(p[node]['predecessor']))) """ node_filter = concatenate_node_predicates(node_predicates=node_predicates) edge_filter = and_edge_predicates(edge_predicates=edge_predicates) result = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) for u, v, k, d in get_peripheral_successor_edges(graph, subgraph): if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k): continue result[v]['predecessor'][u].append((k, d)) for u, v, k, d in get_peripheral_predecessor_edges(graph, subgraph): if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k): continue result[u]['successor'][v].append((k, d)) return result
def dot_v2(vec1, vec2): """Return the dot product of two vectors""" return vec1.x * vec2.x + vec1.y * vec2.y
Return the dot product of two vectors
Below is the the instruction that describes the task: ### Input: Return the dot product of two vectors ### Response: def dot_v2(vec1, vec2): """Return the dot product of two vectors""" return vec1.x * vec2.x + vec1.y * vec2.y
def fieldvalue_pairs(self, exclude_cache=False): '''Generator of fields,values pairs. Fields correspond to the ones which have been loaded (usually all of them) or not loaded but modified. Check the :ref:`load_only <performance-loadonly>` query function for more details. If *exclude_cache* evaluates to ``True``, fields with :attr:`Field.as_cache` attribute set to ``True`` won't be included. :rtype: a generator of two-elements tuples''' for field in self._meta.scalarfields: if exclude_cache and field.as_cache: continue name = field.attname if hasattr(self, name): yield field, getattr(self, name)
Generator of field, value pairs. Fields correspond to the ones which
have been loaded (usually all of them) or not loaded but modified.
Check the :ref:`load_only <performance-loadonly>` query function for more details.

If *exclude_cache* evaluates to ``True``, fields with the
:attr:`Field.as_cache` attribute set to ``True`` won't be included.

:rtype: a generator of two-element tuples
Below is the the instruction that describes the task: ### Input: Generator of fields,values pairs. Fields correspond to the ones which have been loaded (usually all of them) or not loaded but modified. Check the :ref:`load_only <performance-loadonly>` query function for more details. If *exclude_cache* evaluates to ``True``, fields with :attr:`Field.as_cache` attribute set to ``True`` won't be included. :rtype: a generator of two-elements tuples ### Response: def fieldvalue_pairs(self, exclude_cache=False): '''Generator of fields,values pairs. Fields correspond to the ones which have been loaded (usually all of them) or not loaded but modified. Check the :ref:`load_only <performance-loadonly>` query function for more details. If *exclude_cache* evaluates to ``True``, fields with :attr:`Field.as_cache` attribute set to ``True`` won't be included. :rtype: a generator of two-elements tuples''' for field in self._meta.scalarfields: if exclude_cache and field.as_cache: continue name = field.attname if hasattr(self, name): yield field, getattr(self, name)
def process_directory_statements_sorted_by_pmid(directory_name): """Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid """ s_dict = defaultdict(list) mp = process_directory(directory_name, lazy=True) for statement in mp.iter_statements(): s_dict[statement.evidence[0].pmid].append(statement) return s_dict
Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid
Below is the the instruction that describes the task: ### Input: Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid ### Response: def process_directory_statements_sorted_by_pmid(directory_name): """Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid """ s_dict = defaultdict(list) mp = process_directory(directory_name, lazy=True) for statement in mp.iter_statements(): s_dict[statement.evidence[0].pmid].append(statement) return s_dict
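A hypothetical way to consume the returned mapping (the directory name and the reporting are assumptions made for illustration, not part of the original code):

# Prints the PMIDs that yielded the most statements.
pmid_to_stmts = process_directory_statements_sorted_by_pmid('csxml_files/')
for pmid, stmts in sorted(pmid_to_stmts.items(),
                          key=lambda kv: len(kv[1]), reverse=True)[:10]:
    print(pmid, len(stmts))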
def delete(cls, resources, background=False, force=False): """Delete an ip by deleting the iface""" if not isinstance(resources, (list, tuple)): resources = [resources] ifaces = [] for item in resources: try: ip_ = cls.info(item) except UsageError: cls.error("Can't find this ip %s" % item) iface = Iface.info(ip_['iface_id']) ifaces.append(iface['id']) return Iface.delete(ifaces, background)
Delete an ip by deleting the iface
Below is the the instruction that describes the task: ### Input: Delete an ip by deleting the iface ### Response: def delete(cls, resources, background=False, force=False): """Delete an ip by deleting the iface""" if not isinstance(resources, (list, tuple)): resources = [resources] ifaces = [] for item in resources: try: ip_ = cls.info(item) except UsageError: cls.error("Can't find this ip %s" % item) iface = Iface.info(ip_['iface_id']) ifaces.append(iface['id']) return Iface.delete(ifaces, background)
def resume(self, vehID): """resume(string) -> None Resumes the vehicle from the current stop (throws an error if the vehicle is not stopped). """ self._connection._beginMessage( tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_RESUME, vehID, 1 + 4) self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 0) self._connection._sendExact()
resume(string) -> None Resumes the vehicle from the current stop (throws an error if the vehicle is not stopped).
Below is the the instruction that describes the task: ### Input: resume(string) -> None Resumes the vehicle from the current stop (throws an error if the vehicle is not stopped). ### Response: def resume(self, vehID): """resume(string) -> None Resumes the vehicle from the current stop (throws an error if the vehicle is not stopped). """ self._connection._beginMessage( tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_RESUME, vehID, 1 + 4) self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 0) self._connection._sendExact()
def columns_used(self): """ Columns from any table used in the model. May come from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used(), util.columns_in_formula(self.default_model_expr), [self.segmentation_col])))
Columns from any table used in the model. May come from either the choosers or alternatives tables.
Below is the the instruction that describes the task: ### Input: Columns from any table used in the model. May come from either the choosers or alternatives tables. ### Response: def columns_used(self): """ Columns from any table used in the model. May come from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used(), util.columns_in_formula(self.default_model_expr), [self.segmentation_col])))
def Delete(self): """Delete public IP. >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Delete().WaitUntilComplete() 0 """ public_ip_set = [{'public_ipId': o.id} for o in self.parent.public_ips if o!=self] self.parent.public_ips = [o for o in self.parent.public_ips if o!=self] return(clc.v2.Requests(clc.v2.API.Call('DELETE','servers/%s/%s/publicIPAddresses/%s' % (self.parent.server.alias,self.parent.server.id,self.id), session=self.session), alias=self.parent.server.alias, session=self.session))
Delete public IP. >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Delete().WaitUntilComplete() 0
Below is the the instruction that describes the task: ### Input: Delete public IP. >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Delete().WaitUntilComplete() 0 ### Response: def Delete(self): """Delete public IP. >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].Delete().WaitUntilComplete() 0 """ public_ip_set = [{'public_ipId': o.id} for o in self.parent.public_ips if o!=self] self.parent.public_ips = [o for o in self.parent.public_ips if o!=self] return(clc.v2.Requests(clc.v2.API.Call('DELETE','servers/%s/%s/publicIPAddresses/%s' % (self.parent.server.alias,self.parent.server.id,self.id), session=self.session), alias=self.parent.server.alias, session=self.session))
def can_use_c_for(self, node): """ Check if a for loop can use classic C syntax. To use C syntax: - target should not be assign in the loop - xrange should be use as iterator - order have to be known at compile time """ assert isinstance(node.target, ast.Name) if sys.version_info.major == 3: range_name = 'range' else: range_name = 'xrange' pattern_range = ast.Call(func=ast.Attribute( value=ast.Name(id='__builtin__', ctx=ast.Load(), annotation=None), attr=range_name, ctx=ast.Load()), args=AST_any(), keywords=[]) is_assigned = {node.target.id: False} [is_assigned.update(self.gather(IsAssigned, stmt)) for stmt in node.body] nodes = ASTMatcher(pattern_range).search(node.iter) if (node.iter not in nodes or is_assigned[node.target.id]): return False args = node.iter.args if len(args) < 3: return True if isinstance(args[2], ast.Num): return True return False
Check if a for loop can use classic C syntax.

To use C syntax:
    - target should not be assigned in the loop
    - xrange should be used as the iterator
    - order has to be known at compile time
Below is the the instruction that describes the task: ### Input: Check if a for loop can use classic C syntax. To use C syntax: - target should not be assign in the loop - xrange should be use as iterator - order have to be known at compile time ### Response: def can_use_c_for(self, node): """ Check if a for loop can use classic C syntax. To use C syntax: - target should not be assign in the loop - xrange should be use as iterator - order have to be known at compile time """ assert isinstance(node.target, ast.Name) if sys.version_info.major == 3: range_name = 'range' else: range_name = 'xrange' pattern_range = ast.Call(func=ast.Attribute( value=ast.Name(id='__builtin__', ctx=ast.Load(), annotation=None), attr=range_name, ctx=ast.Load()), args=AST_any(), keywords=[]) is_assigned = {node.target.id: False} [is_assigned.update(self.gather(IsAssigned, stmt)) for stmt in node.body] nodes = ASTMatcher(pattern_range).search(node.iter) if (node.iter not in nodes or is_assigned[node.target.id]): return False args = node.iter.args if len(args) < 3: return True if isinstance(args[2], ast.Num): return True return False
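The three conditions are easiest to see on concrete loops. The functions below are hypothetical user code (not from the source) showing one loop that qualifies for the C-style translation and two that do not.

def qualifies(n):
    s = 0
    for i in range(0, n, 2):      # literal step, target never reassigned, iterator is range/xrange
        s += i
    return s

def disqualified_target_assigned(n):
    s = 0
    for i in range(n):
        i = i + 1                 # target assigned inside the loop body
        s += i
    return s

def disqualified_unknown_step(n, step):
    s = 0
    for i in range(0, n, step):   # step is not a literal, so order is unknown at compile time
        s += i
    return s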