code | docstring | text |
---|---|---|
def JoinKeyPath(path_segments):
"""Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path.
"""
# This is an optimized way to combine the path segments into a single path
# and combine multiple successive path separators to one.
# Split all the path segments based on the path (segment) separator.
path_segments = [
segment.split(definitions.KEY_PATH_SEPARATOR)
for segment in path_segments]
# Flatten the sublists into one list.
path_segments = [
element for sublist in path_segments for element in sublist]
# Remove empty path segments.
path_segments = filter(None, path_segments)
key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith('HKEY_'):
key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)
return key_path | Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path. | Below is the instruction that describes the task:
### Input:
Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path.
### Response:
def JoinKeyPath(path_segments):
"""Joins the path segments into key path.
Args:
path_segments (list[str]): Windows Registry key path segments.
Returns:
str: key path.
"""
# This is an optimized way to combine the path segments into a single path
# and combine multiple successive path separators to one.
# Split all the path segments based on the path (segment) separator.
path_segments = [
segment.split(definitions.KEY_PATH_SEPARATOR)
for segment in path_segments]
# Flatten the sublists into one list.
path_segments = [
element for sublist in path_segments for element in sublist]
# Remove empty path segments.
path_segments = filter(None, path_segments)
key_path = definitions.KEY_PATH_SEPARATOR.join(path_segments)
if not key_path.startswith('HKEY_'):
key_path = '{0:s}{1:s}'.format(definitions.KEY_PATH_SEPARATOR, key_path)
return key_path |
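A quick illustration of the `JoinKeyPath` row above. The sketch below re-implements the same split/flatten/filter/join steps with a hardcoded separator, since `definitions.KEY_PATH_SEPARATOR` belongs to the surrounding package; assuming it is the Windows Registry backslash, the behaviour looks like this:

```python
# Minimal standalone sketch of the same joining logic, assuming the
# package's KEY_PATH_SEPARATOR is the Windows Registry backslash.
KEY_PATH_SEPARATOR = '\\'

def join_key_path(path_segments):
    # Split each segment on the separator, flatten, and drop empty pieces.
    parts = [p for segment in path_segments
             for p in segment.split(KEY_PATH_SEPARATOR) if p]
    key_path = KEY_PATH_SEPARATOR.join(parts)
    # Paths that do not start at a hive get a leading separator.
    if not key_path.startswith('HKEY_'):
        key_path = KEY_PATH_SEPARATOR + key_path
    return key_path

print(join_key_path(['HKEY_LOCAL_MACHINE\\', 'Software', '\\Microsoft']))
# HKEY_LOCAL_MACHINE\Software\Microsoft
print(join_key_path(['Software', 'Classes']))
# \Software\Classes
```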
def get_link_name (self, tag, attrs, attr):
"""Parse attrs for link name. Return name of link."""
if tag == 'a' and attr == 'href':
# Look for name only up to MAX_NAMELEN characters
data = self.parser.peek(MAX_NAMELEN)
data = data.decode(self.parser.encoding, "ignore")
name = linkname.href_name(data)
if not name:
name = attrs.get_true('title', u'')
elif tag == 'img':
name = attrs.get_true('alt', u'')
if not name:
name = attrs.get_true('title', u'')
else:
name = u""
return name | Parse attrs for link name. Return name of link. | Below is the instruction that describes the task:
### Input:
Parse attrs for link name. Return name of link.
### Response:
def get_link_name (self, tag, attrs, attr):
"""Parse attrs for link name. Return name of link."""
if tag == 'a' and attr == 'href':
# Look for name only up to MAX_NAMELEN characters
data = self.parser.peek(MAX_NAMELEN)
data = data.decode(self.parser.encoding, "ignore")
name = linkname.href_name(data)
if not name:
name = attrs.get_true('title', u'')
elif tag == 'img':
name = attrs.get_true('alt', u'')
if not name:
name = attrs.get_true('title', u'')
else:
name = u""
return name |
def is_arabicstring(text):
""" Checks for an Arabic standard Unicode block characters
An arabic string can contain spaces, digits and pounctuation.
but only arabic standard characters, not extended arabic
@param text: input text
@type text: unicode
@return: True if all charaters are in Arabic block
@rtype: Boolean
"""
if re.search(u"([^\u0600-\u0652%s%s%s\s\d])" \
% (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), text):
return False
return True | Checks for an Arabic standard Unicode block characters
An arabic string can contain spaces, digits and pounctuation.
but only arabic standard characters, not extended arabic
@param text: input text
@type text: unicode
@return: True if all charaters are in Arabic block
@rtype: Boolean | Below is the instruction that describes the task:
### Input:
Checks for an Arabic standard Unicode block characters
An arabic string can contain spaces, digits and pounctuation.
but only arabic standard characters, not extended arabic
@param text: input text
@type text: unicode
@return: True if all charaters are in Arabic block
@rtype: Boolean
### Response:
def is_arabicstring(text):
""" Checks for an Arabic standard Unicode block characters
An arabic string can contain spaces, digits and pounctuation.
but only arabic standard characters, not extended arabic
@param text: input text
@type text: unicode
@return: True if all charaters are in Arabic block
@rtype: Boolean
"""
if re.search(u"([^\u0600-\u0652%s%s%s\s\d])" \
% (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), text):
return False
return True |
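For the `is_arabicstring` row above, a rough standalone sketch of the same check. It keeps only the basic U+0600 to U+0652 range plus whitespace and digits and omits the package's LAM_ALEF ligature constants, so it is only an approximation of the original predicate:

```python
import re

def is_arabic_only(text):
    # True when every character is in U+0600..U+0652, whitespace, or a digit.
    return re.search(r"[^\u0600-\u0652\s\d]", text) is None

print(is_arabic_only("\u0645\u0631\u062d\u0628\u0627"))  # True ("marhaba")
print(is_arabic_only("hello"))                           # False
```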
def update_policy(self,defaultHeaders):
""" if policy in default but not input still return """
if self.inputs is not None:
for k,v in defaultHeaders.items():
if k not in self.inputs:
self.inputs[k] = v
return self.inputs
else:
return self.inputs | if policy in default but not input still return | Below is the instruction that describes the task:
### Input:
if policy in default but not input still return
### Response:
def update_policy(self,defaultHeaders):
""" if policy in default but not input still return """
if self.inputs is not None:
for k,v in defaultHeaders.items():
if k not in self.inputs:
self.inputs[k] = v
return self.inputs
else:
return self.inputs |
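The `update_policy` row above merges default headers into caller-supplied ones without overwriting them. A small self-contained sketch with made-up header names (the real keys depend on the calling code) shows the effect:

```python
# Hypothetical values illustrating the merge: default values are copied in
# only for keys the caller did not supply.
defaults = {'X-Frame-Options': 'SAMEORIGIN', 'X-XSS-Protection': '1; mode=block'}
inputs = {'X-Frame-Options': 'DENY'}

merged = dict(inputs)
for key, value in defaults.items():
    if key not in merged:
        merged[key] = value

print(merged)
# {'X-Frame-Options': 'DENY', 'X-XSS-Protection': '1; mode=block'}
```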
def execute(self, email):
"""Execute use case handling."""
print('Sign up user {0}'.format(email))
self.email_sender.send(email, 'Welcome, "{}"'.format(email)) | Execute use case handling. | Below is the instruction that describes the task:
### Input:
Execute use case handling.
### Response:
def execute(self, email):
"""Execute use case handling."""
print('Sign up user {0}'.format(email))
self.email_sender.send(email, 'Welcome, "{}"'.format(email)) |
def moist_static_energy(heights, temperature, specific_humidity):
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
heights : array-like
Atmospheric height
temperature : array-like
Atmospheric temperature
specific_humidity : array-like
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy
"""
return (dry_static_energy(heights, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg') | r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
heights : array-like
Atmospheric height
temperature : array-like
Atmospheric temperature
specific_humidity : array-like
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy | Below is the instruction that describes the task:
### Input:
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
heights : array-like
Atmospheric height
temperature : array-like
Atmospheric temperature
specific_humidity : array-like
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy
### Response:
def moist_static_energy(heights, temperature, specific_humidity):
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math::\text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
heights : array-like
Atmospheric height
temperature : array-like
Atmospheric temperature
specific_humidity : array-like
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy
"""
return (dry_static_energy(heights, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg') |
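To make the `moist_static_energy` formula above concrete, here is a rough back-of-the-envelope evaluation of s = c_pd*T + g*z + L_v*q with typical constant values. The real function is unit-aware and builds on `dry_static_energy`, so these plain floats are only illustrative:

```python
# Rough numeric sketch of s = c_pd*T + g*z + L_v*q using typical constants.
c_pd = 1005.0      # J / (kg K), specific heat of dry air (approximate)
g = 9.81           # m / s**2
L_v = 2.5e6        # J / kg, latent heat of vaporization (approximate)

T = 290.0          # K, temperature
z = 1000.0         # m, height
q = 0.01           # kg / kg, specific humidity (dimensionless)

mse_j_per_kg = c_pd * T + g * z + L_v * q
print(mse_j_per_kg / 1000.0, 'kJ/kg')   # ~326 kJ/kg
```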
def find_descriptor(self, uuid):
"""Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found.
"""
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None | Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found. | Below is the instruction that describes the task:
### Input:
Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found.
### Response:
def find_descriptor(self, uuid):
"""Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found.
"""
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None |
def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
"""Parses protein data for a certain protein into tsv output
dictionary"""
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report) | Parses protein data for a certain protein into tsv output
dictionary | Below is the instruction that describes the task:
### Input:
Parses protein data for a certain protein into tsv output
dictionary
### Response:
def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
"""Parses protein data for a certain protein into tsv output
dictionary"""
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report) |
def no_wait_release(self, connection: Connection):
'''Synchronous version of :meth:`release`.'''
_logger.debug('No wait check in.')
release_task = asyncio.get_event_loop().create_task(
self.release(connection)
)
self._release_tasks.add(release_task) | Synchronous version of :meth:`release`. | Below is the instruction that describes the task:
### Input:
Synchronous version of :meth:`release`.
### Response:
def no_wait_release(self, connection: Connection):
'''Synchronous version of :meth:`release`.'''
_logger.debug('No wait check in.')
release_task = asyncio.get_event_loop().create_task(
self.release(connection)
)
self._release_tasks.add(release_task) |
def build_absolute_uri(self, uri):
"""
Return a fully qualified absolute url for the given uri.
"""
request = self.context.get('request', None)
return (
request.build_absolute_uri(uri) if request is not None else uri
) | Return a fully qualified absolute url for the given uri. | Below is the instruction that describes the task:
### Input:
Return a fully qualified absolute url for the given uri.
### Response:
def build_absolute_uri(self, uri):
"""
Return a fully qualified absolute url for the given uri.
"""
request = self.context.get('request', None)
return (
request.build_absolute_uri(uri) if request is not None else uri
) |
def _format_coredump_stdout(cmd_ret):
'''
Helper function to format the stdout from the get_coredump_network_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
'''
ret_dict = {}
for line in cmd_ret['stdout'].splitlines():
line = line.strip().lower()
if line.startswith('enabled:'):
enabled = line.split(':')
if 'true' in enabled[1]:
ret_dict['enabled'] = True
else:
ret_dict['enabled'] = False
break
if line.startswith('host vnic:'):
host_vnic = line.split(':')
ret_dict['host_vnic'] = host_vnic[1].strip()
if line.startswith('network server ip:'):
ip = line.split(':')
ret_dict['ip'] = ip[1].strip()
if line.startswith('network server port:'):
ip_port = line.split(':')
ret_dict['port'] = ip_port[1].strip()
return ret_dict | Helper function to format the stdout from the get_coredump_network_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call. | Below is the instruction that describes the task:
### Input:
Helper function to format the stdout from the get_coredump_network_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
### Response:
def _format_coredump_stdout(cmd_ret):
'''
Helper function to format the stdout from the get_coredump_network_config function.
cmd_ret
The return dictionary that comes from a cmd.run_all call.
'''
ret_dict = {}
for line in cmd_ret['stdout'].splitlines():
line = line.strip().lower()
if line.startswith('enabled:'):
enabled = line.split(':')
if 'true' in enabled[1]:
ret_dict['enabled'] = True
else:
ret_dict['enabled'] = False
break
if line.startswith('host vnic:'):
host_vnic = line.split(':')
ret_dict['host_vnic'] = host_vnic[1].strip()
if line.startswith('network server ip:'):
ip = line.split(':')
ret_dict['ip'] = ip[1].strip()
if line.startswith('network server port:'):
ip_port = line.split(':')
ret_dict['port'] = ip_port[1].strip()
return ret_dict |
def gen_blocks(output, ascii_props=False, append=False, prefix=""):
"""Generate Unicode blocks."""
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
f.write('%s_blocks = {' % prefix)
no_block = []
last = -1
max_range = MAXASCII if ascii_props else MAXUNICODE
formatter = bytesformat if ascii_props else uniformat
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'Blocks.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split(';')
if len(data) < 2:
continue
block = [int(i, 16) for i in data[0].strip().split('..')]
if block[0] > last + 1:
if (last + 1) <= max_range:
endval = block[0] - 1 if (block[0] - 1) < max_range else max_range
no_block.append((last + 1, endval))
last = block[1]
name = format_name(data[1])
inverse_range = []
if block[0] > max_range:
if ascii_props:
f.write('\n "%s": "",' % name)
f.write('\n "^%s": "%s-%s",' % (name, formatter(0), formatter(max_range)))
continue
if block[0] > 0:
inverse_range.append("%s-%s" % (formatter(0), formatter(block[0] - 1)))
if block[1] < max_range:
inverse_range.append("%s-%s" % (formatter(block[1] + 1), formatter(max_range)))
f.write('\n "%s": "%s-%s",' % (name, formatter(block[0]), formatter(block[1])))
f.write('\n "^%s": "%s",' % (name, ''.join(inverse_range)))
if last < max_range:
if (last + 1) <= max_range:
no_block.append((last + 1, max_range))
last = -1
no_block_inverse = []
if not no_block:
no_block_inverse.append((0, max_range))
else:
for piece in no_block:
if piece[0] > last + 1:
no_block_inverse.append((last + 1, piece[0] - 1))
last = piece[1]
for block, name in ((no_block, 'noblock'), (no_block_inverse, '^noblock')):
f.write('\n "%s": "' % name)
for piece in block:
if piece[0] == piece[1]:
f.write(formatter(piece[0]))
else:
f.write("%s-%s" % (formatter(piece[0]), formatter(piece[1])))
f.write('",')
f.write('\n}\n') | Generate Unicode blocks. | Below is the instruction that describes the task:
### Input:
Generate Unicode blocks.
### Response:
def gen_blocks(output, ascii_props=False, append=False, prefix=""):
"""Generate Unicode blocks."""
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
f.write('%s_blocks = {' % prefix)
no_block = []
last = -1
max_range = MAXASCII if ascii_props else MAXUNICODE
formatter = bytesformat if ascii_props else uniformat
with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'Blocks.txt'), 'r', 'utf-8') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split(';')
if len(data) < 2:
continue
block = [int(i, 16) for i in data[0].strip().split('..')]
if block[0] > last + 1:
if (last + 1) <= max_range:
endval = block[0] - 1 if (block[0] - 1) < max_range else max_range
no_block.append((last + 1, endval))
last = block[1]
name = format_name(data[1])
inverse_range = []
if block[0] > max_range:
if ascii_props:
f.write('\n "%s": "",' % name)
f.write('\n "^%s": "%s-%s",' % (name, formatter(0), formatter(max_range)))
continue
if block[0] > 0:
inverse_range.append("%s-%s" % (formatter(0), formatter(block[0] - 1)))
if block[1] < max_range:
inverse_range.append("%s-%s" % (formatter(block[1] + 1), formatter(max_range)))
f.write('\n "%s": "%s-%s",' % (name, formatter(block[0]), formatter(block[1])))
f.write('\n "^%s": "%s",' % (name, ''.join(inverse_range)))
if last < max_range:
if (last + 1) <= max_range:
no_block.append((last + 1, max_range))
last = -1
no_block_inverse = []
if not no_block:
no_block_inverse.append((0, max_range))
else:
for piece in no_block:
if piece[0] > last + 1:
no_block_inverse.append((last + 1, piece[0] - 1))
last = piece[1]
for block, name in ((no_block, 'noblock'), (no_block_inverse, '^noblock')):
f.write('\n "%s": "' % name)
for piece in block:
if piece[0] == piece[1]:
f.write(formatter(piece[0]))
else:
f.write("%s-%s" % (formatter(piece[0]), formatter(piece[1])))
f.write('",')
f.write('\n}\n') |
def add_bits4subtree_ids(self, relevant_ids):
"""Adds a long integer bits4subtree_ids to each node (Fails cryptically if that field is already present!)
relevant_ids can be a dict of _id to bit representation.
If it is not supplied, a dict will be created by registering the leaf._id into a dict (and returning the dict)
the bits4subtree_ids will have a 1 bit if the _id is at or descended from this node and 0 if it is not
in this subtree.
Returns the dict of ids -> longs
Also creates a dict of long -> node mappings for all internal nodes. Stores this in self as bits2internal_node
"""
if relevant_ids:
checking = True
else:
checking = False
relevant_ids = {}
bit = 1
self.bits2internal_node = {}
for node in self.postorder_node_iter():
p = node._parent
if p is None:
if not node.is_leaf:
self.bits2internal_node[node.bits4subtree_ids] = node
continue
if not hasattr(p, 'bits4subtree_ids'):
p.bits4subtree_ids = 0
i = node._id
# _LOG.debug('node._id ={}'.format(i))
# _LOG.debug('Before par mrca... = {}'.format(p.bits4subtree_ids))
if checking:
b = relevant_ids.get(i)
if b:
if node.is_leaf:
node.bits4subtree_ids = b
else:
node.bits4subtree_ids |= b
else:
if node.is_leaf:
relevant_ids[i] = bit
node.bits4subtree_ids = bit
bit <<= 1
if not node.is_leaf:
self.bits2internal_node[node.bits4subtree_ids] = node
# _LOG.debug('while add bitrep... self.bits2internal_node = {}'.format(self.bits2internal_node))
p.bits4subtree_ids |= node.bits4subtree_ids
return relevant_ids | Adds a long integer bits4subtree_ids to each node (Fails cryptically if that field is already present!)
relevant_ids can be a dict of _id to bit representation.
If it is not supplied, a dict will be created by registering the leaf._id into a dict (and returning the dict)
the bits4subtree_ids will have a 1 bit if the _id is at or descended from this node and 0 if it is not
in this subtree.
Returns the dict of ids -> longs
Also creates a dict of long -> node mappings for all internal nodes. Stores this in self as bits2internal_node | Below is the instruction that describes the task:
### Input:
Adds a long integer bits4subtree_ids to each node (Fails cryptically if that field is already present!)
relevant_ids can be a dict of _id to bit representation.
If it is not supplied, a dict will be created by registering the leaf._id into a dict (and returning the dict)
the bits4subtree_ids will have a 1 bit if the _id is at or descended from this node and 0 if it is not
in this subtree.
Returns the dict of ids -> longs
Also creates a dict of long -> node mappings for all internal nodes. Stores this in self as bits2internal_node
### Response:
def add_bits4subtree_ids(self, relevant_ids):
"""Adds a long integer bits4subtree_ids to each node (Fails cryptically if that field is already present!)
relevant_ids can be a dict of _id to bit representation.
If it is not supplied, a dict will be created by registering the leaf._id into a dict (and returning the dict)
the bits4subtree_ids will have a 1 bit if the _id is at or descended from this node and 0 if it is not
in this subtree.
Returns the dict of ids -> longs
Also creates a dict of long -> node mappings for all internal nodes. Stores this in self as bits2internal_node
"""
if relevant_ids:
checking = True
else:
checking = False
relevant_ids = {}
bit = 1
self.bits2internal_node = {}
for node in self.postorder_node_iter():
p = node._parent
if p is None:
if not node.is_leaf:
self.bits2internal_node[node.bits4subtree_ids] = node
continue
if not hasattr(p, 'bits4subtree_ids'):
p.bits4subtree_ids = 0
i = node._id
# _LOG.debug('node._id ={}'.format(i))
# _LOG.debug('Before par mrca... = {}'.format(p.bits4subtree_ids))
if checking:
b = relevant_ids.get(i)
if b:
if node.is_leaf:
node.bits4subtree_ids = b
else:
node.bits4subtree_ids |= b
else:
if node.is_leaf:
relevant_ids[i] = bit
node.bits4subtree_ids = bit
bit <<= 1
if not node.is_leaf:
self.bits2internal_node[node.bits4subtree_ids] = node
# _LOG.debug('while add bitrep... self.bits2internal_node = {}'.format(self.bits2internal_node))
p.bits4subtree_ids |= node.bits4subtree_ids
return relevant_ids |
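The `add_bits4subtree_ids` row above builds one bit per leaf and ORs the bits upward so every internal node ends up with a bitset describing its subtree. A toy sketch of that idea, detached from the tree class:

```python
# Toy version of the subtree-bitset idea: one bit per leaf, OR'd upward
# in postorder so each internal node's integer encodes its leaf set.
leaf_bits = {'A': 0b001, 'B': 0b010, 'C': 0b100}

bits2internal_node = {}
ab_bits = leaf_bits['A'] | leaf_bits['B']      # internal node above A and B
bits2internal_node[ab_bits] = 'node_AB'
root_bits = ab_bits | leaf_bits['C']           # root covers all three leaves
bits2internal_node[root_bits] = 'root'

# The clade containing exactly {A, B} is found by its bitset in O(1).
print(bits2internal_node[0b011])   # node_AB
print(bits2internal_node[0b111])   # root
```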
def delete(self, block_type, block_num):
"""
Deletes a block
:param block_type: Type of block
:param block_num: Bloc number
"""
logger.info("deleting block")
blocktype = snap7.snap7types.block_types[block_type]
result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
return result | Deletes a block
:param block_type: Type of block
:param block_num: Bloc number | Below is the instruction that describes the task:
### Input:
Deletes a block
:param block_type: Type of block
:param block_num: Bloc number
### Response:
def delete(self, block_type, block_num):
"""
Deletes a block
:param block_type: Type of block
:param block_num: Bloc number
"""
logger.info("deleting block")
blocktype = snap7.snap7types.block_types[block_type]
result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
return result |
def maps_get_rules_output_rules_policyname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
policyname = ET.SubElement(rules, "policyname")
policyname.text = kwargs.pop('policyname')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def maps_get_rules_output_rules_policyname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
policyname = ET.SubElement(rules, "policyname")
policyname.text = kwargs.pop('policyname')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _assign_udf_desc_extents(descs, start_extent):
# type: (PyCdlib._UDFDescriptors, int) -> None
'''
An internal function to assign a consecutive sequence of extents for the
given set of UDF Descriptors, starting at the given extent.
Parameters:
descs - The PyCdlib._UDFDescriptors object to assign extents for.
start_extent - The starting extent to assign from.
Returns:
Nothing.
'''
current_extent = start_extent
descs.pvd.set_extent_location(current_extent)
current_extent += 1
descs.impl_use.set_extent_location(current_extent)
current_extent += 1
descs.partition.set_extent_location(current_extent)
current_extent += 1
descs.logical_volume.set_extent_location(current_extent)
current_extent += 1
descs.unallocated_space.set_extent_location(current_extent)
current_extent += 1
descs.terminator.set_extent_location(current_extent)
current_extent += 1 | An internal function to assign a consecutive sequence of extents for the
given set of UDF Descriptors, starting at the given extent.
Parameters:
descs - The PyCdlib._UDFDescriptors object to assign extents for.
start_extent - The starting extent to assign from.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
An internal function to assign a consecutive sequence of extents for the
given set of UDF Descriptors, starting at the given extent.
Parameters:
descs - The PyCdlib._UDFDescriptors object to assign extents for.
start_extent - The starting extent to assign from.
Returns:
Nothing.
### Response:
def _assign_udf_desc_extents(descs, start_extent):
# type: (PyCdlib._UDFDescriptors, int) -> None
'''
An internal function to assign a consecutive sequence of extents for the
given set of UDF Descriptors, starting at the given extent.
Parameters:
descs - The PyCdlib._UDFDescriptors object to assign extents for.
start_extent - The starting extent to assign from.
Returns:
Nothing.
'''
current_extent = start_extent
descs.pvd.set_extent_location(current_extent)
current_extent += 1
descs.impl_use.set_extent_location(current_extent)
current_extent += 1
descs.partition.set_extent_location(current_extent)
current_extent += 1
descs.logical_volume.set_extent_location(current_extent)
current_extent += 1
descs.unallocated_space.set_extent_location(current_extent)
current_extent += 1
descs.terminator.set_extent_location(current_extent)
current_extent += 1 |
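The `_assign_udf_desc_extents` row above assigns six consecutive extents, one descriptor at a time. A compact equivalent (a sketch, assuming only that each descriptor exposes `set_extent_location`) iterates over the descriptors in order; the starting extent 257 below is an arbitrary example value:

```python
from types import SimpleNamespace

class _Desc:
    """Tiny stand-in exposing only set_extent_location."""
    def set_extent_location(self, extent):
        self.extent = extent

def assign_extents(descs, start_extent):
    # Same ordering as the original: pvd, impl_use, partition,
    # logical_volume, unallocated_space, terminator.
    ordered = (descs.pvd, descs.impl_use, descs.partition,
               descs.logical_volume, descs.unallocated_space, descs.terminator)
    for offset, desc in enumerate(ordered):
        desc.set_extent_location(start_extent + offset)

descs = SimpleNamespace(pvd=_Desc(), impl_use=_Desc(), partition=_Desc(),
                        logical_volume=_Desc(), unallocated_space=_Desc(),
                        terminator=_Desc())
assign_extents(descs, 257)
print(descs.pvd.extent, descs.terminator.extent)   # 257 262
```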
def get_attributes(**kwargs):
"""
Get all attributes
"""
attrs = db.DBSession.query(Attr).order_by(Attr.name).all()
return attrs | Get all attributes | Below is the instruction that describes the task:
### Input:
Get all attributes
### Response:
def get_attributes(**kwargs):
"""
Get all attributes
"""
attrs = db.DBSession.query(Attr).order_by(Attr.name).all()
return attrs |
def file_matches(filename, patterns):
"""Does this filename match any of the patterns?"""
return any(fnmatch.fnmatch(filename, pat)
or fnmatch.fnmatch(os.path.basename(filename), pat)
for pat in patterns) | Does this filename match any of the patterns? | Below is the instruction that describes the task:
### Input:
Does this filename match any of the patterns?
### Response:
def file_matches(filename, patterns):
"""Does this filename match any of the patterns?"""
return any(fnmatch.fnmatch(filename, pat)
or fnmatch.fnmatch(os.path.basename(filename), pat)
for pat in patterns) |
def substatements(self) -> List[Statement]:
"""Parse substatements.
Raises:
EndOfInput: If past the end of input.
"""
res = []
self.opt_separator()
while self.peek() != "}":
res.append(self.statement())
self.opt_separator()
self.offset += 1
return res | Parse substatements.
Raises:
EndOfInput: If past the end of input. | Below is the instruction that describes the task:
### Input:
Parse substatements.
Raises:
EndOfInput: If past the end of input.
### Response:
def substatements(self) -> List[Statement]:
"""Parse substatements.
Raises:
EndOfInput: If past the end of input.
"""
res = []
self.opt_separator()
while self.peek() != "}":
res.append(self.statement())
self.opt_separator()
self.offset += 1
return res |
def index(self, sub, *args):
"""
Like newstr.find() but raise ValueError when the substring is not
found.
"""
pos = self.find(sub, *args)
if pos == -1:
raise ValueError('substring not found')
return pos | Like newstr.find() but raise ValueError when the substring is not
found. | Below is the instruction that describes the task:
### Input:
Like newstr.find() but raise ValueError when the substring is not
found.
### Response:
def index(self, sub, *args):
"""
Like newstr.find() but raise ValueError when the substring is not
found.
"""
pos = self.find(sub, *args)
if pos == -1:
raise ValueError('substring not found')
return pos |
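The `index` row above mirrors the built-in `str` semantics: `find()` signals a miss with -1, while `index()` raises `ValueError`. The built-in behaves the same way:

```python
s = 'hello world'
print(s.find('wor'))    # 6
print(s.find('xyz'))    # -1
try:
    s.index('xyz')
except ValueError as exc:
    print(exc)          # substring not found
```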
def remove_board(board_id):
"""remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None
"""
log.debug('remove %s', board_id)
lines = boards_txt().lines()
lines = filter(lambda x: not x.strip().startswith(board_id + '.'), lines)
boards_txt().write_lines(lines) | remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None | Below is the instruction that describes the task:
### Input:
remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None
### Response:
def remove_board(board_id):
"""remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None
"""
log.debug('remove %s', board_id)
lines = boards_txt().lines()
lines = filter(lambda x: not x.strip().startswith(board_id + '.'), lines)
boards_txt().write_lines(lines) |
def sum(self, phi1, inplace=True):
"""
DiscreteFactor sum with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance.
DiscreteFactor to be added.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.sum(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]])
"""
phi = self if inplace else self.copy()
if isinstance(phi1, (int, float)):
phi.values += phi1
else:
phi1 = phi1.copy()
# modifying phi to add new variables
extra_vars = set(phi1.variables) - set(phi.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi.values = phi.values[tuple(slice_)]
phi.variables.extend(extra_vars)
new_var_card = phi1.get_cardinality(extra_vars)
phi.cardinality = np.append(phi.cardinality, [new_var_card[var] for var in extra_vars])
# modifying phi1 to add new variables
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
# No need to modify cardinality as we don't need it.
# rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], \
phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values + phi1.values
if not inplace:
return phi | DiscreteFactor sum with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance.
DiscreteFactor to be added.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.sum(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]]) | Below is the instruction that describes the task:
### Input:
DiscreteFactor sum with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance.
DiscreteFactor to be added.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.sum(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]])
### Response:
def sum(self, phi1, inplace=True):
"""
DiscreteFactor sum with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance.
DiscreteFactor to be added.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.sum(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]])
"""
phi = self if inplace else self.copy()
if isinstance(phi1, (int, float)):
phi.values += phi1
else:
phi1 = phi1.copy()
# modifying phi to add new variables
extra_vars = set(phi1.variables) - set(phi.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi.values = phi.values[tuple(slice_)]
phi.variables.extend(extra_vars)
new_var_card = phi1.get_cardinality(extra_vars)
phi.cardinality = np.append(phi.cardinality, [new_var_card[var] for var in extra_vars])
# modifying phi1 to add new variables
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
# No need to modify cardinality as we don't need it.
# rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], \
phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values + phi1.values
if not inplace:
return phi |
def ok(self):
"""Validate color selection and destroy dialog."""
rgb, hsv, hexa = self.square.get()
if self.alpha_channel:
hexa = self.hexa.get()
rgb += (self.alpha.get(),)
self.color = rgb, hsv, hexa
self.destroy() | Validate color selection and destroy dialog. | Below is the instruction that describes the task:
### Input:
Validate color selection and destroy dialog.
### Response:
def ok(self):
"""Validate color selection and destroy dialog."""
rgb, hsv, hexa = self.square.get()
if self.alpha_channel:
hexa = self.hexa.get()
rgb += (self.alpha.get(),)
self.color = rgb, hsv, hexa
self.destroy() |
def click_at_coordinates(self, x, y):
"""
Click at (x,y) coordinates.
"""
self.device.click(int(x), int(y)) | Click at (x,y) coordinates. | Below is the instruction that describes the task:
### Input:
Click at (x,y) coordinates.
### Response:
def click_at_coordinates(self, x, y):
"""
Click at (x,y) coordinates.
"""
self.device.click(int(x), int(y)) |
def dataset_path_iterator(file_path: str) -> Iterator[str]:
"""
An iterator returning file_paths in a directory
containing CONLL-formatted files.
"""
logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
for root, _, files in list(os.walk(file_path)):
for data_file in files:
# These are a relic of the dataset pre-processing. Every
# file will be duplicated - one file called filename.gold_skel
# and one generated from the preprocessing called filename.gold_conll.
if not data_file.endswith("gold_conll"):
continue
yield os.path.join(root, data_file) | An iterator returning file_paths in a directory
containing CONLL-formatted files. | Below is the instruction that describes the task:
### Input:
An iterator returning file_paths in a directory
containing CONLL-formatted files.
### Response:
def dataset_path_iterator(file_path: str) -> Iterator[str]:
"""
An iterator returning file_paths in a directory
containing CONLL-formatted files.
"""
logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
for root, _, files in list(os.walk(file_path)):
for data_file in files:
# These are a relic of the dataset pre-processing. Every
# file will be duplicated - one file called filename.gold_skel
# and one generated from the preprocessing called filename.gold_conll.
if not data_file.endswith("gold_conll"):
continue
yield os.path.join(root, data_file) |
def get_or_create_author(self, name: str) -> Author:
"""Get an author by name, or creates one if it does not exist."""
author = self.object_cache_author.get(name)
if author is not None:
self.session.add(author)
return author
author = self.get_author_by_name(name)
if author is not None:
self.object_cache_author[name] = author
return author
author = self.object_cache_author[name] = Author.from_name(name=name)
self.session.add(author)
return author | Get an author by name, or creates one if it does not exist. | Below is the instruction that describes the task:
### Input:
Get an author by name, or creates one if it does not exist.
### Response:
def get_or_create_author(self, name: str) -> Author:
"""Get an author by name, or creates one if it does not exist."""
author = self.object_cache_author.get(name)
if author is not None:
self.session.add(author)
return author
author = self.get_author_by_name(name)
if author is not None:
self.object_cache_author[name] = author
return author
author = self.object_cache_author[name] = Author.from_name(name=name)
self.session.add(author)
return author |
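The `get_or_create_author` row above layers an in-memory cache over a database lookup before falling back to creation. A generic sketch of that pattern, with a plain dict standing in for the SQLAlchemy session and a fabricated ID scheme:

```python
# Generic get-or-create-with-cache pattern; names and IDs are illustrative.
_cache = {}
_database = {'Ada Lovelace': 'author-1'}   # stand-in for the DB lookup

def get_or_create(name):
    if name in _cache:                      # 1. in-memory cache hit
        return _cache[name]
    found = _database.get(name)             # 2. query the backing store
    if found is not None:
        _cache[name] = found
        return found
    created = _database[name] = 'author-{}'.format(len(_database) + 1)  # 3. create
    _cache[name] = created
    return created

print(get_or_create('Ada Lovelace'))   # author-1 (from the store)
print(get_or_create('Alan Turing'))    # author-2 (newly created)
print(get_or_create('Alan Turing'))    # author-2 (now from the cache)
```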
def other_Orange_tables(self):
'''
Returns the related tables as Orange example tables.
:rtype: list
'''
target_table = self.db.target_table
if not self.db.orng_tables:
return [self.convert_table(table, None) for table in self.db.tables if table != target_table]
else:
return [table for name, table in list(self.db.orng_tables.items()) if name != target_table] | Returns the related tables as Orange example tables.
:rtype: list | Below is the instruction that describes the task:
### Input:
Returns the related tables as Orange example tables.
:rtype: list
### Response:
def other_Orange_tables(self):
'''
Returns the related tables as Orange example tables.
:rtype: list
'''
target_table = self.db.target_table
if not self.db.orng_tables:
return [self.convert_table(table, None) for table in self.db.tables if table != target_table]
else:
return [table for name, table in list(self.db.orng_tables.items()) if name != target_table] |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
'cf-bf-false-positives',
'cf-bf-false-ratio',
'cf-bf-space-used',
'cf-keycache-hit-rate',
'cf-keycache-hits',
'cf-keycache-requests',
'cf-live-disk-used',
'cf-live-sstables',
'cf-pending-tasks',
'cf-read-latency-op',
'cf-read-ops',
'cf-rowcache-hit-rate',
'cf-rowcache-hits',
'cf-rowcache-requests',
'cf-total-disk-used',
'cf-write-latency-op',
'cf-write-ops',
'cms-collection-count',
'cms-collection-time',
'data-load',
'heap-committed',
'heap-max',
'heap-used',
'key-cache-hit-rate',
'key-cache-hits',
'key-cache-requests',
'nonheap-committed',
'nonheap-max',
'nonheap-used',
'pending-compaction-tasks',
'pending-flush-sorter-tasks',
'pending-flushes',
'pending-gossip-tasks',
'pending-hinted-handoff',
'pending-internal-responses',
'pending-memtable-post-flushers',
'pending-migrations',
'pending-misc-tasks',
'pending-read-ops',
'pending-read-repair-tasks',
'pending-repair-tasks',
'pending-repl-on-write-tasks',
'pending-request-responses',
'pending-streams',
'pending-write-ops',
'read-latency-op',
'read-ops',
'row-cache-hit-rate',
'row-cache-hits',
'row-cache-requests',
'solr-avg-time-per-req',
'solr-errors',
'solr-requests',
'solr-timeouts',
'total-bytes-compacted',
'total-compactions-completed',
'write-latency-op',
'write-ops',
]
config.update({
'host': '127.0.0.1',
'port': 8888,
'path': 'cassandra',
'node_group': '*',
'metrics': ','.join(metrics),
'default_tail_opts': '&forecast=0&node_aggregation=1',
})
return config | Returns the default collector settings | Below is the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
'cf-bf-false-positives',
'cf-bf-false-ratio',
'cf-bf-space-used',
'cf-keycache-hit-rate',
'cf-keycache-hits',
'cf-keycache-requests',
'cf-live-disk-used',
'cf-live-sstables',
'cf-pending-tasks',
'cf-read-latency-op',
'cf-read-ops',
'cf-rowcache-hit-rate',
'cf-rowcache-hits',
'cf-rowcache-requests',
'cf-total-disk-used',
'cf-write-latency-op',
'cf-write-ops',
'cms-collection-count',
'cms-collection-time',
'data-load',
'heap-committed',
'heap-max',
'heap-used',
'key-cache-hit-rate',
'key-cache-hits',
'key-cache-requests',
'nonheap-committed',
'nonheap-max',
'nonheap-used',
'pending-compaction-tasks',
'pending-flush-sorter-tasks',
'pending-flushes',
'pending-gossip-tasks',
'pending-hinted-handoff',
'pending-internal-responses',
'pending-memtable-post-flushers',
'pending-migrations',
'pending-misc-tasks',
'pending-read-ops',
'pending-read-repair-tasks',
'pending-repair-tasks',
'pending-repl-on-write-tasks',
'pending-request-responses',
'pending-streams',
'pending-write-ops',
'read-latency-op',
'read-ops',
'row-cache-hit-rate',
'row-cache-hits',
'row-cache-requests',
'solr-avg-time-per-req',
'solr-errors',
'solr-requests',
'solr-timeouts',
'total-bytes-compacted',
'total-compactions-completed',
'write-latency-op',
'write-ops',
]
config.update({
'host': '127.0.0.1',
'port': 8888,
'path': 'cassandra',
'node_group': '*',
'metrics': ','.join(metrics),
'default_tail_opts': '&forecast=0&node_aggregation=1',
})
return config |
def Slot(self, slotnum):
"""
Slot sets the vtable key `voffset` to the current location in the
buffer.
"""
self.assertNested()
self.current_vtable[slotnum] = self.Offset() | Slot sets the vtable key `voffset` to the current location in the
buffer. | Below is the instruction that describes the task:
### Input:
Slot sets the vtable key `voffset` to the current location in the
buffer.
### Response:
def Slot(self, slotnum):
"""
Slot sets the vtable key `voffset` to the current location in the
buffer.
"""
self.assertNested()
self.current_vtable[slotnum] = self.Offset() |
def default_error_handler(socket, error_name, error_message, endpoint,
msg_id, quiet):
"""This is the default error handler, you can override this when
calling :func:`socketio.socketio_manage`.
It basically sends an event through the socket with the 'error' name.
See documentation for :meth:`Socket.error`.
:param quiet: if quiet, this handler will not send a packet to the
user, but only log for the server developer.
"""
pkt = dict(type='event', name='error',
args=[error_name, error_message],
endpoint=endpoint)
if msg_id:
pkt['id'] = msg_id
# Send an error event through the Socket
if not quiet:
socket.send_packet(pkt)
# Log that error somewhere for debugging...
log.error(u"default_error_handler: {}, {} (endpoint={}, msg_id={})".format(
error_name, error_message, endpoint, msg_id
)) | This is the default error handler, you can override this when
calling :func:`socketio.socketio_manage`.
It basically sends an event through the socket with the 'error' name.
See documentation for :meth:`Socket.error`.
:param quiet: if quiet, this handler will not send a packet to the
user, but only log for the server developer. | Below is the instruction that describes the task:
### Input:
This is the default error handler, you can override this when
calling :func:`socketio.socketio_manage`.
It basically sends an event through the socket with the 'error' name.
See documentation for :meth:`Socket.error`.
:param quiet: if quiet, this handler will not send a packet to the
user, but only log for the server developer.
### Response:
def default_error_handler(socket, error_name, error_message, endpoint,
msg_id, quiet):
"""This is the default error handler, you can override this when
calling :func:`socketio.socketio_manage`.
It basically sends an event through the socket with the 'error' name.
See documentation for :meth:`Socket.error`.
:param quiet: if quiet, this handler will not send a packet to the
user, but only log for the server developer.
"""
pkt = dict(type='event', name='error',
args=[error_name, error_message],
endpoint=endpoint)
if msg_id:
pkt['id'] = msg_id
# Send an error event through the Socket
if not quiet:
socket.send_packet(pkt)
# Log that error somewhere for debugging...
log.error(u"default_error_handler: {}, {} (endpoint={}, msg_id={})".format(
error_name, error_message, endpoint, msg_id
)) |
def write_nexus_files(self, force=False, quiet=False):
"""
Write nexus files to {workdir}/{name}/[0-N].nex, If the directory already
exists an exception will be raised unless you use the force flag which
will remove all files in the directory.
Parameters:
-----------
force (bool):
If True then all files in {workdir}/{name}/*.nex* will be removed.
"""
## clear existing files
existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
if any(existing):
if force:
for rfile in existing:
os.remove(rfile)
else:
path = os.path.join(self.workdir, self.name)
raise IPyradWarningExit(EXISTING_NEX_FILES.format(path))
## parse the loci or alleles file
with open(self.files.data) as infile:
loci = iter(infile.read().strip().split("|\n"))
## use entered samples or parse them from the file
if not self.samples:
with open(self.files.data) as infile:
samples = set((i.split()[0] for i in infile.readlines() \
if "//" not in i))
else:
samples = set(self.samples)
## keep track of how many loci pass filtering
totn = len(samples)
nloci = 0
## this set is just used for matching, then we randomly
## subsample for real within the locus so it varies
if self._alleles:
msamples = {i+rbin() for i in samples}
else:
msamples = samples
## write subsampled set of loci
for loc in loci:
## get names and seqs from locus
dat = loc.split("\n")[:-1]
try:
names = [i.split()[0] for i in dat]
snames = set(names)
seqs = np.array([list(i.split()[1]) for i in dat])
except IndexError:
print(ALLELESBUGFIXED)
continue
## check name matches
if len(snames.intersection(msamples)) == totn:
## prune sample names if alleles. Done here so it is randomly
## different in every locus which allele is selected from
## each sample (e.g., 0 or 1)
if self._alleles:
_samples = [i+rbin() for i in samples]
else:
_samples = samples
## re-order seqs to be in set order
seqsamp = seqs[[names.index(tax) for tax in _samples]]
## resolve ambiguities randomly if .loci file otherwise
## sample one of the alleles if .alleles file.
if not self._alleles:
seqsamp = _resolveambig(seqsamp)
## find parsimony informative sites
if _count_PIS(seqsamp, self.params.minsnps):
## keep the locus
nloci += 1
## remove empty columns given this sampling
copied = seqsamp.copy()
copied[copied == "-"] == "N"
rmcol = np.all(copied == "N", axis=0)
seqsamp = seqsamp[:, ~rmcol]
## write nexus file
if self._alleles:
## trim off the allele number
samps = [i.rsplit("_", 1)[0] for i in _samples]
mdict = dict(zip(samps, [i.tostring() for i in seqsamp]))
else:
mdict = dict(zip(_samples, [i.tostring() for i in seqsamp]))
self._write_nex(mdict, nloci)
## quit early if using maxloci
if nloci == self.params.maxloci:
break
## print data size
if not quiet:
path = os.path.join(self.workdir, self.name)
path = path.replace(os.path.expanduser("~"), "~")
print("wrote {} nexus files to {}".format(nloci, path)) | Write nexus files to {workdir}/{name}/[0-N].nex, If the directory already
exists an exception will be raised unless you use the force flag which
will remove all files in the directory.
Parameters:
-----------
force (bool):
If True then all files in {workdir}/{name}/*.nex* will be removed. | Below is the instruction that describes the task:
### Input:
Write nexus files to {workdir}/{name}/[0-N].nex, If the directory already
exists an exception will be raised unless you use the force flag which
will remove all files in the directory.
Parameters:
-----------
force (bool):
If True then all files in {workdir}/{name}/*.nex* will be removed.
### Response:
def write_nexus_files(self, force=False, quiet=False):
"""
Write nexus files to {workdir}/{name}/[0-N].nex, If the directory already
exists an exception will be raised unless you use the force flag which
will remove all files in the directory.
Parameters:
-----------
force (bool):
If True then all files in {workdir}/{name}/*.nex* will be removed.
"""
## clear existing files
existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
if any(existing):
if force:
for rfile in existing:
os.remove(rfile)
else:
path = os.path.join(self.workdir, self.name)
raise IPyradWarningExit(EXISTING_NEX_FILES.format(path))
## parse the loci or alleles file
with open(self.files.data) as infile:
loci = iter(infile.read().strip().split("|\n"))
## use entered samples or parse them from the file
if not self.samples:
with open(self.files.data) as infile:
samples = set((i.split()[0] for i in infile.readlines() \
if "//" not in i))
else:
samples = set(self.samples)
## keep track of how many loci pass filtering
totn = len(samples)
nloci = 0
## this set is just used for matching, then we randomly
## subsample for real within the locus so it varies
if self._alleles:
msamples = {i+rbin() for i in samples}
else:
msamples = samples
## write subsampled set of loci
for loc in loci:
## get names and seqs from locus
dat = loc.split("\n")[:-1]
try:
names = [i.split()[0] for i in dat]
snames = set(names)
seqs = np.array([list(i.split()[1]) for i in dat])
except IndexError:
print(ALLELESBUGFIXED)
continue
## check name matches
if len(snames.intersection(msamples)) == totn:
## prune sample names if alleles. Done here so it is randomly
## different in every locus which allele is selected from
## each sample (e.g., 0 or 1)
if self._alleles:
_samples = [i+rbin() for i in samples]
else:
_samples = samples
## re-order seqs to be in set order
seqsamp = seqs[[names.index(tax) for tax in _samples]]
## resolve ambiguities randomly if .loci file otherwise
## sample one of the alleles if .alleles file.
if not self._alleles:
seqsamp = _resolveambig(seqsamp)
## find parsimony informative sites
if _count_PIS(seqsamp, self.params.minsnps):
## keep the locus
nloci += 1
## remove empty columns given this sampling
copied = seqsamp.copy()
copied[copied == "-"] == "N"
rmcol = np.all(copied == "N", axis=0)
seqsamp = seqsamp[:, ~rmcol]
## write nexus file
if self._alleles:
## trim off the allele number
samps = [i.rsplit("_", 1)[0] for i in _samples]
mdict = dict(zip(samps, [i.tostring() for i in seqsamp]))
else:
mdict = dict(zip(_samples, [i.tostring() for i in seqsamp]))
self._write_nex(mdict, nloci)
## quit early if using maxloci
if nloci == self.params.maxloci:
break
## print data size
if not quiet:
path = os.path.join(self.workdir, self.name)
path = path.replace(os.path.expanduser("~"), "~")
print("wrote {} nexus files to {}".format(nloci, path)) |
def _parse(self, filename):
"""Opens data file and for each line, calls _eat_name_line"""
self.names = {}
with codecs.open(filename, encoding="iso8859-1") as f:
for line in f:
if any(map(lambda c: 128 < ord(c) < 160, line)):
line = line.encode("iso8859-1").decode("windows-1252")
self._eat_name_line(line.strip()) | Opens data file and for each line, calls _eat_name_line | Below is the instruction that describes the task:
### Input:
Opens data file and for each line, calls _eat_name_line
### Response:
def _parse(self, filename):
"""Opens data file and for each line, calls _eat_name_line"""
self.names = {}
with codecs.open(filename, encoding="iso8859-1") as f:
for line in f:
if any(map(lambda c: 128 < ord(c) < 160, line)):
line = line.encode("iso8859-1").decode("windows-1252")
self._eat_name_line(line.strip()) |
def _extract_stack(limit=10):
"""Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""
frame = sys._getframe().f_back
try:
stack = traceback.StackSummary.extract(
traceback.walk_stack(frame), lookup_lines=False)
finally:
del frame
apg_path = asyncpg.__path__[0]
i = 0
while i < len(stack) and stack[i][0].startswith(apg_path):
i += 1
stack = stack[i:i + limit]
stack.reverse()
return ''.join(traceback.format_list(stack)) | Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode. | Below is the instruction that describes the task:
### Input:
Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
### Response:
def _extract_stack(limit=10):
"""Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""
frame = sys._getframe().f_back
try:
stack = traceback.StackSummary.extract(
traceback.walk_stack(frame), lookup_lines=False)
finally:
del frame
apg_path = asyncpg.__path__[0]
i = 0
while i < len(stack) and stack[i][0].startswith(apg_path):
i += 1
stack = stack[i:i + limit]
stack.reverse()
return ''.join(traceback.format_list(stack)) |
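The `_extract_stack` row above leans on three standard-library pieces: `traceback.walk_stack`, `StackSummary.extract` with `lookup_lines=False`, and `traceback.format_list`. A minimal sketch of just those calls (using the same private `sys._getframe` the original uses):

```python
import sys
import traceback

# Walk the current stack, summarize it without loading source lines,
# and format the three innermost frames.
frame = sys._getframe()
summary = traceback.StackSummary.extract(
    traceback.walk_stack(frame), lookup_lines=False)
print(''.join(traceback.format_list(summary[:3])))
```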
def _all_recall_native_type(self, data, ptitem, prefix):
"""Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not.
"""
typestr = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.SCALAR_TYPE)
colltype = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.COLL_TYPE)
type_changed = False
# Check what the original data type was from the hdf5 node attributes
if colltype == HDF5StorageService.COLL_SCALAR:
# Here data item was a scalar
if isinstance(data, np.ndarray):
# If we recall a numpy scalar, pytables loads a 1d array :-/
# So we have to change it to a real scalar value
data = np.array([data])[0]
type_changed = True
if not typestr is None:
# Check if current type and stored type match
# if not convert the data
if typestr != type(data).__name__:
if typestr == str.__name__:
data = data.decode(self._encoding)
else:
try:
data = pypetconstants.PARAMETERTYPEDICT[typestr](data)
except KeyError:
# For compatibility with files from older pypet versions
data = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](data)
type_changed = True
elif (colltype == HDF5StorageService.COLL_TUPLE or
colltype == HDF5StorageService.COLL_LIST):
# Here data item was originally a tuple or a list
if type(data) is not list and type is not tuple:
# If the original type cannot be recalled, first convert it to a list
type_changed = True
data = list(data)
if len(data) > 0:
first_item = data[0]
# Check if the type of the first item was conserved
if not typestr == type(first_item).__name__:
if not isinstance(data, list):
data = list(data)
# If type was not conserved we need to convert all items
# in the list or tuple
for idx, item in enumerate(data):
if typestr == str.__name__:
data[idx] = data[idx].decode(self._encoding)
else:
try:
data[idx] = pypetconstants.PARAMETERTYPEDICT[typestr](item)
except KeyError:
# For compatibility with files from older pypet versions:
data[idx] = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](item)
type_changed = True
if colltype == HDF5StorageService.COLL_TUPLE:
# If it was originally a tuple we need to convert it back to tuple
if type(data) is not tuple:
data = tuple(data)
type_changed = True
elif colltype == HDF5StorageService.COLL_EMPTY_DICT:
data = {}
type_changed = True
elif isinstance(data, np.ndarray):
if typestr == str.__name__:
data = np.core.defchararray.decode(data, self._encoding)
type_changed = True
if colltype == HDF5StorageService.COLL_MATRIX:
# Here data item was originally a matrix
data = np.matrix(data)
type_changed = True
return data, type_changed | Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not. | Below is the instruction that describes the task:
### Input:
Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not.
### Response:
def _all_recall_native_type(self, data, ptitem, prefix):
"""Checks if loaded data has the type it was stored in. If not converts it.
:param data: Data item to be checked and converted
:param ptitem: HDf5 Node or Leaf from where data was loaded
:param prefix: Prefix for recalling the data type from the hdf5 node attributes
:return:
Tuple, first item is the (converted) `data` item, second boolean whether
item was converted or not.
"""
typestr = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.SCALAR_TYPE)
colltype = self._all_get_from_attrs(ptitem, prefix + HDF5StorageService.COLL_TYPE)
type_changed = False
# Check what the original data type was from the hdf5 node attributes
if colltype == HDF5StorageService.COLL_SCALAR:
# Here data item was a scalar
if isinstance(data, np.ndarray):
# If we recall a numpy scalar, pytables loads a 1d array :-/
# So we have to change it to a real scalar value
data = np.array([data])[0]
type_changed = True
if not typestr is None:
# Check if current type and stored type match
# if not convert the data
if typestr != type(data).__name__:
if typestr == str.__name__:
data = data.decode(self._encoding)
else:
try:
data = pypetconstants.PARAMETERTYPEDICT[typestr](data)
except KeyError:
# For compatibility with files from older pypet versions
data = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](data)
type_changed = True
elif (colltype == HDF5StorageService.COLL_TUPLE or
colltype == HDF5StorageService.COLL_LIST):
# Here data item was originally a tuple or a list
if type(data) is not list and type(data) is not tuple:
# If the original type cannot be recalled, first convert it to a list
type_changed = True
data = list(data)
if len(data) > 0:
first_item = data[0]
# Check if the type of the first item was conserved
if not typestr == type(first_item).__name__:
if not isinstance(data, list):
data = list(data)
# If type was not conserved we need to convert all items
# in the list or tuple
for idx, item in enumerate(data):
if typestr == str.__name__:
data[idx] = data[idx].decode(self._encoding)
else:
try:
data[idx] = pypetconstants.PARAMETERTYPEDICT[typestr](item)
except KeyError:
# For compatibility with files from older pypet versions:
data[idx] = pypetconstants.COMPATPARAMETERTYPEDICT[typestr](item)
type_changed = True
if colltype == HDF5StorageService.COLL_TUPLE:
# If it was originally a tuple we need to convert it back to tuple
if type(data) is not tuple:
data = tuple(data)
type_changed = True
elif colltype == HDF5StorageService.COLL_EMPTY_DICT:
data = {}
type_changed = True
elif isinstance(data, np.ndarray):
if typestr == str.__name__:
data = np.core.defchararray.decode(data, self._encoding)
type_changed = True
if colltype == HDF5StorageService.COLL_MATRIX:
# Here data item was originally a matrix
data = np.matrix(data)
type_changed = True
return data, type_changed |
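As a quick illustration of the scalar type-recall step above, a minimal sketch with an assumed stand-in for pypet's PARAMETERTYPEDICT mapping:
# Assumed stand-in for pypet's mapping of stored type names to constructors
PARAMETERTYPEDICT = {'int': int, 'float': float, 'str': str}
loaded, typestr = 5.0, 'int'   # value came back as a float but was stored as an int
if typestr != type(loaded).__name__:
    loaded = PARAMETERTYPEDICT[typestr](loaded)
print(loaded, type(loaded).__name__)   # 5 int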
def show_input(self, template_helper, language, seed):
""" Show MatchProblem """
header = ParsableText(self.gettext(language, self._header), "rst",
translation=self._translations.get(language, gettext.NullTranslations()))
return str(DisplayableMatchProblem.get_renderer(template_helper).tasks.match(self.get_id(), header)) | Show MatchProblem | Below is the instruction that describes the task:
### Input:
Show MatchProblem
### Response:
def show_input(self, template_helper, language, seed):
""" Show MatchProblem """
header = ParsableText(self.gettext(language, self._header), "rst",
translation=self._translations.get(language, gettext.NullTranslations()))
return str(DisplayableMatchProblem.get_renderer(template_helper).tasks.match(self.get_id(), header)) |
def ToScriptHash(self, address):
"""
Retrieve the script_hash from an address.
Args:
address (str): a base58 encoded address.
Raises:
ValueError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash.
"""
if len(address) == 34:
if address[0] == 'A':
data = b58decode(address)
if data[0] != self.AddressVersion:
raise ValueError('Not correct Coin Version')
checksum = Crypto.Default().Hash256(data[:21])[:4]
if checksum != data[21:]:
raise Exception('Address format error')
return UInt160(data=data[1:21])
else:
raise Exception('Address format error')
else:
raise ValueError('Not correct Address, wrong length.') | Retrieve the script_hash from an address.
Args:
address (str): a base58 encoded address.
Raises:
ValueError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash. | Below is the instruction that describes the task:
### Input:
Retrieve the script_hash from an address.
Args:
address (str): a base58 encoded address.
Raises:
ValueError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash.
### Response:
def ToScriptHash(self, address):
"""
Retrieve the script_hash from an address.
Args:
address (str): a base58 encoded address.
Raises:
ValueError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash.
"""
if len(address) == 34:
if address[0] == 'A':
data = b58decode(address)
if data[0] != self.AddressVersion:
raise ValueError('Not correct Coin Version')
checksum = Crypto.Default().Hash256(data[:21])[:4]
if checksum != data[21:]:
raise Exception('Address format error')
return UInt160(data=data[1:21])
else:
raise Exception('Address format error')
else:
raise ValueError('Not correct Address, wrong length.') |
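A self-contained sketch of the address layout that ToScriptHash checks (version byte, 20-byte script hash, 4-byte checksum), using hashlib for the double SHA-256; the version byte 23 and the all-zero script hash are assumptions for illustration only:
import hashlib

def hash256(data):   # double SHA-256, mirroring Crypto.Default().Hash256
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

payload = bytes([23]) + bytes(20)                 # version byte + 20-byte script hash
address_bytes = payload + hash256(payload)[:4]    # what b58decode(address) yields
assert address_bytes[0] == 23                     # coin-version check
assert hash256(address_bytes[:21])[:4] == address_bytes[21:]   # checksum check
script_hash = address_bytes[1:21]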
def print_partlist(input, timeout=20, showgui=False):
'''print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
print(raw_partlist(input=input, timeout=timeout, showgui=showgui)) | print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None | Below is the instruction that describes the task:
### Input:
print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
### Response:
def print_partlist(input, timeout=20, showgui=False):
'''print partlist text delivered by eagle
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
'''
print(raw_partlist(input=input, timeout=timeout, showgui=showgui))
def __header(self, line):
"""Build the header (contain the number of CPU).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer
"""
self.cpu_number = len(line.split())
return self.cpu_number | Build the header (contains the number of CPUs).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer | Below is the instruction that describes the task:
### Input:
Build the header (contains the number of CPUs).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer
### Response:
def __header(self, line):
"""Build the header (contain the number of CPU).
CPU0 CPU1 CPU2 CPU3
0: 21 0 0 0 IO-APIC 2-edge timer
"""
self.cpu_number = len(line.split())
return self.cpu_number |
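For example, splitting a /proc/interrupts-style header line on whitespace gives one token per CPU:
header = '           CPU0       CPU1       CPU2       CPU3'
print(len(header.split()))   # 4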
def cache(self, key, value):
"""
Add an entry to the cache.
A weakref to the value is stored, rather than a direct reference. The
value must have a C{__finalizer__} method that returns a callable which
will be invoked when the weakref is broken.
@param key: The key identifying the cache entry.
@param value: The value for the cache entry.
"""
fin = value.__finalizer__()
try:
# It's okay if there's already a cache entry for this key as long
# as the weakref has already been broken. See the comment in
# get() for an explanation of why this might happen.
if self.data[key]() is not None:
raise CacheInconsistency(
"Duplicate cache key: %r %r %r" % (
key, value, self.data[key]))
except KeyError:
pass
callback = createCacheRemoveCallback(self._ref(self), key, fin)
self.data[key] = self._ref(value, callback)
return value | Add an entry to the cache.
A weakref to the value is stored, rather than a direct reference. The
value must have a C{__finalizer__} method that returns a callable which
will be invoked when the weakref is broken.
@param key: The key identifying the cache entry.
@param value: The value for the cache entry. | Below is the instruction that describes the task:
### Input:
Add an entry to the cache.
A weakref to the value is stored, rather than a direct reference. The
value must have a C{__finalizer__} method that returns a callable which
will be invoked when the weakref is broken.
@param key: The key identifying the cache entry.
@param value: The value for the cache entry.
### Response:
def cache(self, key, value):
"""
Add an entry to the cache.
A weakref to the value is stored, rather than a direct reference. The
value must have a C{__finalizer__} method that returns a callable which
will be invoked when the weakref is broken.
@param key: The key identifying the cache entry.
@param value: The value for the cache entry.
"""
fin = value.__finalizer__()
try:
# It's okay if there's already a cache entry for this key as long
# as the weakref has already been broken. See the comment in
# get() for an explanation of why this might happen.
if self.data[key]() is not None:
raise CacheInconsistency(
"Duplicate cache key: %r %r %r" % (
key, value, self.data[key]))
except KeyError:
pass
callback = createCacheRemoveCallback(self._ref(self), key, fin)
self.data[key] = self._ref(value, callback)
return value |
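A rough standalone sketch of the weakref-plus-callback pattern used above, with a toy __finalizer__ in place of Axiom's real one:
import weakref

class Item(object):
    def __finalizer__(self):
        return lambda: print('finalized')

data = {}
item = Item()
fin = item.__finalizer__()

def on_broken(ref, key='some-key', fin=fin):   # runs once the weakref is broken
    data.pop(key, None)
    fin()

data['some-key'] = weakref.ref(item, on_broken)
del item   # in CPython this breaks the ref immediately and runs on_broken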
def callback_liveIn_button_press(red_clicks, blue_clicks, green_clicks,
rc_timestamp, bc_timestamp, gc_timestamp, **kwargs): # pylint: disable=unused-argument
'Input app button pressed, so do something interesting'
if not rc_timestamp:
rc_timestamp = 0
if not bc_timestamp:
bc_timestamp = 0
if not gc_timestamp:
gc_timestamp = 0
if (rc_timestamp + bc_timestamp + gc_timestamp) < 1:
change_col = None
timestamp = 0
else:
if rc_timestamp > bc_timestamp:
change_col = "red"
timestamp = rc_timestamp
else:
change_col = "blue"
timestamp = bc_timestamp
if gc_timestamp > timestamp:
timestamp = gc_timestamp
change_col = "green"
value = {'red_clicks':red_clicks,
'blue_clicks':blue_clicks,
'green_clicks':green_clicks,
'click_colour':change_col,
'click_timestamp':timestamp,
'user':str(kwargs.get('user', 'UNKNOWN'))}
send_to_pipe_channel(channel_name="live_button_counter",
label="named_counts",
value=value)
return "Number of local clicks so far is %s red and %s blue; last change is %s at %s" % (red_clicks,
blue_clicks,
change_col,
datetime.fromtimestamp(0.001*timestamp)) | Input app button pressed, so do something interesting | Below is the instruction that describes the task:
### Input:
Input app button pressed, so do something interesting
### Response:
def callback_liveIn_button_press(red_clicks, blue_clicks, green_clicks,
rc_timestamp, bc_timestamp, gc_timestamp, **kwargs): # pylint: disable=unused-argument
'Input app button pressed, so do something interesting'
if not rc_timestamp:
rc_timestamp = 0
if not bc_timestamp:
bc_timestamp = 0
if not gc_timestamp:
gc_timestamp = 0
if (rc_timestamp + bc_timestamp + gc_timestamp) < 1:
change_col = None
timestamp = 0
else:
if rc_timestamp > bc_timestamp:
change_col = "red"
timestamp = rc_timestamp
else:
change_col = "blue"
timestamp = bc_timestamp
if gc_timestamp > timestamp:
timestamp = gc_timestamp
change_col = "green"
value = {'red_clicks':red_clicks,
'blue_clicks':blue_clicks,
'green_clicks':green_clicks,
'click_colour':change_col,
'click_timestamp':timestamp,
'user':str(kwargs.get('user', 'UNKNOWN'))}
send_to_pipe_channel(channel_name="live_button_counter",
label="named_counts",
value=value)
return "Number of local clicks so far is %s red and %s blue; last change is %s at %s" % (red_clicks,
blue_clicks,
change_col,
datetime.fromtimestamp(0.001*timestamp)) |
def get_list(shapes, types):
"""Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name_, shape_)
types : a tuple of (name_, np.dtype)
"""
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes] | Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name_, shape_)
types : a tuple of (name_, np.dtype) | Below is the instruction that describes the task:
### Input:
Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name_, shape_)
types : a tuple of (name_, np.dtype)
### Response:
def get_list(shapes, types):
"""Get DataDesc list from attribute lists.
Parameters
----------
shapes : a tuple of (name_, shape_)
types : a tuple of (name_, np.dtype)
"""
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes] |
def release(self, key, owner):
"""Release lock with given name.
`key` - lock name
`owner` - name of application/component/whatever which held a lock
Raises `MongoLockException` if there is no such lock.
"""
status = self.collection.find_and_modify(
{'_id': key, 'owner': owner},
{'locked': False, 'owner': None, 'created': None, 'expire': None}
) | Release lock with given name.
`key` - lock name
`owner` - name of application/component/whatever which held a lock
Raises `MongoLockException` if there is no such lock. | Below is the instruction that describes the task:
### Input:
Release lock with given name.
`key` - lock name
`owner` - name of application/component/whatever which held a lock
Raises `MongoLockException` if there is no such lock.
### Response:
def release(self, key, owner):
"""Release lock with given name.
`key` - lock name
`owner` - name of application/component/whatever which held a lock
Raises `MongoLockException` if there is no such lock.
"""
status = self.collection.find_and_modify(
{'_id': key, 'owner': owner},
{'locked': False, 'owner': None, 'created': None, 'expire': None}
) |
def is_permitted(self, identifiers, permission_s):
"""
If the authorization info cannot be obtained from the accountstore,
permission check tuple yields False.
:type identifiers: subject_abcs.IdentifierCollection
:param permission_s: a collection of one or more permissions, represented
as string-based permissions or Permission objects
and NEVER comingled types
:type permission_s: list of string(s)
:yields: tuple(Permission, Boolean)
"""
identifier = identifiers.primary_identifier
for required in permission_s:
domain = Permission.get_domain(required)
# assigned is a list of json blobs:
assigned = self.get_authzd_permissions(identifier, domain)
is_permitted = False
for perms_blob in assigned:
is_permitted = self.permission_verifier.\
is_permitted_from_json(required, perms_blob)
yield (required, is_permitted) | If the authorization info cannot be obtained from the accountstore,
permission check tuple yields False.
:type identifiers: subject_abcs.IdentifierCollection
:param permission_s: a collection of one or more permissions, represented
as string-based permissions or Permission objects
and NEVER comingled types
:type permission_s: list of string(s)
:yields: tuple(Permission, Boolean) | Below is the instruction that describes the task:
### Input:
If the authorization info cannot be obtained from the accountstore,
permission check tuple yields False.
:type identifiers: subject_abcs.IdentifierCollection
:param permission_s: a collection of one or more permissions, represented
as string-based permissions or Permission objects
and NEVER comingled types
:type permission_s: list of string(s)
:yields: tuple(Permission, Boolean)
### Response:
def is_permitted(self, identifiers, permission_s):
"""
If the authorization info cannot be obtained from the accountstore,
permission check tuple yields False.
:type identifiers: subject_abcs.IdentifierCollection
:param permission_s: a collection of one or more permissions, represented
as string-based permissions or Permission objects
and NEVER comingled types
:type permission_s: list of string(s)
:yields: tuple(Permission, Boolean)
"""
identifier = identifiers.primary_identifier
for required in permission_s:
domain = Permission.get_domain(required)
# assigned is a list of json blobs:
assigned = self.get_authzd_permissions(identifier, domain)
is_permitted = False
for perms_blob in assigned:
is_permitted = self.permission_verifier.\
is_permitted_from_json(required, perms_blob)
yield (required, is_permitted) |
def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs)) | Perform a dry-run of the task | Below is the instruction that describes the task:
### Input:
Perform a dry-run of the task
### Response:
def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs)) |
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None | Clear ConfigObj instance and restore to 'freshly created' state. | Below is the instruction that describes the task:
### Input:
Clear ConfigObj instance and restore to 'freshly created' state.
### Response:
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None |
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
OSFileEntry: a file entry or None if not available.
"""
if platform.system() == 'Windows':
# Return the root with the drive letter of the volume the current
# working directory is on.
location = os.getcwd()
location, _, _ = location.partition('\\')
location = '{0:s}\\'.format(location)
else:
location = '/'
if not os.path.exists(location):
return None
path_spec = os_path_spec.OSPathSpec(location=location)
return self.GetFileEntryByPathSpec(path_spec) | Retrieves the root file entry.
Returns:
OSFileEntry: a file entry or None if not available. | Below is the instruction that describes the task:
### Input:
Retrieves the root file entry.
Returns:
OSFileEntry: a file entry or None if not available.
### Response:
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
OSFileEntry: a file entry or None if not available.
"""
if platform.system() == 'Windows':
# Return the root with the drive letter of the volume the current
# working directory is on.
location = os.getcwd()
location, _, _ = location.partition('\\')
location = '{0:s}\\'.format(location)
else:
location = '/'
if not os.path.exists(location):
return None
path_spec = os_path_spec.OSPathSpec(location=location)
return self.GetFileEntryByPathSpec(path_spec) |
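The Windows branch above boils down to str.partition on the first backslash of the working directory, e.g.:
location = 'C:\\Users\\me\\project'        # a sample os.getcwd() result on Windows
drive, _, _ = location.partition('\\')
print('{0:s}\\'.format(drive))             # prints C:\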
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
"""Commands for removing rule from ACLS"""
return self._get_rule_cmds(sg_id, sg_rule, delete=True) | Commands for removing a rule from ACLs | Below is the instruction that describes the task:
### Input:
Commands for removing a rule from ACLs
### Response:
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
"""Commands for removing rule from ACLS"""
return self._get_rule_cmds(sg_id, sg_rule, delete=True) |
def artists(self, spotify_ids):
"""Get a spotify artists by their IDs.
Parameters
----------
spotify_id : List[str]
The spotify_ids to search with.
"""
route = Route('GET', '/artists')
payload = {'ids': spotify_ids}
return self.request(route, params=payload) | Get Spotify artists by their IDs.
Parameters
----------
spotify_ids : List[str]
The spotify_ids to search with. | Below is the instruction that describes the task:
### Input:
Get Spotify artists by their IDs.
Parameters
----------
spotify_ids : List[str]
The spotify_ids to search with.
### Response:
def artists(self, spotify_ids):
"""Get a spotify artists by their IDs.
Parameters
----------
spotify_id : List[str]
The spotify_ids to search with.
"""
route = Route('GET', '/artists')
payload = {'ids': spotify_ids}
return self.request(route, params=payload) |
def get_terminal_converted(self, attr):
"""
Returns the value of the specified attribute converted to a
representation value.
:param attr: Attribute to retrieve.
:type attr: :class:`everest.representers.attributes.MappedAttribute`
:returns: Representation string.
"""
value = self.data.get(attr.repr_name)
return self.converter_registry.convert_to_representation(
value,
attr.value_type) | Returns the value of the specified attribute converted to a
representation value.
:param attr: Attribute to retrieve.
:type attr: :class:`everest.representers.attributes.MappedAttribute`
:returns: Representation string. | Below is the instruction that describes the task:
### Input:
Returns the value of the specified attribute converted to a
representation value.
:param attr: Attribute to retrieve.
:type attr: :class:`everest.representers.attributes.MappedAttribute`
:returns: Representation string.
### Response:
def get_terminal_converted(self, attr):
"""
Returns the value of the specified attribute converted to a
representation value.
:param attr: Attribute to retrieve.
:type attr: :class:`everest.representers.attributes.MappedAttribute`
:returns: Representation string.
"""
value = self.data.get(attr.repr_name)
return self.converter_registry.convert_to_representation(
value,
attr.value_type) |
def iter_steps(self):
"""Iterate over steps in the parsed file."""
for func, decorator in self._iter_step_func_decorators():
step = self._step_decorator_args(decorator)
if step:
yield step, func.name, self._span_for_node(func, True) | Iterate over steps in the parsed file. | Below is the instruction that describes the task:
### Input:
Iterate over steps in the parsed file.
### Response:
def iter_steps(self):
"""Iterate over steps in the parsed file."""
for func, decorator in self._iter_step_func_decorators():
step = self._step_decorator_args(decorator)
if step:
yield step, func.name, self._span_for_node(func, True) |
def get_topic_keyword_dictionary():
"""
Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary.
"""
topic_keyword_dictionary = dict()
file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping" + ".txt",
",",
"utf-8")
for file_row in file_row_gen:
topic_keyword_dictionary[file_row[0]] = set([keyword for keyword in file_row[1:]])
return topic_keyword_dictionary | Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary. | Below is the instruction that describes the task:
### Input:
Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary.
### Response:
def get_topic_keyword_dictionary():
"""
Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary.
"""
topic_keyword_dictionary = dict()
file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping" + ".txt",
",",
"utf-8")
for file_row in file_row_gen:
topic_keyword_dictionary[file_row[0]] = set([keyword for keyword in file_row[1:]])
return topic_keyword_dictionary |
def vq_nearest_neighbor(x, hparams):
"""Find the nearest element in means to elements in x."""
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
if hparams.bottleneck_kind == "em":
x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
x_means_hot = tf.one_hot(
x_means_idx, depth=bottleneck_size)
x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
else:
x_means_idx = tf.argmax(-dist, axis=-1)
x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
x_means = tf.matmul(x_means_hot, means)
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, e_loss | Find the nearest element in means to elements in x. | Below is the instruction that describes the task:
### Input:
Find the nearest element in means to elements in x.
### Response:
def vq_nearest_neighbor(x, hparams):
"""Find the nearest element in means to elements in x."""
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
scalar_prod = tf.matmul(x, means, transpose_b=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
if hparams.bottleneck_kind == "em":
x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
x_means_hot = tf.one_hot(
x_means_idx, depth=bottleneck_size)
x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
else:
x_means_idx = tf.argmax(-dist, axis=-1)
x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
x_means = tf.matmul(x_means_hot, means)
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, e_loss |
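The same nearest-neighbour lookup in plain NumPy, with a random batch and codebook standing in for x and hparams.means (argmax branch only):
import numpy as np
x = np.random.randn(4, 8)                  # 4 vectors to quantize
means = np.random.randn(16, 8)             # codebook with 16 entries
dist = (x ** 2).sum(-1, keepdims=True) + (means ** 2).sum(-1) - 2 * x @ means.T
idx = np.argmax(-dist, axis=-1)            # index of the nearest codebook entry
x_means_hot = np.eye(16)[idx]
x_means = x_means_hot @ means
e_loss = np.mean((x - x_means) ** 2)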
def buy_market(self, quantity, **kwargs):
""" Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity
"""
kwargs['limit_price'] = 0
kwargs['order_type'] = "MARKET"
self.parent.order("BUY", self, quantity=quantity, **kwargs) | Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity | Below is the instruction that describes the task:
### Input:
Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity
### Response:
def buy_market(self, quantity, **kwargs):
""" Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its
`optional parameters <#qtpylib.instrument.Instrument.order>`_
:Parameters:
quantity : int
Order quantity
"""
kwargs['limit_price'] = 0
kwargs['order_type'] = "MARKET"
self.parent.order("BUY", self, quantity=quantity, **kwargs) |
def _get_assistants_snippets(path, name):
'''Get Assistants and Snippets for a given DAP name on a given path'''
result = []
subdirs = {'assistants': 2, 'snippets': 1} # Values used for stripping leading path tokens
for loc in subdirs:
for root, dirs, files in os.walk(os.path.join(path, loc)):
for filename in [utils.strip_prefix(os.path.join(root, f), path) for f in files]:
stripped = os.path.sep.join(filename.split(os.path.sep)[subdirs[loc]:])
if stripped.startswith(os.path.join(name, '')) or stripped == name + '.yaml':
result.append(os.path.join('fakeroot', filename))
return result | Get Assistants and Snippets for a given DAP name on a given path | Below is the instruction that describes the task:
### Input:
Get Assistants and Snippets for a given DAP name on a given path
### Response:
def _get_assistants_snippets(path, name):
'''Get Assistants and Snippets for a given DAP name on a given path'''
result = []
subdirs = {'assistants': 2, 'snippets': 1} # Values used for stripping leading path tokens
for loc in subdirs:
for root, dirs, files in os.walk(os.path.join(path, loc)):
for filename in [utils.strip_prefix(os.path.join(root, f), path) for f in files]:
stripped = os.path.sep.join(filename.split(os.path.sep)[subdirs[loc]:])
if stripped.startswith(os.path.join(name, '')) or stripped == name + '.yaml':
result.append(os.path.join('fakeroot', filename))
return result |
def write(self, text):
"""Write text in the terminal without breaking the spinner."""
# similar to tqdm.write()
# https://pypi.python.org/pypi/tqdm#writing-messages
sys.stdout.write("\r")
self._clear_line()
_text = to_unicode(text)
if PY2:
_text = _text.encode(ENCODING)
# Ensure output is bytes for Py2 and Unicode for Py3
assert isinstance(_text, builtin_str)
sys.stdout.write("{0}\n".format(_text)) | Write text in the terminal without breaking the spinner. | Below is the the instruction that describes the task:
### Input:
Write text in the terminal without breaking the spinner.
### Response:
def write(self, text):
"""Write text in the terminal without breaking the spinner."""
# similar to tqdm.write()
# https://pypi.python.org/pypi/tqdm#writing-messages
sys.stdout.write("\r")
self._clear_line()
_text = to_unicode(text)
if PY2:
_text = _text.encode(ENCODING)
# Ensure output is bytes for Py2 and Unicode for Py3
assert isinstance(_text, builtin_str)
sys.stdout.write("{0}\n".format(_text)) |
def format_sql(self):
"""
Builds the sql in a format that is easy for humans to read and debug
:return: The formatted sql for this query
:rtype: str
"""
# TODO: finish adding the other parts of the sql generation
sql = ''
# build SELECT
select_segment = self.build_select_fields()
select_segment = select_segment.replace('SELECT ', '', 1)
fields = [field.strip() for field in select_segment.split(',')]
sql += 'SELECT\n\t{0}\n'.format(',\n\t'.join(fields))
# build FROM
from_segment = self.build_from_table()
from_segment = from_segment.replace('FROM ', '', 1)
tables = [table.strip() for table in from_segment.split(',')]
sql += 'FROM\n\t{0}\n'.format(',\n\t'.join(tables))
# build ORDER BY
order_by_segment = self.build_order_by()
if len(order_by_segment):
order_by_segment = order_by_segment.replace('ORDER BY ', '', 1)
sorters = [sorter.strip() for sorter in order_by_segment.split(',')]
sql += 'ORDER BY\n\t{0}\n'.format(',\n\t'.join(sorters))
# build LIMIT
limit_segment = self.build_limit()
if len(limit_segment):
if 'LIMIT' in limit_segment:
limit_segment = limit_segment.replace('LIMIT ', 'LIMIT\n\t', 1)
if 'OFFSET' in limit_segment:
limit_segment = limit_segment.replace('OFFSET ', '\nOFFSET\n\t', 1)
elif 'OFFSET' in limit_segment:
limit_segment = limit_segment.replace('OFFSET ', 'OFFSET\n\t', 1)
sql += limit_segment
return sql | Builds the sql in a format that is easy for humans to read and debug
:return: The formatted sql for this query
:rtype: str | Below is the instruction that describes the task:
### Input:
Builds the sql in a format that is easy for humans to read and debug
:return: The formatted sql for this query
:rtype: str
### Response:
def format_sql(self):
"""
Builds the sql in a format that is easy for humans to read and debug
:return: The formatted sql for this query
:rtype: str
"""
# TODO: finish adding the other parts of the sql generation
sql = ''
# build SELECT
select_segment = self.build_select_fields()
select_segment = select_segment.replace('SELECT ', '', 1)
fields = [field.strip() for field in select_segment.split(',')]
sql += 'SELECT\n\t{0}\n'.format(',\n\t'.join(fields))
# build FROM
from_segment = self.build_from_table()
from_segment = from_segment.replace('FROM ', '', 1)
tables = [table.strip() for table in from_segment.split(',')]
sql += 'FROM\n\t{0}\n'.format(',\n\t'.join(tables))
# build ORDER BY
order_by_segment = self.build_order_by()
if len(order_by_segment):
order_by_segment = order_by_segment.replace('ORDER BY ', '', 1)
sorters = [sorter.strip() for sorter in order_by_segment.split(',')]
sql += 'ORDER BY\n\t{0}\n'.format(',\n\t'.join(sorters))
# build LIMIT
limit_segment = self.build_limit()
if len(limit_segment):
if 'LIMIT' in limit_segment:
limit_segment = limit_segment.replace('LIMIT ', 'LIMIT\n\t', 1)
if 'OFFSET' in limit_segment:
limit_segment = limit_segment.replace('OFFSET ', '\nOFFSET\n\t', 1)
elif 'OFFSET' in limit_segment:
limit_segment = limit_segment.replace('OFFSET ', 'OFFSET\n\t', 1)
sql += limit_segment
return sql |
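The core of the reformatting is just splitting each clause on commas and re-joining with a newline and tab, e.g.:
select_segment = 'SELECT id, name, created_at'
fields = [field.strip() for field in select_segment.replace('SELECT ', '', 1).split(',')]
print('SELECT\n\t{0}\n'.format(',\n\t'.join(fields)))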
def buy_market_order(self, amount):
"""Place a buy order at market price.
:param amount: Amount of major currency to buy at market price.
:type amount: int | float | str | unicode | decimal.Decimal
:return: Order details.
:rtype: dict
"""
amount = str(amount)
self._log("buy {} {} at market price".format(amount, self.major))
return self._rest_client.post(
endpoint='/buy',
payload={'book': self.name, 'amount': amount}
) | Place a buy order at market price.
:param amount: Amount of major currency to buy at market price.
:type amount: int | float | str | unicode | decimal.Decimal
:return: Order details.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Place a buy order at market price.
:param amount: Amount of major currency to buy at market price.
:type amount: int | float | str | unicode | decimal.Decimal
:return: Order details.
:rtype: dict
### Response:
def buy_market_order(self, amount):
"""Place a buy order at market price.
:param amount: Amount of major currency to buy at market price.
:type amount: int | float | str | unicode | decimal.Decimal
:return: Order details.
:rtype: dict
"""
amount = str(amount)
self._log("buy {} {} at market price".format(amount, self.major))
return self._rest_client.post(
endpoint='/buy',
payload={'book': self.name, 'amount': amount}
) |
def _initActions(self):
"""Init shortcuts for text editing
"""
def createAction(text, shortcut, slot, iconFileName=None):
"""Create QAction with given parameters and add to the widget
"""
action = QAction(text, self)
if iconFileName is not None:
action.setIcon(getIcon(iconFileName))
keySeq = shortcut if isinstance(shortcut, QKeySequence) else QKeySequence(shortcut)
action.setShortcut(keySeq)
action.setShortcutContext(Qt.WidgetShortcut)
action.triggered.connect(slot)
self.addAction(action)
return action
# scrolling
self.scrollUpAction = createAction('Scroll up', 'Ctrl+Up',
lambda: self._onShortcutScroll(down = False),
'go-up')
self.scrollDownAction = createAction('Scroll down', 'Ctrl+Down',
lambda: self._onShortcutScroll(down = True),
'go-down')
self.selectAndScrollUpAction = createAction('Select and scroll Up', 'Ctrl+Shift+Up',
lambda: self._onShortcutSelectAndScroll(down = False))
self.selectAndScrollDownAction = createAction('Select and scroll Down', 'Ctrl+Shift+Down',
lambda: self._onShortcutSelectAndScroll(down = True))
# indentation
self.increaseIndentAction = createAction('Increase indentation', 'Tab',
self._onShortcutIndent,
'format-indent-more')
self.decreaseIndentAction = createAction('Decrease indentation', 'Shift+Tab',
lambda: self._indenter.onChangeSelectedBlocksIndent(increase = False),
'format-indent-less')
self.autoIndentLineAction = createAction('Autoindent line', 'Ctrl+I',
self._indenter.onAutoIndentTriggered)
self.indentWithSpaceAction = createAction('Indent with 1 space', 'Ctrl+Shift+Space',
lambda: self._indenter.onChangeSelectedBlocksIndent(increase=True,
withSpace=True))
self.unIndentWithSpaceAction = createAction('Unindent with 1 space', 'Ctrl+Shift+Backspace',
lambda: self._indenter.onChangeSelectedBlocksIndent(increase=False,
withSpace=True))
# editing
self.undoAction = createAction('Undo', QKeySequence.Undo,
self.undo, 'edit-undo')
self.redoAction = createAction('Redo', QKeySequence.Redo,
self.redo, 'edit-redo')
self.moveLineUpAction = createAction('Move line up', 'Alt+Up',
lambda: self._onShortcutMoveLine(down = False), 'go-up')
self.moveLineDownAction = createAction('Move line down', 'Alt+Down',
lambda: self._onShortcutMoveLine(down = True), 'go-down')
self.deleteLineAction = createAction('Delete line', 'Alt+Del', self._onShortcutDeleteLine, 'edit-delete')
self.cutLineAction = createAction('Cut line', 'Alt+X', self._onShortcutCutLine, 'edit-cut')
self.copyLineAction = createAction('Copy line', 'Alt+C', self._onShortcutCopyLine, 'edit-copy')
self.pasteLineAction = createAction('Paste line', 'Alt+V', self._onShortcutPasteLine, 'edit-paste')
self.duplicateLineAction = createAction('Duplicate line', 'Alt+D', self._onShortcutDuplicateLine)
self.invokeCompletionAction = createAction('Invoke completion', 'Ctrl+Space', self._completer.invokeCompletion)
# other
self.printAction = createAction('Print', 'Ctrl+P', self._onShortcutPrint, 'document-print') | Init shortcuts for text editing | Below is the instruction that describes the task:
### Input:
Init shortcuts for text editing
### Response:
def _initActions(self):
"""Init shortcuts for text editing
"""
def createAction(text, shortcut, slot, iconFileName=None):
"""Create QAction with given parameters and add to the widget
"""
action = QAction(text, self)
if iconFileName is not None:
action.setIcon(getIcon(iconFileName))
keySeq = shortcut if isinstance(shortcut, QKeySequence) else QKeySequence(shortcut)
action.setShortcut(keySeq)
action.setShortcutContext(Qt.WidgetShortcut)
action.triggered.connect(slot)
self.addAction(action)
return action
# scrolling
self.scrollUpAction = createAction('Scroll up', 'Ctrl+Up',
lambda: self._onShortcutScroll(down = False),
'go-up')
self.scrollDownAction = createAction('Scroll down', 'Ctrl+Down',
lambda: self._onShortcutScroll(down = True),
'go-down')
self.selectAndScrollUpAction = createAction('Select and scroll Up', 'Ctrl+Shift+Up',
lambda: self._onShortcutSelectAndScroll(down = False))
self.selectAndScrollDownAction = createAction('Select and scroll Down', 'Ctrl+Shift+Down',
lambda: self._onShortcutSelectAndScroll(down = True))
# indentation
self.increaseIndentAction = createAction('Increase indentation', 'Tab',
self._onShortcutIndent,
'format-indent-more')
self.decreaseIndentAction = createAction('Decrease indentation', 'Shift+Tab',
lambda: self._indenter.onChangeSelectedBlocksIndent(increase = False),
'format-indent-less')
self.autoIndentLineAction = createAction('Autoindent line', 'Ctrl+I',
self._indenter.onAutoIndentTriggered)
self.indentWithSpaceAction = createAction('Indent with 1 space', 'Ctrl+Shift+Space',
lambda: self._indenter.onChangeSelectedBlocksIndent(increase=True,
withSpace=True))
self.unIndentWithSpaceAction = createAction('Unindent with 1 space', 'Ctrl+Shift+Backspace',
lambda: self._indenter.onChangeSelectedBlocksIndent(increase=False,
withSpace=True))
# editing
self.undoAction = createAction('Undo', QKeySequence.Undo,
self.undo, 'edit-undo')
self.redoAction = createAction('Redo', QKeySequence.Redo,
self.redo, 'edit-redo')
self.moveLineUpAction = createAction('Move line up', 'Alt+Up',
lambda: self._onShortcutMoveLine(down = False), 'go-up')
self.moveLineDownAction = createAction('Move line down', 'Alt+Down',
lambda: self._onShortcutMoveLine(down = True), 'go-down')
self.deleteLineAction = createAction('Delete line', 'Alt+Del', self._onShortcutDeleteLine, 'edit-delete')
self.cutLineAction = createAction('Cut line', 'Alt+X', self._onShortcutCutLine, 'edit-cut')
self.copyLineAction = createAction('Copy line', 'Alt+C', self._onShortcutCopyLine, 'edit-copy')
self.pasteLineAction = createAction('Paste line', 'Alt+V', self._onShortcutPasteLine, 'edit-paste')
self.duplicateLineAction = createAction('Duplicate line', 'Alt+D', self._onShortcutDuplicateLine)
self.invokeCompletionAction = createAction('Invoke completion', 'Ctrl+Space', self._completer.invokeCompletion)
# other
self.printAction = createAction('Print', 'Ctrl+P', self._onShortcutPrint, 'document-print') |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._billing_date is not None:
return False
if self._type_description is not None:
return False
if self._type_description_translated is not None:
return False
if self._unit_vat_exclusive is not None:
return False
if self._unit_vat_inclusive is not None:
return False
if self._vat is not None:
return False
if self._quantity is not None:
return False
if self._total_vat_exclusive is not None:
return False
if self._total_vat_inclusive is not None:
return False
return True | :rtype: bool | Below is the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._billing_date is not None:
return False
if self._type_description is not None:
return False
if self._type_description_translated is not None:
return False
if self._unit_vat_exclusive is not None:
return False
if self._unit_vat_inclusive is not None:
return False
if self._vat is not None:
return False
if self._quantity is not None:
return False
if self._total_vat_exclusive is not None:
return False
if self._total_vat_inclusive is not None:
return False
return True |
def to_dict(self):
"""
Converts the column to a dictionary representation accepted
by the Citrination server.
:return: Dictionary with basic options, plus any column type specific
options held under the "options" key
:rtype: dict
"""
return {
"type": self.type,
"name": self.name,
"group_by_key": self.group_by_key,
"role": self.role,
"units": self.units,
"options": self.build_options()
} | Converts the column to a dictionary representation accepted
by the Citrination server.
:return: Dictionary with basic options, plus any column type specific
options held under the "options" key
:rtype: dict | Below is the instruction that describes the task:
### Input:
Converts the column to a dictionary representation accepted
by the Citrination server.
:return: Dictionary with basic options, plus any column type specific
options held under the "options" key
:rtype: dict
### Response:
def to_dict(self):
"""
Converts the column to a dictionary representation accepted
by the Citrination server.
:return: Dictionary with basic options, plus any column type specific
options held under the "options" key
:rtype: dict
"""
return {
"type": self.type,
"name": self.name,
"group_by_key": self.group_by_key,
"role": self.role,
"units": self.units,
"options": self.build_options()
} |
def _cooked_fields(self, dj_fields):
"""
Returns a tuple of cooked fields
:param dj_fields: a list of Django field names
:return:
"""
from django.db import models
valids = []
for field in dj_fields:
try:
dj_field, _, _, _ = self.model._meta.get_field_by_name(field)
if isinstance(dj_field, models.ForeignKey):
valids.append((field + "_id", field, dj_field))
else:
valids.append((field, field, dj_field))
except models.FieldDoesNotExist:
valids.append((field, field, None))
return valids | Returns a tuple of cooked fields
:param dj_fields: a list of Django field names
:return: | Below is the instruction that describes the task:
### Input:
Returns a tuple of cooked fields
:param dj_fields: a list of Django field names
:return:
### Response:
def _cooked_fields(self, dj_fields):
"""
Returns a tuple of cooked fields
:param dj_fields: a list of Django field names
:return:
"""
from django.db import models
valids = []
for field in dj_fields:
try:
dj_field, _, _, _ = self.model._meta.get_field_by_name(field)
if isinstance(dj_field, models.ForeignKey):
valids.append((field + "_id", field, dj_field))
else:
valids.append((field, field, dj_field))
except models.FieldDoesNotExist:
valids.append((field, field, None))
return valids |
def unregister(self):
"""unregister model at tracking server"""
uuid = self.metadata["tracker"]["uuid"]
# connect to server
result = requests.delete(urljoin(self.tracker, 'models' + "/" + uuid))
logger.debug("unregistered at server %s with %s: %s", self.tracker, uuid, result) | unregister model at tracking server | Below is the the instruction that describes the task:
### Input:
unregister model at tracking server
### Response:
def unregister(self):
"""unregister model at tracking server"""
uuid = self.metadata["tracker"]["uuid"]
# connect to server
result = requests.delete(urljoin(self.tracker, 'models' + "/" + uuid))
logger.debug("unregistered at server %s with %s: %s", self.tracker, uuid, result) |
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i) | Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence) | Below is the instruction that describes the task:
### Input:
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
### Response:
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i) |
def create_from_assocs(self, assocs, **args):
"""
Creates from a list of association objects
"""
amap = defaultdict(list)
subject_label_map = {}
for a in assocs:
subj = a['subject']
subj_id = subj['id']
subj_label = subj['label']
subject_label_map[subj_id] = subj_label
if not a['negated']:
amap[subj_id].append(a['object']['id'])
aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args)
aset.associations_by_subj = defaultdict(list)
aset.associations_by_subj_obj = defaultdict(list)
for a in assocs:
sub_id = a['subject']['id']
obj_id = a['object']['id']
aset.associations_by_subj[sub_id].append(a)
aset.associations_by_subj_obj[(sub_id,obj_id)].append(a)
return aset | Creates from a list of association objects | Below is the instruction that describes the task:
### Input:
Creates from a list of association objects
### Response:
def create_from_assocs(self, assocs, **args):
"""
Creates from a list of association objects
"""
amap = defaultdict(list)
subject_label_map = {}
for a in assocs:
subj = a['subject']
subj_id = subj['id']
subj_label = subj['label']
subject_label_map[subj_id] = subj_label
if not a['negated']:
amap[subj_id].append(a['object']['id'])
aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args)
aset.associations_by_subj = defaultdict(list)
aset.associations_by_subj_obj = defaultdict(list)
for a in assocs:
sub_id = a['subject']['id']
obj_id = a['object']['id']
aset.associations_by_subj[sub_id].append(a)
aset.associations_by_subj_obj[(sub_id,obj_id)].append(a)
return aset |
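Grouping associations by subject is a plain defaultdict pattern; a sketch with two made-up association dicts:
from collections import defaultdict

assocs = [
    {'subject': {'id': 'G1', 'label': 'gene1'}, 'object': {'id': 'T1'}, 'negated': False},
    {'subject': {'id': 'G1', 'label': 'gene1'}, 'object': {'id': 'T2'}, 'negated': True},
]
amap = defaultdict(list)
for a in assocs:
    if not a['negated']:
        amap[a['subject']['id']].append(a['object']['id'])
print(dict(amap))   # {'G1': ['T1']}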
def configure_file_logger(name, log_dir, log_level=logging.DEBUG):
"""Configures logging to use the :class:`SizeRotatingFileHandler`"""
from .srothandler import SizeRotatingFileHandler
root = logging.getLogger()
root.setLevel(log_level)
handler = SizeRotatingFileHandler(os.path.join(log_dir, '%s.log' % name))
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(LOG_FORMAT_STANDARD))
root.addHandler(handler) | Configures logging to use the :class:`SizeRotatingFileHandler` | Below is the instruction that describes the task:
### Input:
Configures logging to use the :class:`SizeRotatingFileHandler`
### Response:
def configure_file_logger(name, log_dir, log_level=logging.DEBUG):
"""Configures logging to use the :class:`SizeRotatingFileHandler`"""
from .srothandler import SizeRotatingFileHandler
root = logging.getLogger()
root.setLevel(log_level)
handler = SizeRotatingFileHandler(os.path.join(log_dir, '%s.log' % name))
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(LOG_FORMAT_STANDARD))
root.addHandler(handler) |
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If it contains an '_id' key it is used, else one is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id) | Inserts one document into the collection
If it contains an '_id' key it is used, else one is generated.
:param doc: the document
:return: InsertOneResult | Below is the instruction that describes the task:
### Input:
Inserts one document into the collection
If it contains an '_id' key it is used, else one is generated.
:param doc: the document
:return: InsertOneResult
### Response:
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If it contains an '_id' key it is used, else one is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id) |
def get_composition_admin_session(self):
"""Gets a composition administration session for creating, updating and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
``CompositionAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_admin()`` is ``true``.*
"""
if not self.supports_composition_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CompositionAdminSession(runtime=self._runtime) | Gets a composition administration session for creating, updating and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
``CompositionAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_admin()`` is ``true``.* | Below is the instruction that describes the task:
### Input:
Gets a composition administration session for creating, updating and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
``CompositionAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_admin()`` is ``true``.*
### Response:
def get_composition_admin_session(self):
"""Gets a composition administration session for creating, updating and deleting compositions.
return: (osid.repository.CompositionAdminSession) - a
``CompositionAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_admin()`` is ``true``.*
"""
if not self.supports_composition_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CompositionAdminSession(runtime=self._runtime) |
def get_encodings_from_content(content):
"""
Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from.
"""
if isinstance(content, bytes):
find_charset = re.compile(
br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
).findall
find_xml = re.compile(
br'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
).findall
return [encoding.decode('utf-8') for encoding in
find_charset(content) + find_xml(content)]
else:
find_charset = re.compile(
r'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
).findall
find_xml = re.compile(
r'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
).findall
return find_charset(content) + find_xml(content) | Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from. | Below is the instruction that describes the task:
### Input:
Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from.
### Response:
def get_encodings_from_content(content):
"""
Code from:
https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py
Return encodings from given content string.
:param content: string to extract encodings from.
"""
if isinstance(content, bytes):
find_charset = re.compile(
br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
).findall
find_xml = re.compile(
br'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
).findall
return [encoding.decode('utf-8') for encoding in
find_charset(content) + find_xml(content)]
else:
find_charset = re.compile(
r'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I
).findall
find_xml = re.compile(
r'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]'
).findall
return find_charset(content) + find_xml(content) |
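For instance, the bytes branch finds the charset of a meta tag like this:
import re
content = b'<html><head><meta charset="utf-8"></head><body></body></html>'
find_charset = re.compile(br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I).findall
print([enc.decode('utf-8') for enc in find_charset(content)])   # ['utf-8']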
def recCopyElement(oldelement):
"""Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements
"""
newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
if len(oldelement.getchildren()) > 0:
for childelement in oldelement.getchildren():
newelement.append(recCopyElement(childelement))
return newelement | Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements | Below is the instruction that describes the task:
### Input:
Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements
### Response:
def recCopyElement(oldelement):
"""Generates a copy of an xml element and recursively of all
child elements.
:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"
.. warning::
doesn't copy ``.text`` or ``.tail`` of xml elements
"""
newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
if len(oldelement.getchildren()) > 0:
for childelement in oldelement.getchildren():
newelement.append(recCopyElement(childelement))
return newelement |
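A quick check of the .text/.tail caveat, re-implementing the same recursive copy inline so the snippet stands alone:
from lxml import etree

def rec_copy(old):                         # same approach as recCopyElement above
    new = etree.Element(old.tag, old.attrib)
    for child in old:
        new.append(rec_copy(child))
    return new

root = etree.fromstring('<a x="1"><b>text</b></a>')
print(etree.tostring(rec_copy(root)))      # b'<a x="1"><b/></a>' -- the text is lost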
def set_mode(self, mode):
"""Set Lupusec alarm mode."""
_LOGGER.debug('State change called from alarm device')
if not mode:
_LOGGER.info('No mode supplied')
elif mode not in CONST.ALL_MODES:
_LOGGER.warning('Invalid mode')
response_object = self._lupusec.set_mode(CONST.MODE_TRANSLATION[mode])
if response_object['result'] != 1:
_LOGGER.warning('Mode setting unsuccessful')
self._json_state['mode'] = mode
_LOGGER.info('Mode set to: %s', mode)
return True | Set Lupusec alarm mode. | Below is the instruction that describes the task:
### Input:
Set Lupusec alarm mode.
### Response:
def set_mode(self, mode):
"""Set Lupusec alarm mode."""
_LOGGER.debug('State change called from alarm device')
if not mode:
_LOGGER.info('No mode supplied')
elif mode not in CONST.ALL_MODES:
_LOGGER.warning('Invalid mode')
response_object = self._lupusec.set_mode(CONST.MODE_TRANSLATION[mode])
if response_object['result'] != 1:
_LOGGER.warning('Mode setting unsuccessful')
self._json_state['mode'] = mode
_LOGGER.info('Mode set to: %s', mode)
return True |
def unknown_command(self, args):
'''handle mode switch by mode name as command'''
mode_mapping = self.master.mode_mapping()
mode = args[0].upper()
if mode in mode_mapping:
self.master.set_mode(mode_mapping[mode])
return True
        return False | handle mode switch by mode name as command | Below is the instruction that describes the task:
### Input:
handle mode switch by mode name as command
### Response:
def unknown_command(self, args):
'''handle mode switch by mode name as command'''
mode_mapping = self.master.mode_mapping()
mode = args[0].upper()
if mode in mode_mapping:
self.master.set_mode(mode_mapping[mode])
return True
return False |
def parameterstep(timestep=None):
"""Define a parameter time step size within a parameter control file.
Argument:
* timestep(|Period|): Time step size.
    Function parameterstep should usually be applied in a line
immediately behind the model import. Defining the step size of time
dependent parameters is a prerequisite to access any model specific
parameter.
Note that parameterstep implements some namespace magic by
means of the module |inspect|. This makes things a little
complicated for framework developers, but it eases the definition of
parameter control files for framework users.
"""
if timestep is not None:
parametertools.Parameter.parameterstep(timestep)
namespace = inspect.currentframe().f_back.f_locals
model = namespace.get('model')
if model is None:
model = namespace['Model']()
namespace['model'] = model
if hydpy.pub.options.usecython and 'cythonizer' in namespace:
cythonizer = namespace['cythonizer']
namespace['cythonmodule'] = cythonizer.cymodule
model.cymodel = cythonizer.cymodule.Model()
namespace['cymodel'] = model.cymodel
model.cymodel.parameters = cythonizer.cymodule.Parameters()
model.cymodel.sequences = cythonizer.cymodule.Sequences()
for numpars_name in ('NumConsts', 'NumVars'):
if hasattr(cythonizer.cymodule, numpars_name):
numpars_new = getattr(cythonizer.cymodule, numpars_name)()
numpars_old = getattr(model, numpars_name.lower())
for (name_numpar, numpar) in vars(numpars_old).items():
setattr(numpars_new, name_numpar, numpar)
setattr(model.cymodel, numpars_name.lower(), numpars_new)
for name in dir(model.cymodel):
if (not name.startswith('_')) and hasattr(model, name):
setattr(model, name, getattr(model.cymodel, name))
if 'Parameters' not in namespace:
namespace['Parameters'] = parametertools.Parameters
model.parameters = namespace['Parameters'](namespace)
if 'Sequences' not in namespace:
namespace['Sequences'] = sequencetools.Sequences
model.sequences = namespace['Sequences'](**namespace)
namespace['parameters'] = model.parameters
for pars in model.parameters:
namespace[pars.name] = pars
namespace['sequences'] = model.sequences
for seqs in model.sequences:
namespace[seqs.name] = seqs
if 'Masks' in namespace:
model.masks = namespace['Masks'](model)
namespace['masks'] = model.masks
try:
namespace.update(namespace['CONSTANTS'])
except KeyError:
pass
focus = namespace.get('focus')
for par in model.parameters.control:
try:
if (focus is None) or (par is focus):
namespace[par.name] = par
else:
namespace[par.name] = lambda *args, **kwargs: None
except AttributeError:
pass | Define a parameter time step size within a parameter control file.
Argument:
* timestep(|Period|): Time step size.
Function parameterstep should usually be applied in a line
immediately behind the model import. Defining the step size of time
dependent parameters is a prerequisite to access any model specific
parameter.
Note that parameterstep implements some namespace magic by
means of the module |inspect|. This makes things a little
complicated for framework developers, but it eases the definition of
parameter control files for framework users. | Below is the instruction that describes the task:
### Input:
Define a parameter time step size within a parameter control file.
Argument:
* timestep(|Period|): Time step size.
Function parameterstep should usually be applied in a line
immediately behind the model import. Defining the step size of time
dependent parameters is a prerequisite to access any model specific
parameter.
Note that parameterstep implements some namespace magic by
means of the module |inspect|. This makes things a little
complicated for framework developers, but it eases the definition of
parameter control files for framework users.
### Response:
def parameterstep(timestep=None):
"""Define a parameter time step size within a parameter control file.
Argument:
* timestep(|Period|): Time step size.
    Function parameterstep should usually be applied in a line
immediately behind the model import. Defining the step size of time
dependent parameters is a prerequisite to access any model specific
parameter.
Note that parameterstep implements some namespace magic by
means of the module |inspect|. This makes things a little
complicated for framework developers, but it eases the definition of
parameter control files for framework users.
"""
if timestep is not None:
parametertools.Parameter.parameterstep(timestep)
namespace = inspect.currentframe().f_back.f_locals
model = namespace.get('model')
if model is None:
model = namespace['Model']()
namespace['model'] = model
if hydpy.pub.options.usecython and 'cythonizer' in namespace:
cythonizer = namespace['cythonizer']
namespace['cythonmodule'] = cythonizer.cymodule
model.cymodel = cythonizer.cymodule.Model()
namespace['cymodel'] = model.cymodel
model.cymodel.parameters = cythonizer.cymodule.Parameters()
model.cymodel.sequences = cythonizer.cymodule.Sequences()
for numpars_name in ('NumConsts', 'NumVars'):
if hasattr(cythonizer.cymodule, numpars_name):
numpars_new = getattr(cythonizer.cymodule, numpars_name)()
numpars_old = getattr(model, numpars_name.lower())
for (name_numpar, numpar) in vars(numpars_old).items():
setattr(numpars_new, name_numpar, numpar)
setattr(model.cymodel, numpars_name.lower(), numpars_new)
for name in dir(model.cymodel):
if (not name.startswith('_')) and hasattr(model, name):
setattr(model, name, getattr(model.cymodel, name))
if 'Parameters' not in namespace:
namespace['Parameters'] = parametertools.Parameters
model.parameters = namespace['Parameters'](namespace)
if 'Sequences' not in namespace:
namespace['Sequences'] = sequencetools.Sequences
model.sequences = namespace['Sequences'](**namespace)
namespace['parameters'] = model.parameters
for pars in model.parameters:
namespace[pars.name] = pars
namespace['sequences'] = model.sequences
for seqs in model.sequences:
namespace[seqs.name] = seqs
if 'Masks' in namespace:
model.masks = namespace['Masks'](model)
namespace['masks'] = model.masks
try:
namespace.update(namespace['CONSTANTS'])
except KeyError:
pass
focus = namespace.get('focus')
for par in model.parameters.control:
try:
if (focus is None) or (par is focus):
namespace[par.name] = par
else:
namespace[par.name] = lambda *args, **kwargs: None
except AttributeError:
pass |
def last_page(self_or_cls, max_=None):
"""
Return a query set which requests the last page.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the last page.
"""
result = self_or_cls()
result.before = Before()
result.max_ = max_
return result | Return a query set which requests the last page.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the last page. | Below is the instruction that describes the task:
### Input:
Return a query set which requests the last page.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the last page.
### Response:
def last_page(self_or_cls, max_=None):
"""
Return a query set which requests the last page.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the last page.
"""
result = self_or_cls()
result.before = Before()
result.max_ = max_
return result |
def read_raw_table(self, table):
"""
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
"""
fields = self.table_relations(table) if self.cast else None
field_names = [f.name for f in self.table_relations(table)]
field_len = len(field_names)
table_path = os.path.join(self.root, table)
with _open_table(table_path, self.encoding) as tbl:
for line in tbl:
cols = decode_row(line, fields=fields)
if len(cols) != field_len:
# should this throw an exception instead?
logging.error('Number of stored fields ({}) '
'differ from the expected number({}); '
'fields may be misaligned!'
.format(len(cols), field_len))
row = OrderedDict(zip(field_names, cols))
yield row | Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used. | Below is the instruction that describes the task:
### Input:
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
### Response:
def read_raw_table(self, table):
"""
Yield rows in the [incr tsdb()] *table*. A row is a dictionary
mapping column names to values. Data from a profile is decoded
by decode_row(). No filters or applicators are used.
"""
fields = self.table_relations(table) if self.cast else None
field_names = [f.name for f in self.table_relations(table)]
field_len = len(field_names)
table_path = os.path.join(self.root, table)
with _open_table(table_path, self.encoding) as tbl:
for line in tbl:
cols = decode_row(line, fields=fields)
if len(cols) != field_len:
# should this throw an exception instead?
logging.error('Number of stored fields ({}) '
'differ from the expected number({}); '
'fields may be misaligned!'
.format(len(cols), field_len))
row = OrderedDict(zip(field_names, cols))
yield row |
def _click(x, y, button):
"""Send the mouse click event to Windows by calling the mouse_event() win32
function.
Args:
button (str): The mouse button, either 'left', 'middle', or 'right'
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
Returns:
None
"""
if button == 'left':
try:
_sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y)
except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
pass
elif button == 'middle':
try:
_sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y)
except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
pass
elif button == 'right':
try:
_sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y)
except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
pass
else:
assert False, "button argument not in ('left', 'middle', 'right')" | Send the mouse click event to Windows by calling the mouse_event() win32
function.
Args:
button (str): The mouse button, either 'left', 'middle', or 'right'
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
Returns:
None | Below is the instruction that describes the task:
### Input:
Send the mouse click event to Windows by calling the mouse_event() win32
function.
Args:
button (str): The mouse button, either 'left', 'middle', or 'right'
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
Returns:
None
### Response:
def _click(x, y, button):
"""Send the mouse click event to Windows by calling the mouse_event() win32
function.
Args:
button (str): The mouse button, either 'left', 'middle', or 'right'
x (int): The x position of the mouse event.
y (int): The y position of the mouse event.
Returns:
None
"""
if button == 'left':
try:
_sendMouseEvent(MOUSEEVENTF_LEFTCLICK, x, y)
except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
pass
elif button == 'middle':
try:
_sendMouseEvent(MOUSEEVENTF_MIDDLECLICK, x, y)
except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
pass
elif button == 'right':
try:
_sendMouseEvent(MOUSEEVENTF_RIGHTCLICK, x, y)
except (PermissionError, OSError): # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
pass
else:
assert False, "button argument not in ('left', 'middle', 'right')" |
def cmd_velocity(self, args):
'''velocity x-ms y-ms z-ms'''
if (len(args) != 3):
print("Usage: velocity x y z (m/s)")
return
if (len(args) == 3):
x_mps = float(args[0])
y_mps = float(args[1])
z_mps = float(args[2])
#print("x:%f, y:%f, z:%f" % (x_mps, y_mps, z_mps))
self.master.mav.set_position_target_local_ned_send(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
x_mps, y_mps, -z_mps, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
            0, 0) | velocity x-ms y-ms z-ms | Below is the instruction that describes the task:
### Input:
velocity x-ms y-ms z-ms
### Response:
def cmd_velocity(self, args):
'''velocity x-ms y-ms z-ms'''
if (len(args) != 3):
print("Usage: velocity x y z (m/s)")
return
if (len(args) == 3):
x_mps = float(args[0])
y_mps = float(args[1])
z_mps = float(args[2])
#print("x:%f, y:%f, z:%f" % (x_mps, y_mps, z_mps))
self.master.mav.set_position_target_local_ned_send(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
x_mps, y_mps, -z_mps, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) |
def split_bits(value, *bits):
"""
Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
"""
result = []
for b in reversed(bits):
mask = (1 << b) - 1
result.append(value & mask)
value = value >> b
assert value == 0
result.reverse()
return result | Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4] | Below is the instruction that describes the task:
### Input:
Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
### Response:
def split_bits(value, *bits):
"""
Split integer value into list of ints, according to `bits` list.
For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
"""
result = []
for b in reversed(bits):
mask = (1 << b) - 1
result.append(value & mask)
value = value >> b
assert value == 0
result.reverse()
return result |
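A brief usage sketch of the example from the docstring above (assumes the split_bits definition is in scope):
fields = split_bits(0x1234, 4, 8, 4)
print([hex(f) for f in fields])  # ['0x1', '0x23', '0x4']
# The assert inside split_bits fires if the value needs more bits than were listed:
# split_bits(0x11234, 4, 8, 4) would raise AssertionError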
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
service_node_ip=UNKNOWN_SRVN_NODE_IP,
vrf_prof=None, desc=None):
"""Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project.
"""
desc = desc or org_name
res = self._create_or_update_partition(org_name, part_name, desc,
dci_id=dci_id,
service_node_ip=service_node_ip,
vrf_prof=vrf_prof,
operation='PUT')
if res and res.status_code in self._resp_ok:
LOG.debug("Update %s partition in DCNM.", part_name)
else:
LOG.error("Failed to update %(part)s partition in DCNM."
"Response: %(res)s", {'part': part_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res) | Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project. | Below is the instruction that describes the task:
### Input:
Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project.
### Response:
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
service_node_ip=UNKNOWN_SRVN_NODE_IP,
vrf_prof=None, desc=None):
"""Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project.
"""
desc = desc or org_name
res = self._create_or_update_partition(org_name, part_name, desc,
dci_id=dci_id,
service_node_ip=service_node_ip,
vrf_prof=vrf_prof,
operation='PUT')
if res and res.status_code in self._resp_ok:
LOG.debug("Update %s partition in DCNM.", part_name)
else:
LOG.error("Failed to update %(part)s partition in DCNM."
"Response: %(res)s", {'part': part_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res) |
def map(cls, iterable, func, *a, **kw):
"""
Iterable-first replacement of Python's built-in `map()` function.
"""
        return cls(func(x, *a, **kw) for x in iterable) | Iterable-first replacement of Python's built-in `map()` function. | Below is the instruction that describes the task:
### Input:
Iterable-first replacement of Python's built-in `map()` function.
### Response:
def map(cls, iterable, func, *a, **kw):
"""
Iterable-first replacement of Python's built-in `map()` function.
"""
return cls(func(x, *a, **kw) for x in iterable) |
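A minimal sketch of how such an iterable-first classmethod reads in use; the Seq container below is invented purely for illustration (the real host class is not shown in this row):
class Seq(list):
    @classmethod
    def map(cls, iterable, func, *a, **kw):
        return cls(func(x, *a, **kw) for x in iterable)

print(Seq.map([1, 2, 3], pow, 2))  # [1, 4, 9]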
def char2hex(a: str):
"""Convert a hex character to its integer value.
'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15
Returns -1 on error.
"""
if "0" <= a <= "9":
return ord(a) - 48
elif "A" <= a <= "F":
return ord(a) - 55
elif "a" <= a <= "f": # a-f
return ord(a) - 87
return -1 | Convert a hex character to its integer value.
'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15
Returns -1 on error. | Below is the instruction that describes the task:
### Input:
Convert a hex character to its integer value.
'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15
Returns -1 on error.
### Response:
def char2hex(a: str):
"""Convert a hex character to its integer value.
'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15
Returns -1 on error.
"""
if "0" <= a <= "9":
return ord(a) - 48
elif "A" <= a <= "F":
return ord(a) - 55
elif "a" <= a <= "f": # a-f
return ord(a) - 87
return -1 |
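A short usage sketch, assuming the char2hex definition above is in scope:
print(char2hex('0'), char2hex('9'), char2hex('A'), char2hex('f'), char2hex('g'))
# 0 9 10 15 -1
byte = (char2hex('2') << 4) | char2hex('f')  # combine two hex digits into 47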
def validate(table, constraints=None, header=None):
"""
Validate a `table` against a set of `constraints` and/or an expected
`header`, e.g.::
>>> import petl as etl
>>> # define some validation constraints
... header = ('foo', 'bar', 'baz')
>>> constraints = [
... dict(name='foo_int', field='foo', test=int),
... dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
... dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
... dict(name='not_none', assertion=lambda row: None not in row),
... dict(name='qux_int', field='qux', test=int, optional=True),
... ]
>>> # now validate a table
... table = (('foo', 'bar', 'bazzz'),
... (1, '2000-01-01', 'Y'),
... ('x', '2010-10-10', 'N'),
... (2, '2000/01/01', 'Y'),
... (3, '2015-12-12', 'x'),
... (4, None, 'N'),
... ('y', '1999-99-99', 'z'),
... (6, '2000-01-01'),
... (7, '2001-02-02', 'N', True))
>>> problems = etl.validate(table, constraints=constraints, header=header)
>>> problems.lookall()
+--------------+-----+-------+--------------+------------------+
| name | row | field | value | error |
+==============+=====+=======+==============+==================+
| '__header__' | 0 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 2 | 'foo' | 'x' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 3 | 'bar' | '2000/01/01' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 4 | 'baz' | 'x' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 5 | 'bar' | None | 'AttributeError' |
+--------------+-----+-------+--------------+------------------+
| 'not_none' | 5 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 6 | 'foo' | 'y' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 6 | 'bar' | '1999-99-99' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 6 | 'baz' | 'z' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 7 | None | 2 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 7 | 'baz' | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 8 | None | 4 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
Returns a table of validation problems.
""" # noqa
return ProblemsView(table, constraints=constraints, header=header) | Validate a `table` against a set of `constraints` and/or an expected
`header`, e.g.::
>>> import petl as etl
>>> # define some validation constraints
... header = ('foo', 'bar', 'baz')
>>> constraints = [
... dict(name='foo_int', field='foo', test=int),
... dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
... dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
... dict(name='not_none', assertion=lambda row: None not in row),
... dict(name='qux_int', field='qux', test=int, optional=True),
... ]
>>> # now validate a table
... table = (('foo', 'bar', 'bazzz'),
... (1, '2000-01-01', 'Y'),
... ('x', '2010-10-10', 'N'),
... (2, '2000/01/01', 'Y'),
... (3, '2015-12-12', 'x'),
... (4, None, 'N'),
... ('y', '1999-99-99', 'z'),
... (6, '2000-01-01'),
... (7, '2001-02-02', 'N', True))
>>> problems = etl.validate(table, constraints=constraints, header=header)
>>> problems.lookall()
+--------------+-----+-------+--------------+------------------+
| name | row | field | value | error |
+==============+=====+=======+==============+==================+
| '__header__' | 0 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 2 | 'foo' | 'x' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 3 | 'bar' | '2000/01/01' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 4 | 'baz' | 'x' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 5 | 'bar' | None | 'AttributeError' |
+--------------+-----+-------+--------------+------------------+
| 'not_none' | 5 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 6 | 'foo' | 'y' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 6 | 'bar' | '1999-99-99' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 6 | 'baz' | 'z' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 7 | None | 2 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 7 | 'baz' | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 8 | None | 4 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
Returns a table of validation problems. | Below is the instruction that describes the task:
### Input:
Validate a `table` against a set of `constraints` and/or an expected
`header`, e.g.::
>>> import petl as etl
>>> # define some validation constraints
... header = ('foo', 'bar', 'baz')
>>> constraints = [
... dict(name='foo_int', field='foo', test=int),
... dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
... dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
... dict(name='not_none', assertion=lambda row: None not in row),
... dict(name='qux_int', field='qux', test=int, optional=True),
... ]
>>> # now validate a table
... table = (('foo', 'bar', 'bazzz'),
... (1, '2000-01-01', 'Y'),
... ('x', '2010-10-10', 'N'),
... (2, '2000/01/01', 'Y'),
... (3, '2015-12-12', 'x'),
... (4, None, 'N'),
... ('y', '1999-99-99', 'z'),
... (6, '2000-01-01'),
... (7, '2001-02-02', 'N', True))
>>> problems = etl.validate(table, constraints=constraints, header=header)
>>> problems.lookall()
+--------------+-----+-------+--------------+------------------+
| name | row | field | value | error |
+==============+=====+=======+==============+==================+
| '__header__' | 0 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 2 | 'foo' | 'x' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 3 | 'bar' | '2000/01/01' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 4 | 'baz' | 'x' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 5 | 'bar' | None | 'AttributeError' |
+--------------+-----+-------+--------------+------------------+
| 'not_none' | 5 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 6 | 'foo' | 'y' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 6 | 'bar' | '1999-99-99' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 6 | 'baz' | 'z' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 7 | None | 2 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 7 | 'baz' | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 8 | None | 4 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
Returns a table of validation problems.
### Response:
def validate(table, constraints=None, header=None):
"""
Validate a `table` against a set of `constraints` and/or an expected
`header`, e.g.::
>>> import petl as etl
>>> # define some validation constraints
... header = ('foo', 'bar', 'baz')
>>> constraints = [
... dict(name='foo_int', field='foo', test=int),
... dict(name='bar_date', field='bar', test=etl.dateparser('%Y-%m-%d')),
... dict(name='baz_enum', field='baz', assertion=lambda v: v in ['Y', 'N']),
... dict(name='not_none', assertion=lambda row: None not in row),
... dict(name='qux_int', field='qux', test=int, optional=True),
... ]
>>> # now validate a table
... table = (('foo', 'bar', 'bazzz'),
... (1, '2000-01-01', 'Y'),
... ('x', '2010-10-10', 'N'),
... (2, '2000/01/01', 'Y'),
... (3, '2015-12-12', 'x'),
... (4, None, 'N'),
... ('y', '1999-99-99', 'z'),
... (6, '2000-01-01'),
... (7, '2001-02-02', 'N', True))
>>> problems = etl.validate(table, constraints=constraints, header=header)
>>> problems.lookall()
+--------------+-----+-------+--------------+------------------+
| name | row | field | value | error |
+==============+=====+=======+==============+==================+
| '__header__' | 0 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 2 | 'foo' | 'x' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 3 | 'bar' | '2000/01/01' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 4 | 'baz' | 'x' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 5 | 'bar' | None | 'AttributeError' |
+--------------+-----+-------+--------------+------------------+
| 'not_none' | 5 | None | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'foo_int' | 6 | 'foo' | 'y' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'bar_date' | 6 | 'bar' | '1999-99-99' | 'ValueError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 6 | 'baz' | 'z' | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 7 | None | 2 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| 'baz_enum' | 7 | 'baz' | None | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
| '__len__' | 8 | None | 4 | 'AssertionError' |
+--------------+-----+-------+--------------+------------------+
Returns a table of validation problems.
""" # noqa
return ProblemsView(table, constraints=constraints, header=header) |
def make_params(
key_parts: Sequence[str],
variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
"""
    Map keys to variables. This maps\
    URL-pattern variables to\
    the related URL parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private:
"""
# The unwrapped variable parts are in reverse order.
# Instead of reversing those we reverse the key parts
# and avoid the O(n) space required for reversing the vars
    return dict(zip(reversed(key_parts), _unwrap(variable_parts))) | Map keys to variables. This maps\
URL-pattern variables to\
the related URL parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private: | Below is the instruction that describes the task:
### Input:
Map keys to variables. This maps\
URL-pattern variables to\
the related URL parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private:
### Response:
def make_params(
key_parts: Sequence[str],
variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
"""
    Map keys to variables. This maps\
    URL-pattern variables to\
    the related URL parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private:
"""
# The unwrapped variable parts are in reverse order.
# Instead of reversing those we reverse the key parts
# and avoid the O(n) space required for reversing the vars
return dict(zip(reversed(key_parts), _unwrap(variable_parts))) |
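The reversed-zip trick above can be shown with plain tuples; this stand-alone sketch skips the private _unwrap helper and uses invented values:
key_parts = ('year', 'month', 'slug')
unwrapped = ('my-post', '07', '2019')   # variable parts arrive in reverse order
params = dict(zip(reversed(key_parts), unwrapped))
print(params)  # {'slug': 'my-post', 'month': '07', 'year': '2019'}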
def decode_tuple(data, encoding=None, errors='strict', keep=False,
normalize=False, preserve_dict_class=False, to_str=False):
'''
Decode all string values to Unicode. Optionally use to_str=True to ensure
strings are str types and not unicode on Python 2.
'''
return tuple(
decode_list(data, encoding, errors, keep, normalize,
preserve_dict_class, True, to_str)
) | Decode all string values to Unicode. Optionally use to_str=True to ensure
strings are str types and not unicode on Python 2. | Below is the instruction that describes the task:
### Input:
Decode all string values to Unicode. Optionally use to_str=True to ensure
strings are str types and not unicode on Python 2.
### Response:
def decode_tuple(data, encoding=None, errors='strict', keep=False,
normalize=False, preserve_dict_class=False, to_str=False):
'''
Decode all string values to Unicode. Optionally use to_str=True to ensure
strings are str types and not unicode on Python 2.
'''
return tuple(
decode_list(data, encoding, errors, keep, normalize,
preserve_dict_class, True, to_str)
) |
def fixed_width_binning(data=None, bin_width: Union[float, int] = 1, *, range=None, includes_right_edge=False, **kwargs) -> FixedWidthBinning:
"""Construct fixed-width binning schema.
Parameters
----------
bin_width: float
range: Optional[tuple]
(min, max)
align: Optional[float]
Must be multiple of bin_width
"""
result = FixedWidthBinning(bin_width=bin_width, includes_right_edge=includes_right_edge,
**kwargs)
if range:
result._force_bin_existence(range[0])
result._force_bin_existence(range[1], includes_right_edge=True)
if not kwargs.get("adaptive"):
return result # Otherwise we want to adapt to data
if data is not None and data.shape[0]:
# print("Jo, tady")
result._force_bin_existence([np.min(data), np.max(data)],
includes_right_edge=includes_right_edge)
return result | Construct fixed-width binning schema.
Parameters
----------
bin_width: float
range: Optional[tuple]
(min, max)
align: Optional[float]
    Must be multiple of bin_width | Below is the instruction that describes the task:
### Input:
Construct fixed-width binning schema.
Parameters
----------
bin_width: float
range: Optional[tuple]
(min, max)
align: Optional[float]
Must be multiple of bin_width
### Response:
def fixed_width_binning(data=None, bin_width: Union[float, int] = 1, *, range=None, includes_right_edge=False, **kwargs) -> FixedWidthBinning:
"""Construct fixed-width binning schema.
Parameters
----------
bin_width: float
range: Optional[tuple]
(min, max)
align: Optional[float]
Must be multiple of bin_width
"""
result = FixedWidthBinning(bin_width=bin_width, includes_right_edge=includes_right_edge,
**kwargs)
if range:
result._force_bin_existence(range[0])
result._force_bin_existence(range[1], includes_right_edge=True)
if not kwargs.get("adaptive"):
return result # Otherwise we want to adapt to data
if data is not None and data.shape[0]:
# print("Jo, tady")
result._force_bin_existence([np.min(data), np.max(data)],
includes_right_edge=includes_right_edge)
return result |
def fmt_text(text):
""" convert characters that aren't printable to hex format
"""
PRINTABLE_CHAR = set(
list(range(ord(' '), ord('~') + 1)) + [ord('\r'), ord('\n')])
newtext = ("\\x{:02X}".format(
c) if c not in PRINTABLE_CHAR else chr(c) for c in text)
textlines = "\r\n".join(l.strip('\r')
for l in "".join(newtext).split('\n'))
    return textlines | convert characters that aren't printable to hex format | Below is the instruction that describes the task:
### Input:
convert characters that aren't printable to hex format
### Response:
def fmt_text(text):
""" convert characters that aren't printable to hex format
"""
PRINTABLE_CHAR = set(
list(range(ord(' '), ord('~') + 1)) + [ord('\r'), ord('\n')])
newtext = ("\\x{:02X}".format(
c) if c not in PRINTABLE_CHAR else chr(c) for c in text)
textlines = "\r\n".join(l.strip('\r')
for l in "".join(newtext).split('\n'))
return textlines |
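A usage sketch, assuming the fmt_text definition above; it expects a bytes-like value because it iterates integer byte values (the sample payload is invented):
raw = b'GET /\x00\xff HTTP/1.1\r\nHost: example.com\r\n'
print(fmt_text(raw))
# GET /\x00\xFF HTTP/1.1
# Host: example.com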
def ack(self):
"""Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self._frame)
self._state = "ACK" | Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected. | Below is the instruction that describes the task:
### Input:
Acknowledge this message as being processed.
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
### Response:
def ack(self):
"""Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.acknowledged:
raise self.MessageStateError(
"Message already acknowledged with state: %s" % self._state)
self.backend.ack(self._frame)
self._state = "ACK" |
def restore(path, password_file=None):
"""
Retrieves a file from the atk vault and restores it to its original
location, re-encrypting it if it has changed.
:param path: path to original file
"""
vault = VaultLib(get_vault_password(password_file))
atk_path = os.path.join(ATK_VAULT, path)
# Load stored data
with open(os.path.join(atk_path, 'encrypted'), 'rb') as f:
old_data = f.read()
with open(os.path.join(atk_path, 'hash'), 'rb') as f:
old_hash = f.read()
# Load new data
with open(path, 'rb') as f:
new_data = f.read()
new_hash = hashlib.sha1(new_data).hexdigest()
# Determine whether to re-encrypt
if old_hash != new_hash:
new_data = vault.encrypt(new_data)
else:
new_data = old_data
# Update file
with open(path, 'wb') as f:
f.write(new_data)
# Clean atk vault
os.remove(os.path.join(atk_path, 'encrypted'))
os.remove(os.path.join(atk_path, 'hash')) | Retrieves a file from the atk vault and restores it to its original
location, re-encrypting it if it has changed.
:param path: path to original file | Below is the instruction that describes the task:
### Input:
Retrieves a file from the atk vault and restores it to its original
location, re-encrypting it if it has changed.
:param path: path to original file
### Response:
def restore(path, password_file=None):
"""
Retrieves a file from the atk vault and restores it to its original
location, re-encrypting it if it has changed.
:param path: path to original file
"""
vault = VaultLib(get_vault_password(password_file))
atk_path = os.path.join(ATK_VAULT, path)
# Load stored data
with open(os.path.join(atk_path, 'encrypted'), 'rb') as f:
old_data = f.read()
with open(os.path.join(atk_path, 'hash'), 'rb') as f:
old_hash = f.read()
# Load new data
with open(path, 'rb') as f:
new_data = f.read()
new_hash = hashlib.sha1(new_data).hexdigest()
# Determine whether to re-encrypt
if old_hash != new_hash:
new_data = vault.encrypt(new_data)
else:
new_data = old_data
# Update file
with open(path, 'wb') as f:
f.write(new_data)
# Clean atk vault
os.remove(os.path.join(atk_path, 'encrypted'))
os.remove(os.path.join(atk_path, 'hash')) |
def listRunSummaries(self, dataset="", run_num=-1):
"""
API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi
"""
if run_num==-1:
dbsExceptionHandler("dbsException-invalid-input",
"The run_num parameter is mandatory",
self.logger.exception)
if re.search('[*,%]', dataset):
dbsExceptionHandler("dbsException-invalid-input",
"No wildcards are allowed in dataset",
self.logger.exception)
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when dataset is given in this API.
# YG Jan. 16 2019
if ((run_num == -1 or run_num == '-1') and dataset==''):
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input when no dataset is present.",
self.logger.exception)
conn = None
try:
conn = self.dbi.connection()
return self.dbsRunSummaryListDAO.execute(conn, dataset, run_num)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listRunSummaries. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'],
self.logger.exception, sError)
finally:
if conn:
conn.close() | API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi | Below is the instruction that describes the task:
### Input:
API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi
### Response:
def listRunSummaries(self, dataset="", run_num=-1):
"""
API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi
"""
if run_num==-1:
dbsExceptionHandler("dbsException-invalid-input",
"The run_num parameter is mandatory",
self.logger.exception)
if re.search('[*,%]', dataset):
dbsExceptionHandler("dbsException-invalid-input",
"No wildcards are allowed in dataset",
self.logger.exception)
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when dataset is given in this API.
# YG Jan. 16 2019
if ((run_num == -1 or run_num == '-1') and dataset==''):
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input when no dataset is present.",
self.logger.exception)
conn = None
try:
conn = self.dbi.connection()
return self.dbsRunSummaryListDAO.execute(conn, dataset, run_num)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listRunSummaries. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'],
self.logger.exception, sError)
finally:
if conn:
conn.close() |
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply `func` to every input to get its label."
        return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs) | Apply `func` to every input to get its label. | Below is the instruction that describes the task:
### Input:
Apply `func` to every input to get its label.
### Response:
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply `func` to every input to get its label."
return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs) |
def hid(manufacturer: str, serial_number: str, model: str) -> str:
"""Computes the HID for the given properties of a device. The HID is suitable to use to an URI."""
    return Naming.url_word(manufacturer) + '-' + Naming.url_word(serial_number) + '-' + Naming.url_word(model) | Computes the HID for the given properties of a device. The HID is suitable to use to an URI. | Below is the instruction that describes the task:
### Input:
Computes the HID for the given properties of a device. The HID is suitable to use to an URI.
### Response:
def hid(manufacturer: str, serial_number: str, model: str) -> str:
"""Computes the HID for the given properties of a device. The HID is suitable to use to an URI."""
return Naming.url_word(manufacturer) + '-' + Naming.url_word(serial_number) + '-' + Naming.url_word(model) |
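A sketch of the intent only: the Naming helper is not defined in this row, so the url_word stub below is a guess at its slugifying behaviour, invented for illustration:
class Naming:
    @staticmethod
    def url_word(s: str) -> str:
        return s.strip().lower().replace(' ', '-')   # illustrative stub, not the real implementation

print(hid('Dell Inc.', 'ABC123', 'Latitude 5480'))  # dell-inc.-abc123-latitude-5480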
def decode(self, encoded):
""" Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
object: Object decoded.
"""
if self.enforce_reversible:
self.enforce_reversible = False
if self.encode(self.decode(encoded)) != encoded:
raise ValueError('Decoding is not reversible for "%s"' % encoded)
self.enforce_reversible = True
return encoded | Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
    object: Object decoded. | Below is the instruction that describes the task:
### Input:
Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
object: Object decoded.
### Response:
def decode(self, encoded):
""" Decodes an object.
Args:
object_ (object): Encoded object.
Returns:
object: Object decoded.
"""
if self.enforce_reversible:
self.enforce_reversible = False
if self.encode(self.decode(encoded)) != encoded:
raise ValueError('Decoding is not reversible for "%s"' % encoded)
self.enforce_reversible = True
return encoded |
def on_key_release(self, symbol, modifiers):
"""
Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event`
"""
self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers) | Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event` | Below is the instruction that describes the task:
### Input:
Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event`
### Response:
def on_key_release(self, symbol, modifiers):
"""
Pyglet specific key release callback.
Forwards and translates the events to :py:func:`keyboard_event`
"""
self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers) |
def error_response(self, kwargs_lens, kwargs_ps):
"""
returns the 1d array of the error estimate corresponding to the data response
        :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
"""
C_D_response, model_error = [], []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]]
C_D_response_i, model_error_i = self._imageModel_list[i].error_response(kwargs_lens_i, kwargs_ps)
model_error.append(model_error_i)
if C_D_response == []:
C_D_response = C_D_response_i
else:
C_D_response = np.append(C_D_response, C_D_response_i)
return C_D_response, model_error | returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties) | Below is the instruction that describes the task:
### Input:
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
### Response:
def error_response(self, kwargs_lens, kwargs_ps):
"""
returns the 1d array of the error estimate corresponding to the data response
        :return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
"""
C_D_response, model_error = [], []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]]
C_D_response_i, model_error_i = self._imageModel_list[i].error_response(kwargs_lens_i, kwargs_ps)
model_error.append(model_error_i)
if C_D_response == []:
C_D_response = C_D_response_i
else:
C_D_response = np.append(C_D_response, C_D_response_i)
return C_D_response, model_error |
def read_reply(self) -> Reply:
'''Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine.
'''
_logger.debug('Read reply')
reply = Reply()
while True:
line = yield from self._connection.readline()
if line[-1:] != b'\n':
raise NetworkError('Connection closed.')
self._data_event_dispatcher.notify_read(line)
reply.parse(line)
if reply.code is not None:
break
return reply | Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine. | Below is the the instruction that describes the task:
### Input:
Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine.
### Response:
def read_reply(self) -> Reply:
'''Read a reply from the stream.
Returns:
.ftp.request.Reply: The reply
Coroutine.
'''
_logger.debug('Read reply')
reply = Reply()
while True:
line = yield from self._connection.readline()
if line[-1:] != b'\n':
raise NetworkError('Connection closed.')
self._data_event_dispatcher.notify_read(line)
reply.parse(line)
if reply.code is not None:
break
return reply |
def download_reference_files(job, inputs, samples):
"""
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
"""
# Create dictionary to store FileStoreIDs of shared input files
shared_ids = {}
urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
('pac', inputs.pac), ('sa', inputs.sa)]
# Alt file is optional and can only be provided, not generated
if inputs.alt:
urls.append(('alt', inputs.alt))
# Download reference
download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G') # Human genomes are typically ~3G
job.addChild(download_ref)
shared_ids['ref'] = download_ref.rv()
# If FAI is provided, download it. Otherwise, generate it
if inputs.fai:
shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
else:
faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
shared_ids['fai'] = download_ref.addChild(faidx).rv()
# If all BWA index files are provided, download them. Otherwise, generate them
if all(x[1] for x in urls):
for name, url in urls:
shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
else:
job.fileStore.logToMaster('BWA index files not provided, creating now')
bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
download_ref.addChild(bwa_index)
for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
shared_ids[name] = bwa_index.rv(x)
    # Map_job distributes one sample in samples to the download_sample_and_align function
job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids) | Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]] | Below is the instruction that describes the task:
### Input:
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
### Response:
def download_reference_files(job, inputs, samples):
"""
Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace inputs: Input arguments (see main)
:param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
"""
# Create dictionary to store FileStoreIDs of shared input files
shared_ids = {}
urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
('pac', inputs.pac), ('sa', inputs.sa)]
# Alt file is optional and can only be provided, not generated
if inputs.alt:
urls.append(('alt', inputs.alt))
# Download reference
download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G') # Human genomes are typically ~3G
job.addChild(download_ref)
shared_ids['ref'] = download_ref.rv()
# If FAI is provided, download it. Otherwise, generate it
if inputs.fai:
shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
else:
faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
shared_ids['fai'] = download_ref.addChild(faidx).rv()
# If all BWA index files are provided, download them. Otherwise, generate them
if all(x[1] for x in urls):
for name, url in urls:
shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
else:
job.fileStore.logToMaster('BWA index files not provided, creating now')
bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
download_ref.addChild(bwa_index)
for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
shared_ids[name] = bwa_index.rv(x)
    # Map_job distributes one sample in samples to the download_sample_and_align function
job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids) |
async def request(self, method, url, **kwargs):
"""Handles requests to the API"""
rate_limiter = RateLimiter(max_calls=59, period=60, callback=limited)
# handles ratelimits. max_calls is set to 59 because current implementation will retry in 60s after 60 calls is reached. DBL has a 1h block so obviously this doesn't work well, as it will get a 429 when 60 is reached.
async with rate_limiter: # this works but doesn't 'save' over restart. need a better implementation.
if not self.token:
raise UnauthorizedDetected('UnauthorizedDetected (status code: 401): No TOKEN provided')
headers = {
'User-Agent': self.user_agent,
'Content-Type': 'application/json'
}
if 'json' in kwargs:
kwargs['data'] = to_json(kwargs.pop('json'))
kwargs['headers'] = headers
headers['Authorization'] = self.token
for tries in range(5):
async with self.session.request(method, url, **kwargs) as resp:
log.debug('%s %s with %s has returned %s', method,
url, kwargs.get('data'), resp.status)
data = await json_or_text(resp)
if 300 > resp.status >= 200:
return data
if resp.status == 429: # we are being ratelimited
fmt = 'We are being rate limited. Retrying in %.2f seconds (%.3f minutes).'
# sleep a bit
retry_after = json.loads(resp.headers.get('Retry-After'))
mins = retry_after / 60
log.warning(fmt, retry_after, mins)
# check if it's a global rate limit (True as only 1 ratelimit atm - /api/bots)
is_global = True # is_global = data.get('global', False)
if is_global:
self._global_over.clear()
await asyncio.sleep(retry_after, loop=self.loop)
log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
self._global_over.set()
log.debug('Global rate limit is now over.')
continue
if resp.status == 400:
raise HTTPException(resp, data)
elif resp.status == 401:
raise Unauthorized(resp, data)
elif resp.status == 403:
raise Forbidden(resp, data)
elif resp.status == 404:
raise NotFound(resp, data)
else:
raise HTTPException(resp, data)
# We've run out of retries, raise.
            raise HTTPException(resp, data) | Handles requests to the API | Below is the instruction that describes the task:
### Input:
Handles requests to the API
### Response:
async def request(self, method, url, **kwargs):
"""Handles requests to the API"""
rate_limiter = RateLimiter(max_calls=59, period=60, callback=limited)
# handles ratelimits. max_calls is set to 59 because current implementation will retry in 60s after 60 calls is reached. DBL has a 1h block so obviously this doesn't work well, as it will get a 429 when 60 is reached.
async with rate_limiter: # this works but doesn't 'save' over restart. need a better implementation.
if not self.token:
raise UnauthorizedDetected('UnauthorizedDetected (status code: 401): No TOKEN provided')
headers = {
'User-Agent': self.user_agent,
'Content-Type': 'application/json'
}
if 'json' in kwargs:
kwargs['data'] = to_json(kwargs.pop('json'))
kwargs['headers'] = headers
headers['Authorization'] = self.token
for tries in range(5):
async with self.session.request(method, url, **kwargs) as resp:
log.debug('%s %s with %s has returned %s', method,
url, kwargs.get('data'), resp.status)
data = await json_or_text(resp)
if 300 > resp.status >= 200:
return data
if resp.status == 429: # we are being ratelimited
fmt = 'We are being rate limited. Retrying in %.2f seconds (%.3f minutes).'
# sleep a bit
retry_after = json.loads(resp.headers.get('Retry-After'))
mins = retry_after / 60
log.warning(fmt, retry_after, mins)
# check if it's a global rate limit (True as only 1 ratelimit atm - /api/bots)
is_global = True # is_global = data.get('global', False)
if is_global:
self._global_over.clear()
await asyncio.sleep(retry_after, loop=self.loop)
log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
self._global_over.set()
log.debug('Global rate limit is now over.')
continue
if resp.status == 400:
raise HTTPException(resp, data)
elif resp.status == 401:
raise Unauthorized(resp, data)
elif resp.status == 403:
raise Forbidden(resp, data)
elif resp.status == 404:
raise NotFound(resp, data)
else:
raise HTTPException(resp, data)
# We've run out of retries, raise.
raise HTTPException(resp, data) |
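
A minimal usage sketch of the request coroutine above; the wrapper function, client instance name, base URL, and endpoint path below are illustrative assumptions, not taken from the source.

    # Hedged sketch: 'http' stands for an instance of the class that defines request();
    # the '/bots/{id}/stats' path and the payload are assumed for illustration only.
    async def post_server_count(http, base_url, bot_id, count):
        url = '{}/bots/{}/stats'.format(base_url, bot_id)
        # request() itself retries up to 5 times and sleeps through 429 rate limits.
        return await http.request('POST', url, json={'server_count': count})
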
def rebin(a, factor, func=None):
u"""Aggregate data from the input array ``a`` into rectangular tiles.
The output array results from tiling ``a`` and applying `func` to
each tile. ``factor`` specifies the size of the tiles. More
precisely, the returned array ``out`` is such that::
out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...])
If ``factor`` is an integer-like scalar, then
``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a
sequence of integer-like scalars, then ``f0 = factor[0]``,
``f1 = factor[1]``, ... and the length of ``factor`` must equal the
number of dimensions of ``a``.
The reduction function ``func`` must accept an ``axis`` argument.
    Examples of such functions are
- ``numpy.mean`` (default),
- ``numpy.sum``,
- ``numpy.product``,
- ...
The following example shows how a (4, 6) array is reduced to a
(2, 2) array
>>> import numpy
>>> from rebin import rebin
>>> a = numpy.arange(24).reshape(4, 6)
>>> rebin(a, factor=(2, 3), func=numpy.sum)
array([[ 24, 42],
[ 96, 114]])
    If the dimensions of `a` are not integer multiples of the
    elements of `factor`, the remaining cells are discarded.
    >>> a = numpy.arange(28).reshape(4, 7)
    >>> rebin(a, factor=(2, 2), func=numpy.sum)
array([[16, 24, 32],
[72, 80, 88]])
"""
a = np.asarray(a)
dim = a.ndim
if np.isscalar(factor):
factor = dim*(factor,)
elif len(factor) != dim:
raise ValueError('length of factor must be {} (was {})'
.format(dim, len(factor)))
if func is None:
func = np.mean
for f in factor:
if f != int(f):
raise ValueError('factor must be an int or a tuple of ints '
'(got {})'.format(f))
new_shape = [n//f for n, f in zip(a.shape, factor)]+list(factor)
new_strides = [s*f for s, f in zip(a.strides, factor)]+list(a.strides)
aa = as_strided(a, shape=new_shape, strides=new_strides)
return func(aa, axis=tuple(range(-dim, 0))) | u"""Aggregate data from the input array ``a`` into rectangular tiles.
The output array results from tiling ``a`` and applying `func` to
each tile. ``factor`` specifies the size of the tiles. More
precisely, the returned array ``out`` is such that::
out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...])
If ``factor`` is an integer-like scalar, then
``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a
sequence of integer-like scalars, then ``f0 = factor[0]``,
``f1 = factor[1]``, ... and the length of ``factor`` must equal the
number of dimensions of ``a``.
The reduction function ``func`` must accept an ``axis`` argument.
    Examples of such functions are
- ``numpy.mean`` (default),
- ``numpy.sum``,
- ``numpy.product``,
- ...
The following example shows how a (4, 6) array is reduced to a
(2, 2) array
>>> import numpy
>>> from rebin import rebin
>>> a = numpy.arange(24).reshape(4, 6)
>>> rebin(a, factor=(2, 3), func=numpy.sum)
array([[ 24, 42],
[ 96, 114]])
    If the dimensions of `a` are not integer multiples of the
    elements of `factor`, the remaining cells are discarded.
    >>> a = numpy.arange(28).reshape(4, 7)
    >>> rebin(a, factor=(2, 2), func=numpy.sum)
array([[16, 24, 32],
    [72, 80, 88]]) | Below is the instruction that describes the task:
### Input:
u"""Aggregate data from the input array ``a`` into rectangular tiles.
The output array results from tiling ``a`` and applying `func` to
each tile. ``factor`` specifies the size of the tiles. More
precisely, the returned array ``out`` is such that::
out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...])
If ``factor`` is an integer-like scalar, then
``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a
sequence of integer-like scalars, then ``f0 = factor[0]``,
``f1 = factor[1]``, ... and the length of ``factor`` must equal the
number of dimensions of ``a``.
The reduction function ``func`` must accept an ``axis`` argument.
    Examples of such functions are
- ``numpy.mean`` (default),
- ``numpy.sum``,
- ``numpy.product``,
- ...
The following example shows how a (4, 6) array is reduced to a
(2, 2) array
>>> import numpy
>>> from rebin import rebin
>>> a = numpy.arange(24).reshape(4, 6)
>>> rebin(a, factor=(2, 3), func=numpy.sum)
array([[ 24, 42],
[ 96, 114]])
    If the dimensions of `a` are not integer multiples of the
    elements of `factor`, the remaining cells are discarded.
    >>> a = numpy.arange(28).reshape(4, 7)
    >>> rebin(a, factor=(2, 2), func=numpy.sum)
array([[16, 24, 32],
[72, 80, 88]])
### Response:
def rebin(a, factor, func=None):
u"""Aggregate data from the input array ``a`` into rectangular tiles.
The output array results from tiling ``a`` and applying `func` to
each tile. ``factor`` specifies the size of the tiles. More
precisely, the returned array ``out`` is such that::
out[i0, i1, ...] = func(a[f0*i0:f0*(i0+1), f1*i1:f1*(i1+1), ...])
If ``factor`` is an integer-like scalar, then
``f0 = f1 = ... = factor`` in the above formula. If ``factor`` is a
sequence of integer-like scalars, then ``f0 = factor[0]``,
``f1 = factor[1]``, ... and the length of ``factor`` must equal the
number of dimensions of ``a``.
The reduction function ``func`` must accept an ``axis`` argument.
    Examples of such functions are
- ``numpy.mean`` (default),
- ``numpy.sum``,
- ``numpy.product``,
- ...
The following example shows how a (4, 6) array is reduced to a
(2, 2) array
>>> import numpy
>>> from rebin import rebin
>>> a = numpy.arange(24).reshape(4, 6)
>>> rebin(a, factor=(2, 3), func=numpy.sum)
array([[ 24, 42],
[ 96, 114]])
    If the dimensions of `a` are not integer multiples of the
    elements of `factor`, the remaining cells are discarded.
    >>> a = numpy.arange(28).reshape(4, 7)
    >>> rebin(a, factor=(2, 2), func=numpy.sum)
array([[16, 24, 32],
[72, 80, 88]])
"""
a = np.asarray(a)
dim = a.ndim
if np.isscalar(factor):
factor = dim*(factor,)
elif len(factor) != dim:
raise ValueError('length of factor must be {} (was {})'
.format(dim, len(factor)))
if func is None:
func = np.mean
for f in factor:
if f != int(f):
raise ValueError('factor must be an int or a tuple of ints '
'(got {})'.format(f))
new_shape = [n//f for n, f in zip(a.shape, factor)]+list(factor)
new_strides = [s*f for s, f in zip(a.strides, factor)]+list(a.strides)
aa = as_strided(a, shape=new_shape, strides=new_strides)
return func(aa, axis=tuple(range(-dim, 0))) |
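
A short doctest-style check of the scalar-``factor`` path, assuming the same ``numpy.arange(24).reshape(4, 6)`` array as the first example and the default ``numpy.mean`` reduction:

    >>> a = numpy.arange(24).reshape(4, 6)
    >>> rebin(a, factor=2)
    array([[ 3.5,  5.5,  7.5],
           [15.5, 17.5, 19.5]])
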
def _log_begin(self):
""" Log the beginning of the task execution """
self.logger.info("Beginning task: %s", self.__class__.__name__)
if self.salesforce_task and not self.flow:
self.logger.info("%15s %s", "As user:", self.org_config.username)
self.logger.info("%15s %s", "In org:", self.org_config.org_id)
self.logger.info("") | Log the beginning of the task execution | Below is the the instruction that describes the task:
### Input:
Log the beginning of the task execution
### Response:
def _log_begin(self):
""" Log the beginning of the task execution """
self.logger.info("Beginning task: %s", self.__class__.__name__)
if self.salesforce_task and not self.flow:
self.logger.info("%15s %s", "As user:", self.org_config.username)
self.logger.info("%15s %s", "In org:", self.org_config.org_id)
self.logger.info("") |
def resize(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e., if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation) | r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e., if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
        PIL Image: Resized image. | Below is the instruction that describes the task:
### Input:
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e., if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
### Response:
def resize(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaing
the aspect ratio. i.e, if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation) |
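
A brief usage sketch of the two resize branches; the 400x300 input image and the chosen sizes are hypothetical:

    # Hedged example built with Pillow; resize() is the function defined above.
    from PIL import Image
    img = Image.new('RGB', (400, 300))      # hypothetical 400x300 (w, h) image
    resize(img, 150).size                   # int size: shorter edge -> 150, result (200, 150)
    resize(img, (100, 250)).size            # (h, w) sequence is flipped to PIL's (w, h): (250, 100)
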
def get_proficiencies(self):
"""Gets the proficiency list resulting from a search.
return: (osid.learning.ProficiencyList) - the proficiency list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.ProficiencyList(self._results, runtime=self._runtime) | Gets the proficiency list resulting from a search.
return: (osid.learning.ProficiencyList) - the proficiency list
raise: IllegalState - list already retrieved
        *compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the proficiency list resulting from a search.
return: (osid.learning.ProficiencyList) - the proficiency list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_proficiencies(self):
"""Gets the proficiency list resulting from a search.
return: (osid.learning.ProficiencyList) - the proficiency list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.ProficiencyList(self._results, runtime=self._runtime) |
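
A hypothetical consumption sketch for the search-results object; only ``get_proficiencies()`` comes from the source, while the session call, query objects, and attribute names follow the general OSID search pattern and are assumptions:

    # Hedged sketch; names other than get_proficiencies() are illustrative.
    results = session.get_proficiencies_by_search(query, search)   # assumed session call
    proficiencies = results.get_proficiencies()                     # first retrieval succeeds
    for proficiency in proficiencies:
        print(proficiency.display_name)                             # assumed attribute
    results.get_proficiencies()                                     # a second call raises errors.IllegalState
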