def save_data(self,session, exp_id, content):
'''save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files'''
from expfactory.database.models import (
Participant,
Result
)
subid = session.get('subid')
token = session.get('token')
self.logger.info('Saving data for subid %s' % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(Participant.id == subid).first() # better query here
# Does
if self.headless and p.token != token:
self.logger.warning('%s attempting to use mismatched token [%s] skipping save' %(p.id, token))
elif self.headless and p.token.endswith(('finished','revoked')):
self.logger.warning('%s attempting to use expired token [%s] skipping save' %(p.id, token))
else:
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content['data']
result = Result(data=content,
exp_id=exp_id,
participant_id=p.id) # check if changes from str/int
# Create and save the result
self.session.add(result)
p.results.append(result)
self.session.commit()
self.logger.info("Save [participant] %s [result] %s" %(p, result)) | save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files | Below is the the instruction that describes the task:
### Input:
save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files
### Response:
def save_data(self,session, exp_id, content):
'''save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files'''
from expfactory.database.models import (
Participant,
Result
)
subid = session.get('subid')
token = session.get('token')
self.logger.info('Saving data for subid %s' % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(Participant.id == subid).first() # better query here
# Does
if self.headless and p.token != token:
self.logger.warning('%s attempting to use mismatched token [%s] skipping save' %(p.id, token))
elif self.headless and p.token.endswith(('finished','revoked')):
self.logger.warning('%s attempting to use expired token [%s] skipping save' %(p.id, token))
else:
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content['data']
result = Result(data=content,
exp_id=exp_id,
participant_id=p.id) # check if changes from str/int
# Create and save the result
self.session.add(result)
p.results.append(result)
self.session.commit()
self.logger.info("Save [participant] %s [result] %s" %(p, result)) |
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data,
headers=("#",)):
"""Limit CNVkit input to calls with support from another caller.
get_coords is a function that return chrom, start, end from a line of the
input_file, allowing handling of multiple input file types.
"""
support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name))
for c in convert.SUBSET_BY_SUPPORT["cnvkit"]]
support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)]
if len(support_files) == 0:
return input_file
else:
out_file = os.path.join(work_dir, "%s-havesupport%s" %
utils.splitext_plus(os.path.basename(input_file)))
if not utils.file_uptodate(out_file, input_file):
input_bed = _input_to_bed(input_file, work_dir, get_coords, headers)
pass_coords = set([])
with file_transaction(data, out_file) as tx_out_file:
support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files])
tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0]
cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}"
do.run(cmd.format(**locals()), "Intersect CNVs with support files")
for r in pybedtools.BedTool(tmp_cmp_bed):
pass_coords.add((str(r.chrom), str(r.start), str(r.stop)))
with open(input_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
passes = True
if not line.startswith(headers):
passes = get_coords(line) in pass_coords
if passes:
out_handle.write(line)
return out_file
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def enable_i2c(self):
"""Set this to `True` to enable the hardware I2C interface. If set to
`False` the hardware interface will be disabled and its pins (SDA and
SCL) can be used as GPIOs.
"""
config = self._interface_configuration(CONFIG_QUERY)
return config == CONFIG_GPIO_I2C or config == CONFIG_SPI_I2C
def get_user_data_iobject(user=None,group=None,data_kind=DINGOS_USER_DATA_TYPE_NAME):
"""
Returns either stored settings of a given user or default settings.
This behavior reflects the need for views to have some settings at
hand when running. The settings are returned as dict object.
"""
logger.debug("Get user settings called")
if not user.is_authenticated():
user = None
try:
user_config = UserData.objects.get(user=user,group=group,data_kind=data_kind)
return user_config.identifier.latest
except:
return None
def cached_idxs(method):
""" this function is used as a decorator for caching """
def method_wrapper(self,*args,**kwargs):
tail = '_'.join(str(idx) for idx in args)
_cache_attr_name = '_cache_'+method.__name__+'_'+tail
_bool_attr_name = '_cached_'+method.__name__+'_'+tail
is_cached = getattr(self,_bool_attr_name)
if not is_cached:
result = method(self, *args, **kwargs)
setattr(self, _cache_attr_name, result)
setattr(self, _bool_attr_name, True)
return getattr(self,_cache_attr_name)
return method_wrapper
def indent(instr,nspaces=4, ntabs=0, flatten=False):
"""Indent a string a given number of spaces or tabstops.
indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
Parameters
----------
instr : basestring
The string to be indented.
nspaces : int (default: 4)
The number of spaces to be indented.
ntabs : int (default: 0)
The number of tabs to be indented.
flatten : bool (default: False)
Whether to scrub existing indentation. If True, all lines will be
aligned to the same indentation. If False, existing indentation will
be strictly increased.
Returns
-------
str|unicode : string indented by ntabs and nspaces.
"""
if instr is None:
return
ind = '\t'*ntabs+' '*nspaces
if flatten:
pat = re.compile(r'^\s*', re.MULTILINE)
else:
pat = re.compile(r'^', re.MULTILINE)
outstr = re.sub(pat, ind, instr)
if outstr.endswith(os.linesep+ind):
return outstr[:-len(ind)]
else:
return outstr
def to_lonlat(xtile, ytile, zoom):
"""Returns a tuple of (longitude, latitude) from a map tile xyz coordinate.
See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2
Arguments:
xtile - x tile location as int or float
ytile - y tile location as int or float
zoom - zoom level as int or float
"""
n = 2.0 ** zoom
lon = xtile / n * 360.0 - 180.0
# Calculate latitude in radians and convert to degrees constrained from -90
# to 90. Values too big for tile coordinate pairs are invalid and could
# overflow.
try:
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
except OverflowError:
raise ValueError('Invalid tile coordinate for zoom level %d' % zoom)
lat = math.degrees(lat_rad)
return lon, lat
def _connect(self):
"""Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating the new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError
"""
# Attempt to get a cached connection from the connection pool
try:
connection = self._pool_manager.get(self.pid, self)
LOGGER.debug("Re-using connection for %s", self.pid)
except pool.NoIdleConnectionsError:
if self._pool_manager.is_full(self.pid):
raise
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
LOGGER.debug("Creating a new connection for %s", self.pid)
connection = self._psycopg2_connect(kwargs)
self._pool_manager.add(self.pid, connection)
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2ct connects and leaves the connection in
# a weird state: consts.STATUS_DATESTYLE, returning from
# Connection._setup without setting the state as const.STATUS_OK
if utils.PYPY:
connection.reset()
# Register the custom data types
self._register_unicode(connection)
self._register_uuid(connection)
return connection
def _factory(slice_, axis, weighted):
"""return subclass for PairwiseSignificance, based on slice dimension types."""
if slice_.dim_types[0] == DT.MR_SUBVAR:
return _MrXCatPairwiseSignificance(slice_, axis, weighted)
return _CatXCatPairwiseSignificance(slice_, axis, weighted)
def write_fits(data, header, file_name):
"""
Combine data and a fits header to write a fits file.
Parameters
----------
data : numpy.ndarray
The data to be written.
header : astropy.io.fits.hduheader
The header for the fits file.
file_name : string
The file to write
Returns
-------
None
"""
hdu = fits.PrimaryHDU(data)
hdu.header = header
hdulist = fits.HDUList([hdu])
hdulist.writeto(file_name, overwrite=True)
logging.info("Wrote {0}".format(file_name))
return
def update_channels(self):
'''update which channels provide input'''
self.interlock_channel = -1
self.override_channel = -1
self.zero_I_channel = -1
self.no_vtol_channel = -1
# output channels
self.rsc_out_channel = 9
self.fwd_thr_channel = 10
for ch in range(1,16):
option = self.get_mav_param("RC%u_OPTION" % ch, 0)
if option == 32:
self.interlock_channel = ch;
elif option == 63:
self.override_channel = ch;
elif option == 64:
self.zero_I_channel = ch;
elif option == 65:
self.override_channel = ch;
elif option == 66:
self.no_vtol_channel = ch;
function = self.get_mav_param("SERVO%u_FUNCTION" % ch, 0)
if function == 32:
self.rsc_out_channel = ch
if function == 70:
self.fwd_thr_channel = ch
def declination_spencer71(dayofyear):
"""
Solar declination from Duffie & Beckman [1] and attributed to
Spencer (1971) and Iqbal (1983).
.. warning::
Return units are radians, not degrees.
Parameters
----------
dayofyear : numeric
Returns
-------
declination (radians) : numeric
Angular position of the sun at solar noon relative to the plane of the
equator, approximately between +/-23.45 (degrees).
References
----------
[1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal
Processes, 3rd Edition" pp. 13-14, J. Wiley and Sons, New York (2006)
[2] J. W. Spencer, "Fourier series representation of the position of the
sun" in Search 2 (5), p. 172 (1971)
[3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable
Energy Applications", p. 4 CRC Press (2013)
See Also
--------
declination_cooper69
"""
day_angle = _calculate_simple_day_angle(dayofyear)
return (
0.006918 -
0.399912 * np.cos(day_angle) + 0.070257 * np.sin(day_angle) -
0.006758 * np.cos(2. * day_angle) + 0.000907 * np.sin(2. * day_angle) -
0.002697 * np.cos(3. * day_angle) + 0.00148 * np.sin(3. * day_angle)
)
def pull_3year(self):
"""Returns a list (in JSON format) containing all the events from the Penn iCal Calendar.
List contains events in chronological order.
Each element of the list is a dictionary, containing:
- Name of the event 'name'
- Start date 'start'
- End date 'end'
"""
events = []
for term in ["fall", "summer", "spring"]:
url = "{}{}{}term.ics".format(BASE_URL, datetime.datetime.now().year, term)
resp = requests.get(url)
resp.raise_for_status()
r = resp.text
lines = r.split("\n")
d = {}
for line in lines:
if line == "BEGIN:VEVENT":
d = {}
elif line.startswith("DTSTART"):
raw_date = line.split(":")[1]
start_date = datetime.datetime.strptime(raw_date, '%Y%m%d').date()
d['start'] = start_date.strftime('%Y-%m-%d')
elif line.startswith("DTEND"):
raw_date = line.split(":")[1]
end_date = datetime.datetime.strptime(raw_date, '%Y%m%d').date()
d['end'] = end_date.strftime('%Y-%m-%d')
elif line.startswith("SUMMARY"):
name = line.split(":")[1]
d['name'] = str(name).strip()
elif line == "END:VEVENT":
events.append(d)
events.sort(key=lambda d: d['start'])
return events
def validateOneElement(self, doc, elem):
"""Try to validate a single element and it's attributes,
basically it does the following checks as described by the
XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
Required Attribute ] Then call xmlValidateOneAttribute()
for each attribute present. The ID/IDREF checks are
done separately """
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o)
return ret
def idle_task(self):
'''called in idle time'''
try:
data = self.port.recv(200)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return
raise
if len(data) > 110:
print("DGPS data too large: %u bytes" % len(data))
return
try:
self.master.mav.gps_inject_data_send(
self.target_system,
self.target_component,
len(data),
bytearray(data.ljust(110, '\0')))
except Exception as e:
print ("DGPS Failed:", e) | called in idle time | Below is the the instruction that describes the task:
### Input:
called in idle time
### Response:
def idle_task(self):
'''called in idle time'''
try:
data = self.port.recv(200)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return
raise
if len(data) > 110:
print("DGPS data too large: %u bytes" % len(data))
return
try:
self.master.mav.gps_inject_data_send(
self.target_system,
self.target_component,
len(data),
bytearray(data.ljust(110, '\0')))
except Exception(e):
print ("DGPS Failed:", e) |
def validate(self, size):
"""
Ensure that the size of the dimension matches the number of bands in the
scale
Raises:
ValueError: when the dimension size and number of bands don't match
"""
msg = 'scale and array size must match, ' \
'but were scale: {self.scale.n_bands}, array size: {size}'
if size != len(self.scale):
raise ValueError(msg.format(**locals()))
def _initiate_starttls(self, **kwargs):
"""Initiate starttls handshake over the socket.
"""
if self._tls_state == "connected":
raise RuntimeError("Already TLS-connected")
kwargs["do_handshake_on_connect"] = False
logger.debug("Wrapping the socket into ssl")
self._socket = ssl.wrap_socket(self._socket, **kwargs)
self._set_state("tls-handshake")
self._continue_tls_handshake()
def next(self):
"""Move iterator position forward"""
batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3))
i = self.cur
for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)):
str_img = open(self.root+self.list[i]+'.jpg').read()
img = imdecode(str_img, 1)
img, _ = random_crop(img, self.size)
batch[i - self.cur] = img
batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2))
ret = mx.io.DataBatch(data=[batch],
label=[],
pad=self.batch_size-(i-self.cur),
index=None)
self.cur = i
return ret
def add_key(self):
"Add ssh key to gitlab if necessary"
try:
with open(self.args.ssh_public_key) as f:
public_key = f.read().strip()
except:
log.debug("No key found in {}".format(self.args.ssh_public_key))
return None
g = self.gitlab
url = g['url'] + "/user/keys"
query = {'private_token': g['token']}
keys = requests.get(url, params=query).json()
log.debug("looking for '" + public_key + "' in " + str(keys))
if (list(filter(lambda key: key['key'] == public_key, keys))):
log.debug(self.args.ssh_public_key + " already exists")
return None
else:
name = 'github2gitlab'
log.info("add " + name + " ssh public key from " +
self.args.ssh_public_key)
query['title'] = name
query['key'] = public_key
result = requests.post(url, query)
if result.status_code != requests.codes.created:
log.warn('Key {} already in GitLab. '
'Possible under a different user. Skipping...'
.format(self.args.ssh_public_key))
return public_key
def _get_perspective_coeffs(startpoints, endpoints):
"""Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.
In Perspective Transform each pixel (x, y) in the original image gets transformed as,
(x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )
Args:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed
image
Returns:
octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
"""
matrix = []
for p1, p2 in zip(endpoints, startpoints):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
A = torch.tensor(matrix, dtype=torch.float)
B = torch.tensor(startpoints, dtype=torch.float).view(8)
res = torch.gels(B, A)[0]
return res.squeeze_(1).tolist()
def process_frames_mouth(self, frames):
"""
Preprocess from frames using mouth detector
"""
self.face = np.array(frames)
self.mouth = np.array(frames)
self.set_data(frames)
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
"""Extends a given object for API Production."""
# Cast all int_keys to int()
if int_keys:
for in_key in int_keys:
if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
in_dict[in_key] = int(in_dict[in_key])
# Cast all date_keys to datetime.isoformat
if date_keys:
for in_key in date_keys:
if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
_from = in_dict[in_key]
if isinstance(_from, basestring):
dtime = parse_datetime(_from)
elif isinstance(_from, datetime):
dtime = _from
in_dict[in_key] = dtime.isoformat()
elif (in_key in in_dict) and in_dict.get(in_key, None) is None:
del in_dict[in_key]
# Remove all Nones
for k, v in in_dict.items():
if v is None:
del in_dict[k]
return in_dict
def load_settings(self, daemon_config, context, origin=None):
"""
The pollers have a dependency on the context manager
of the X-Ray recorder. They will respect the customer
specified xray client to poll sampling rules/targets.
Otherwise they fall back to using the same X-Ray daemon
as the emitter.
"""
self._connector.setup_xray_client(ip=daemon_config.tcp_ip,
port=daemon_config.tcp_port,
client=self.xray_client)
self._connector.context = context
self._origin = origin
def shepp_logan_ellipsoids(ndim, modified=False):
"""Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions.
Parameters
----------
ndim : {2, 3}
Dimension of the space the ellipsoids should be in.
modified : bool, optional
True if the modified Shepp-Logan phantom should be given.
The modified phantom has greatly amplified contrast to aid
visualization.
See Also
--------
odl.phantom.geometric.ellipsoid_phantom :
Function for creating arbitrary ellipsoids phantoms
shepp_logan : Create a phantom with these ellipsoids
References
----------
.. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
"""
if ndim == 2:
ellipsoids = _shepp_logan_ellipse_2d()
elif ndim == 3:
ellipsoids = _shepp_logan_ellipsoids_3d()
else:
raise ValueError('dimension not 2 or 3, no phantom available')
if modified:
_modified_shepp_logan_ellipsoids(ellipsoids)
return ellipsoids
def GetCalendarFieldValuesTuple(self):
"""Return the tuple of calendar.txt values or None if this ServicePeriod
should not be in calendar.txt ."""
if self.start_date and self.end_date:
return [getattr(self, fn) for fn in self._FIELD_NAMES]
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1) | Return package version as listed in `__version__` in `init.py`. | Below is the the instruction that describes the task:
### Input:
Return package version as listed in `__version__` in `init.py`.
### Response:
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = codecs.open(os.path.join(package, '__init__.py'), encoding='utf-8').read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1) |
def setOverlayTexture(self, ulOverlayHandle):
"""
Texture to draw for the overlay. This function can only be called by the overlay's creator or renderer process (see SetOverlayRenderingPid) .
* OpenGL dirty state:
glBindTexture
"""
fn = self.function_table.setOverlayTexture
pTexture = Texture_t()
result = fn(ulOverlayHandle, byref(pTexture))
return result, pTexture
def status(self):
"""
check the status of the network and the peers
:return: network_height, peer_status
"""
peer = random.choice(self.PEERS)
formatted_peer = 'http://{}:4001'.format(peer)
peerdata = requests.get(url=formatted_peer + '/api/peers/').json()['peers']
peers_status = {}
networkheight = max([x['height'] for x in peerdata])
for i in peerdata:
if 'http://{}:4001'.format(i['ip']) in self.PEERS:
peers_status.update({i['ip']: {
'height': i['height'],
'status': i['status'],
'version': i['version'],
'delay': i['delay'],
}})
return {
'network_height': networkheight,
'peer_status': peers_status
}
def event_listeners(self):
"""List of registered event listeners."""
return (self.__command_listeners[:],
self.__server_heartbeat_listeners[:],
self.__server_listeners[:],
self.__topology_listeners[:])
def transpose(self,
name=None,
output_channels=None,
kernel_shapes=None,
strides=None,
paddings=None,
activation=None,
activate_final=None,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=None,
use_bias=None,
batch_norm_config=None,
data_format=None,
custom_getter=None):
"""Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
Returns:
Matching `ConvNet2DTranspose` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
ValueError: If the given data_format is not a supported format ("NHWC" or
"NCHW").
NotImplementedError: If the convolutions are dilated.
"""
for rate in self._rates:
if rate != 1:
raise NotImplementedError("Transpose dilated convolutions "
"are not supported")
output_shapes = []
if data_format is None:
data_format = self._data_format
if data_format == DATA_FORMAT_NHWC:
start_dim, end_dim = 1, -1
elif data_format == DATA_FORMAT_NCHW:
start_dim, end_dim = 2, 4
else:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
if custom_getter is None and self._custom_getter is not None:
tf.logging.warning(
"This convnet was constructed with a custom getter, but the "
"`transpose` method was not given any. The transposed ConvNet will "
"not be using any custom_getter.")
for layer in reversed(self._layers):
output_shapes.append(lambda l=layer: l.input_shape[start_dim:end_dim])
transpose_constructor = functools.partial(ConvNet2DTranspose,
output_shapes=output_shapes,
custom_getter=custom_getter)
return self._transpose(
transpose_constructor=transpose_constructor,
name=name,
output_channels=output_channels,
kernel_shapes=kernel_shapes,
strides=strides,
paddings=paddings,
activation=activation,
activate_final=activate_final,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs,
normalize_final=normalize_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_bias=use_bias,
data_format=data_format) | Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
Returns:
Matching `ConvNet2DTranspose` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
ValueError: If the given data_format is not a supported format ("NHWC" or
"NCHW").
NotImplementedError: If the convolutions are dilated. | Below is the instruction that describes the task:
### Input:
Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
Returns:
Matching `ConvNet2DTranspose` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
ValueError: If the given data_format is not a supported format ("NHWC" or
"NCHW").
NotImplementedError: If the convolutions are dilated.
### Response:
def transpose(self,
name=None,
output_channels=None,
kernel_shapes=None,
strides=None,
paddings=None,
activation=None,
activate_final=None,
normalization_ctor=None,
normalization_kwargs=None,
normalize_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=None,
use_bias=None,
batch_norm_config=None,
data_format=None,
custom_getter=None):
"""Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
normalization_ctor: Constructor to return a callable which will perform
normalization at each layer. Defaults to None / no normalization.
Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If
a string is provided, importlib is used to convert the string to a
callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be
provided.
normalization_kwargs: kwargs to be provided to `normalization_ctor` when
it is called.
normalize_final: Whether to apply normalization after the final conv
layer. Default is to take the value of activate_final.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether
the channel dimension of the input and output is the last dimension.
Default is `self._data_format`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the
`tf.get_variable` documentation for information about the
custom_getter API.
Returns:
Matching `ConvNet2DTranspose` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
ValueError: If the given data_format is not a supported format ("NHWC" or
"NCHW").
NotImplementedError: If the convolutions are dilated.
"""
for rate in self._rates:
if rate != 1:
raise NotImplementedError("Transpose dilated convolutions "
"are not supported")
output_shapes = []
if data_format is None:
data_format = self._data_format
if data_format == DATA_FORMAT_NHWC:
start_dim, end_dim = 1, -1
elif data_format == DATA_FORMAT_NCHW:
start_dim, end_dim = 2, 4
else:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
if custom_getter is None and self._custom_getter is not None:
tf.logging.warning(
"This convnet was constructed with a custom getter, but the "
"`transpose` method was not given any. The transposed ConvNet will "
"not be using any custom_getter.")
for layer in reversed(self._layers):
output_shapes.append(lambda l=layer: l.input_shape[start_dim:end_dim])
transpose_constructor = functools.partial(ConvNet2DTranspose,
output_shapes=output_shapes,
custom_getter=custom_getter)
return self._transpose(
transpose_constructor=transpose_constructor,
name=name,
output_channels=output_channels,
kernel_shapes=kernel_shapes,
strides=strides,
paddings=paddings,
activation=activation,
activate_final=activate_final,
normalization_ctor=normalization_ctor,
normalization_kwargs=normalization_kwargs,
normalize_final=normalize_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_bias=use_bias,
data_format=data_format) |
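A usage sketch for the method above, under the assumption of the Sonnet v1 / TF1-era API (`snt.nets.ConvNet2D`, `tf.placeholder`) and a hypothetical 64x64 RGB input; the encoder must be connected once before `transpose()` is called so that each layer's input shape is known:

import sonnet as snt     # Sonnet v1 assumed
import tensorflow as tf  # TF1-style graph mode assumed

images = tf.placeholder(tf.float32, [None, 64, 64, 3])  # hypothetical input batch
encoder = snt.nets.ConvNet2D(output_channels=[16, 32],
                             kernel_shapes=[3],
                             strides=[2],
                             paddings=[snt.SAME])
features = encoder(images)        # connect once so layer input shapes are recorded
decoder = encoder.transpose()     # matching ConvNet2DTranspose with reversed settings
reconstruction = decoder(features)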
def generate_ssh_key():
"""
Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings.
"""
key = rsa.generate_private_key(
backend=default_backend(),
public_exponent=65537,
key_size=4096
)
private_key = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption())
public_key = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
)
return private_key, public_key | Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings. | Below is the instruction that describes the task:
### Input:
Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings.
### Response:
def generate_ssh_key():
"""
Generates an SSH deploy public and private key.
Returns (private key, public key), a tuple of byte strings.
"""
key = rsa.generate_private_key(
backend=default_backend(),
public_exponent=65537,
key_size=4096
)
private_key = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption())
public_key = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
)
return private_key, public_key |
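The snippet above assumes module-level imports from the `cryptography` package (`rsa`, `serialization`, `default_backend`); a minimal usage sketch under that assumption, with hypothetical file names:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

private_key, public_key = generate_ssh_key()
print(public_key.decode('ascii'))        # OpenSSH-format public key, e.g. for a deploy key
with open('deploy_key', 'wb') as fh:     # PEM/PKCS8-encoded, unencrypted private key
    fh.write(private_key)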
def create_api_stage(restApiId, stageName, deploymentId, description='',
cacheClusterEnabled=False, cacheClusterSize='0.5', variables=None,
region=None, key=None, keyid=None, profile=None):
'''
Creates a new API stage for a given restApiId and deploymentId.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
'''
try:
variables = dict() if variables is None else variables
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stage = conn.create_stage(restApiId=restApiId, stageName=stageName, deploymentId=deploymentId,
description=description, cacheClusterEnabled=cacheClusterEnabled,
cacheClusterSize=cacheClusterSize, variables=variables)
return {'created': True, 'stage': _convert_datetime_str(stage)}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | Creates a new API stage for a given restApiId and deploymentId.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}' | Below is the instruction that describes the task:
### Input:
Creates a new API stage for a given restApiId and deploymentId.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
### Response:
def create_api_stage(restApiId, stageName, deploymentId, description='',
cacheClusterEnabled=False, cacheClusterSize='0.5', variables=None,
region=None, key=None, keyid=None, profile=None):
'''
Creates a new API stage for a given restApiId and deploymentId.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
'''
try:
variables = dict() if variables is None else variables
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stage = conn.create_stage(restApiId=restApiId, stageName=stageName, deploymentId=deploymentId,
description=description, cacheClusterEnabled=cacheClusterEnabled,
cacheClusterSize=cacheClusterSize, variables=variables)
return {'created': True, 'stage': _convert_datetime_str(stage)}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} |
def transform(self, jam):
'''Apply the sequence of transformations to a single jam object.
Parameters
----------
jam : jams.JAMS
The jam object to transform
Yields
------
jam_out : jams.JAMS
The jam objects produced by the transformation sequence
'''
for output in self.__recursive_transform(jam, self.steps):
yield output | Apply the sequence of transformations to a single jam object.
Parameters
----------
jam : jams.JAMS
The jam object to transform
Yields
------
jam_out : jams.JAMS
The jam objects produced by the transformation sequence | Below is the instruction that describes the task:
### Input:
Apply the sequence of transformations to a single jam object.
Parameters
----------
jam : jams.JAMS
The jam object to transform
Yields
------
jam_out : jams.JAMS
The jam objects produced by the transformation sequence
### Response:
def transform(self, jam):
'''Apply the sequence of transformations to a single jam object.
Parameters
----------
jam : jams.JAMS
The jam object to transform
Yields
------
jam_out : jams.JAMS
The jam objects produced by the transformation sequence
'''
for output in self.__recursive_transform(jam, self.steps):
yield output |
def power_corr(r=None, n=None, power=None, alpha=0.05, tail='two-sided'):
"""
Evaluate power, sample size, correlation coefficient or
significance level of a correlation test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
tail : str
Indicates whether the test is "two-sided" or "one-sided".
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must
be passed as None, and that parameter is determined from the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
This function is a mere Python translation of the original `pwr.r.test`
function implemented in the `pwr` R package.
All credit goes to the author, Stephane Champely.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80,
... tail='one-sided'))
n: 22.6091
3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
4. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80,
... alpha=None))
alpha: 0.1377
"""
# Check the number of arguments that are None
n_none = sum([v is None for v in [r, n, power, alpha]])
if n_none != 1:
raise ValueError('Exactly one of n, r, power, and alpha must be None')
# Safety checks
if r is not None:
assert -1 <= r <= 1
r = abs(r)
if alpha is not None:
assert 0 < alpha <= 1
if power is not None:
assert 0 < power <= 1
if n is not None:
assert n > 4
# Define main function
if tail == 'two-sided':
def func(r, n, power, alpha):
dof = n - 2
ttt = stats.t.ppf(1 - alpha / 2, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))
zr = np.arctanh(r) + r / (2 * (n - 1))
zrc = np.arctanh(rc)
power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) + \
stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3))
return power
else:
def func(r, n, power, alpha):
dof = n - 2
ttt = stats.t.ppf(1 - alpha, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))
zr = np.arctanh(r) + r / (2 * (n - 1))
zrc = np.arctanh(rc)
power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3))
return power
# Evaluate missing variable
if power is None and n is not None and r is not None:
# Compute achieved power given r, n and alpha
return func(r, n, power=None, alpha=alpha)
elif n is None and power is not None and r is not None:
# Compute required sample size given r, power and alpha
def _eval_n(n, r, power, alpha):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_n, 4 + 1e-10, 1e+09, args=(r, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif r is None and power is not None and n is not None:
# Compute achieved r given sample size, power and alpha level
def _eval_r(r, n, power, alpha):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_r, 1e-10, 1 - 1e-10, args=(n, power, alpha))
except ValueError: # pragma: no cover
return np.nan
else:
# Compute achieved alpha (significance) level given r, n and power
def _eval_alpha(alpha, r, n, power):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(r, n, power))
except ValueError: # pragma: no cover
return np.nan | Evaluate power, sample size, correlation coefficient or
significance level of a correlation test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
tail : str
Indicates whether the test is "two-sided" or "one-sided".
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must
be passed as None, and that parameter is determined from the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
This function is a mere Python translation of the original `pwr.r.test`
function implemented in the `pwr` R package.
All credit goes to the author, Stephane Champely.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80,
... tail='one-sided'))
n: 22.6091
3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
4. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80,
... alpha=None))
alpha: 0.1377 | Below is the instruction that describes the task:
### Input:
Evaluate power, sample size, correlation coefficient or
significance level of a correlation test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
tail : str
Indicates whether the test is "two-sided" or "one-sided".
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must
be passed as None, and that parameter is determined from the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
This function is a mere Python translation of the original `pwr.r.test`
function implemented in the `pwr` R package.
All credit goes to the author, Stephane Champely.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80,
... tail='one-sided'))
n: 22.6091
3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
4. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80,
... alpha=None))
alpha: 0.1377
### Response:
def power_corr(r=None, n=None, power=None, alpha=0.05, tail='two-sided'):
"""
Evaluate power, sample size, correlation coefficient or
significance level of a correlation test.
Parameters
----------
r : float
Correlation coefficient.
n : int
Number of observations (sample size).
power : float
Test power (= 1 - type II error).
alpha : float
Significance level (type I error probability).
The default is 0.05.
tail : str
Indicates whether the test is "two-sided" or "one-sided".
Notes
-----
Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must
be passed as None, and that parameter is determined from the others.
Notice that ``alpha`` has a default value of 0.05 so None must be
explicitly passed if you want to compute it.
:py:func:`scipy.optimize.brenth` is used to solve power equations for other
variables (i.e. sample size, effect size, or significance level). If the
solving fails, a nan value is returned.
This function is a mere Python translation of the original `pwr.r.test`
function implemented in the `pwr` R package.
All credit goes to the author, Stephane Champely.
References
----------
.. [1] Cohen, J. (1988). Statistical power analysis for the behavioral
sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
.. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf
Examples
--------
1. Compute achieved power given ``r``, ``n`` and ``alpha``
>>> from pingouin import power_corr
>>> print('power: %.4f' % power_corr(r=0.5, n=20))
power: 0.6379
2. Compute required sample size given ``r``, ``power`` and ``alpha``
>>> print('n: %.4f' % power_corr(r=0.5, power=0.80,
... tail='one-sided'))
n: 22.6091
3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level
>>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05))
r: 0.5822
4. Compute achieved alpha level given ``r``, ``n`` and ``power``
>>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80,
... alpha=None))
alpha: 0.1377
"""
# Check the number of arguments that are None
n_none = sum([v is None for v in [r, n, power, alpha]])
if n_none != 1:
raise ValueError('Exactly one of n, r, power, and alpha must be None')
# Safety checks
if r is not None:
assert -1 <= r <= 1
r = abs(r)
if alpha is not None:
assert 0 < alpha <= 1
if power is not None:
assert 0 < power <= 1
if n is not None:
assert n > 4
# Define main function
if tail == 'two-sided':
def func(r, n, power, alpha):
dof = n - 2
ttt = stats.t.ppf(1 - alpha / 2, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))
zr = np.arctanh(r) + r / (2 * (n - 1))
zrc = np.arctanh(rc)
power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) + \
stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3))
return power
else:
def func(r, n, power, alpha):
dof = n - 2
ttt = stats.t.ppf(1 - alpha, dof)
rc = np.sqrt(ttt**2 / (ttt**2 + dof))
zr = np.arctanh(r) + r / (2 * (n - 1))
zrc = np.arctanh(rc)
power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3))
return power
# Evaluate missing variable
if power is None and n is not None and r is not None:
# Compute achieved power given r, n and alpha
return func(r, n, power=None, alpha=alpha)
elif n is None and power is not None and r is not None:
# Compute required sample size given r, power and alpha
def _eval_n(n, r, power, alpha):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_n, 4 + 1e-10, 1e+09, args=(r, power, alpha))
except ValueError: # pragma: no cover
return np.nan
elif r is None and power is not None and n is not None:
# Compute achieved r given sample size, power and alpha level
def _eval_r(r, n, power, alpha):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_r, 1e-10, 1 - 1e-10, args=(n, power, alpha))
except ValueError: # pragma: no cover
return np.nan
else:
# Compute achieved alpha (significance) level given r, n and power
def _eval_alpha(alpha, r, n, power):
return func(r, n, power, alpha) - power
try:
return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(r, n, power))
except ValueError: # pragma: no cover
return np.nan |
def data(self, **query):
"""Query for Data object annotation."""
objects = self.cache['objects']
data = self.api.data.get(**query)['objects']
data_objects = []
for d in data:
_id = d['id']
if _id in objects:
# Update existing object
objects[_id].update(d)
else:
# Insert new object
objects[_id] = GenData(d, self)
data_objects.append(objects[_id])
# Hydrate reference fields
for d in data_objects:
while True:
ref_annotation = {}
remove_annotation = []
for path, ann in d.annotation.items():
if ann['type'].startswith('data:'):
# Referenced data object found
# Copy annotation
_id = ann['value']
if _id not in objects:
try:
d_tmp = self.api.data(_id).get()
except slumber.exceptions.HttpClientError as ex:
if ex.response.status_code == 404:
continue
else:
raise ex
objects[_id] = GenData(d_tmp, self)
annotation = objects[_id].annotation
ref_annotation.update({path + '.' + k: v for k, v in annotation.items()})
remove_annotation.append(path)
if ref_annotation:
d.annotation.update(ref_annotation)
for path in remove_annotation:
del d.annotation[path]
else:
break
        return data_objects | Query for Data object annotation. | Below is the instruction that describes the task:
### Input:
Query for Data object annotation.
### Response:
def data(self, **query):
"""Query for Data object annotation."""
objects = self.cache['objects']
data = self.api.data.get(**query)['objects']
data_objects = []
for d in data:
_id = d['id']
if _id in objects:
# Update existing object
objects[_id].update(d)
else:
# Insert new object
objects[_id] = GenData(d, self)
data_objects.append(objects[_id])
# Hydrate reference fields
for d in data_objects:
while True:
ref_annotation = {}
remove_annotation = []
for path, ann in d.annotation.items():
if ann['type'].startswith('data:'):
# Referenced data object found
# Copy annotation
_id = ann['value']
if _id not in objects:
try:
d_tmp = self.api.data(_id).get()
except slumber.exceptions.HttpClientError as ex:
if ex.response.status_code == 404:
continue
else:
raise ex
objects[_id] = GenData(d_tmp, self)
annotation = objects[_id].annotation
ref_annotation.update({path + '.' + k: v for k, v in annotation.items()})
remove_annotation.append(path)
if ref_annotation:
d.annotation.update(ref_annotation)
for path in remove_annotation:
del d.annotation[path]
else:
break
return data_objects |
def file_signature(filename):
"""
Return a signature for a file.
"""
if not os.path.isfile(filename):
return None
if not os.path.exists(filename):
return None
# Duplicate auto-generated files can be recognized with the sha1 hash.
sig = hashlib.sha1()
with open(filename, "rb") as f:
buf = f.read()
sig.update(buf)
    return sig.hexdigest() | Return a signature for a file. | Below is the instruction that describes the task:
### Input:
Return a signature for a file.
### Response:
def file_signature(filename):
"""
Return a signature for a file.
"""
if not os.path.isfile(filename):
return None
if not os.path.exists(filename):
return None
# Duplicate auto-generated files can be recognized with the sha1 hash.
sig = hashlib.sha1()
with open(filename, "rb") as f:
buf = f.read()
sig.update(buf)
return sig.hexdigest() |
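A short usage sketch for the helper above (it assumes `os` and `hashlib` are imported at module level); the file paths are placeholders:

import hashlib
import os

sig_a = file_signature('build/generated_a.py')
sig_b = file_signature('build/generated_b.py')
if sig_a is not None and sig_a == sig_b:
    print('duplicate auto-generated file detected')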
def prepare_shell_data(self, shells, key, entry):
"""Prepare one shell or docker task."""
if self.can_process_shell(entry):
if key in ['python']:
entry['type'] = key
if 'with' in entry and isinstance(entry['with'], str):
rendered_with = ast.literal_eval(render(entry['with'],
variables=self.pipeline.variables,
model=self.pipeline.model,
env=self.get_merged_env(include_os=True)))
elif 'with' in entry:
rendered_with = entry['with']
else:
rendered_with = ['']
for item in rendered_with:
shells.append({
'id': self.next_task_id,
'creator': key,
'entry': entry,
'model': self.pipeline.model,
'env': self.get_merged_env(),
'item': item,
'dry_run': self.pipeline.options.dry_run,
'debug': self.pipeline.options.debug,
'strict': self.pipeline.options.strict,
'variables': self.pipeline.variables,
'temporary_scripts_path': self.pipeline.options.temporary_scripts_path})
                self.next_task_id += 1 | Prepare one shell or docker task. | Below is the instruction that describes the task:
### Input:
Prepare one shell or docker task.
### Response:
def prepare_shell_data(self, shells, key, entry):
"""Prepare one shell or docker task."""
if self.can_process_shell(entry):
if key in ['python']:
entry['type'] = key
if 'with' in entry and isinstance(entry['with'], str):
rendered_with = ast.literal_eval(render(entry['with'],
variables=self.pipeline.variables,
model=self.pipeline.model,
env=self.get_merged_env(include_os=True)))
elif 'with' in entry:
rendered_with = entry['with']
else:
rendered_with = ['']
for item in rendered_with:
shells.append({
'id': self.next_task_id,
'creator': key,
'entry': entry,
'model': self.pipeline.model,
'env': self.get_merged_env(),
'item': item,
'dry_run': self.pipeline.options.dry_run,
'debug': self.pipeline.options.debug,
'strict': self.pipeline.options.strict,
'variables': self.pipeline.variables,
'temporary_scripts_path': self.pipeline.options.temporary_scripts_path})
self.next_task_id += 1 |
def ws_connect(message):
"""
Channels connection setup.
Register the current client on the related Group according to the language
"""
prefix, language = message['path'].strip('/').split('/')
gr = Group('knocker-{0}'.format(language))
gr.add(message.reply_channel)
message.channel_session['knocker'] = language
message.reply_channel.send({"accept": True}) | Channels connection setup.
Register the current client on the related Group according to the language | Below is the instruction that describes the task:
### Input:
Channels connection setup.
Register the current client on the related Group according to the language
### Response:
def ws_connect(message):
"""
Channels connection setup.
Register the current client on the related Group according to the language
"""
prefix, language = message['path'].strip('/').split('/')
gr = Group('knocker-{0}'.format(language))
gr.add(message.reply_channel)
message.channel_session['knocker'] = language
message.reply_channel.send({"accept": True}) |
def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
"""Edges between entities in the sub-graph that pass the given filters.
:param universe: The full graph
:param graph: A sub-graph to find the upstream information
:param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
"""
edge_filter = and_edge_predicates(edge_predicates)
for u, v in itt.product(graph, repeat=2):
if graph.has_edge(u, v) or not universe.has_edge(u, v):
continue
rs = defaultdict(list)
for key, data in universe[u][v].items():
if not edge_filter(universe, u, v, key):
continue
rs[data[RELATION]].append((key, data))
if 1 == len(rs):
relation = list(rs)[0]
for key, data in rs[relation]:
graph.add_edge(u, v, key=key, **data)
else:
log.debug('Multiple relationship types found between %s and %s', u, v) | Edges between entities in the sub-graph that pass the given filters.
:param universe: The full graph
:param graph: A sub-graph to find the upstream information
:param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool | Below is the instruction that describes the task:
### Input:
Edges between entities in the sub-graph that pass the given filters.
:param universe: The full graph
:param graph: A sub-graph to find the upstream information
:param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
### Response:
def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
"""Edges between entities in the sub-graph that pass the given filters.
:param universe: The full graph
:param graph: A sub-graph to find the upstream information
:param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
"""
edge_filter = and_edge_predicates(edge_predicates)
for u, v in itt.product(graph, repeat=2):
if graph.has_edge(u, v) or not universe.has_edge(u, v):
continue
rs = defaultdict(list)
for key, data in universe[u][v].items():
if not edge_filter(universe, u, v, key):
continue
rs[data[RELATION]].append((key, data))
if 1 == len(rs):
relation = list(rs)[0]
for key, data in rs[relation]:
graph.add_edge(u, v, key=key, **data)
else:
log.debug('Multiple relationship types found between %s and %s', u, v) |
def dump(
state, host,
remote_filename, database=None,
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_mysql_command(
executable='mysqldump',
database=database,
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
), remote_filename) | Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above | Below is the instruction that describes the task:
### Input:
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above
### Response:
def dump(
state, host,
remote_filename, database=None,
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_mysql_command(
executable='mysqldump',
database=database,
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
), remote_filename) |
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
            sys.exit() | Override the scheduler heartbeat to determine when the test is complete | Below is the instruction that describes the task:
### Input:
Override the scheduler heartbeat to determine when the test is complete
### Response:
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit() |
def create(self, create_missing=None):
"""Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1216236
<https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_.
"""
attrs = self.create_json(create_missing)
return Location(self._server_config, id=attrs['id']).read() | Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1216236
<https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_. | Below is the instruction that describes the task:
### Input:
Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1216236
<https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_.
### Response:
def create(self, create_missing=None):
"""Manually fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1216236
<https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_.
"""
attrs = self.create_json(create_missing)
return Location(self._server_config, id=attrs['id']).read() |
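A hedged usage sketch for the entity above, assuming a reachable Satellite/Foreman server; the URL, credentials, and location name are all hypothetical:

from nailgun.config import ServerConfig
from nailgun.entities import Location

server_config = ServerConfig(url='https://satellite.example.com',
                             auth=('admin', 'changeme'), verify=False)
loc = Location(server_config, name='Tokyo DC').create()
print(loc.id)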
def search_asn(self, auth, query, search_options=None):
""" Search ASNs for entries matching 'query'
* `auth` [BaseAuth]
AAA options.
* `query` [dict_to_sql]
How the search should be performed.
* `search_options` [options_dict]
Search options, see below.
Returns a list of dicts.
The `query` argument passed to this function is designed to be
able to specify how quite advanced search operations should be
performed in a generic format. It is internally expanded to a SQL
WHERE-clause.
The `query` is a dict with three elements, where one specifies the
operation to perform and the two other specifies its arguments. The
arguments can themselves be `query` dicts, to build more complex
queries.
The :attr:`operator` key specifies what operator should be used for the
comparison. Currently the following operators are supported:
* :data:`and` - Logical AND
* :data:`or` - Logical OR
* :data:`equals` - Equality; =
* :data:`not_equals` - Inequality; !=
* :data:`like` - SQL LIKE
* :data:`regex_match` - Regular expression match
* :data:`regex_not_match` - Regular expression not match
The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected
to the comparison. :attr:`val1` can be either any prefix attribute or an
entire query dict. :attr:`val2` can be either the value you want to
compare the prefix attribute to, or an entire `query` dict.
The search options can also be used to limit the number of rows
returned or set an offset for the result.
The following options are available:
* :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`).
* :attr:`offset` - Offset the result list this many prefixes (default :data:`0`).
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full
understanding.
"""
if search_options is None:
search_options = {}
#
# sanitize search options and set default if option missing
#
# max_result
if 'max_result' not in search_options:
search_options['max_result'] = 50
else:
try:
search_options['max_result'] = int(search_options['max_result'])
except (ValueError, TypeError):
raise NipapValueError('Invalid value for option' +
''' 'max_result'. Only integer values allowed.''')
# offset
if 'offset' not in search_options:
search_options['offset'] = 0
else:
try:
search_options['offset'] = int(search_options['offset'])
except (ValueError, TypeError):
raise NipapValueError('Invalid value for option' +
''' 'offset'. Only integer values allowed.''')
self._logger.debug('search_asn search_options: %s' % unicode(search_options))
opt = None
sql = """ SELECT * FROM ip_net_asn """
# add where clause if we have any search terms
if query != {}:
where, opt = self._expand_asn_query(query)
sql += " WHERE " + where
sql += " ORDER BY asn LIMIT " + unicode(search_options['max_result'])
self._execute(sql, opt)
result = list()
for row in self._curs_pg:
result.append(dict(row))
return { 'search_options': search_options, 'result': result } | Search ASNs for entries matching 'query'
* `auth` [BaseAuth]
AAA options.
* `query` [dict_to_sql]
How the search should be performed.
* `search_options` [options_dict]
Search options, see below.
Returns a list of dicts.
The `query` argument passed to this function is designed to be
able to specify how quite advanced search operations should be
performed in a generic format. It is internally expanded to a SQL
WHERE-clause.
The `query` is a dict with three elements, where one specifies the
operation to perform and the two other specifies its arguments. The
arguments can themselves be `query` dicts, to build more complex
queries.
The :attr:`operator` key specifies what operator should be used for the
comparison. Currently the following operators are supported:
* :data:`and` - Logical AND
* :data:`or` - Logical OR
* :data:`equals` - Equality; =
* :data:`not_equals` - Inequality; !=
* :data:`like` - SQL LIKE
* :data:`regex_match` - Regular expression match
* :data:`regex_not_match` - Regular expression not match
The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected
to the comparison. :attr:`val1` can be either any prefix attribute or an
entire query dict. :attr:`val2` can be either the value you want to
compare the prefix attribute to, or an entire `query` dict.
The search options can also be used to limit the number of rows
returned or set an offset for the result.
The following options are available:
* :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`).
* :attr:`offset` - Offset the result list this many prefixes (default :data:`0`).
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full
understanding. | Below is the instruction that describes the task:
### Input:
Search ASNs for entries matching 'query'
* `auth` [BaseAuth]
AAA options.
* `query` [dict_to_sql]
How the search should be performed.
* `search_options` [options_dict]
Search options, see below.
Returns a list of dicts.
The `query` argument passed to this function is designed to be
able to specify how quite advanced search operations should be
performed in a generic format. It is internally expanded to a SQL
WHERE-clause.
The `query` is a dict with three elements, where one specifies the
operation to perform and the two other specifies its arguments. The
arguments can themselves be `query` dicts, to build more complex
queries.
The :attr:`operator` key specifies what operator should be used for the
comparison. Currently the following operators are supported:
* :data:`and` - Logical AND
* :data:`or` - Logical OR
* :data:`equals` - Equality; =
* :data:`not_equals` - Inequality; !=
* :data:`like` - SQL LIKE
* :data:`regex_match` - Regular expression match
* :data:`regex_not_match` - Regular expression not match
The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected
to the comparison. :attr:`val1` can be either any prefix attribute or an
entire query dict. :attr:`val2` can be either the value you want to
compare the prefix attribute to, or an entire `query` dict.
The search options can also be used to limit the number of rows
returned or set an offset for the result.
The following options are available:
* :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`).
* :attr:`offset` - Offset the result list this many prefixes (default :data:`0`).
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full
understanding.
### Response:
def search_asn(self, auth, query, search_options=None):
""" Search ASNs for entries matching 'query'
* `auth` [BaseAuth]
AAA options.
* `query` [dict_to_sql]
How the search should be performed.
* `search_options` [options_dict]
Search options, see below.
Returns a list of dicts.
The `query` argument passed to this function is designed to be
able to specify how quite advanced search operations should be
performed in a generic format. It is internally expanded to a SQL
WHERE-clause.
The `query` is a dict with three elements, where one specifies the
operation to perform and the two other specifies its arguments. The
arguments can themselves be `query` dicts, to build more complex
queries.
The :attr:`operator` key specifies what operator should be used for the
comparison. Currently the following operators are supported:
* :data:`and` - Logical AND
* :data:`or` - Logical OR
* :data:`equals` - Equality; =
* :data:`not_equals` - Inequality; !=
* :data:`like` - SQL LIKE
* :data:`regex_match` - Regular expression match
* :data:`regex_not_match` - Regular expression not match
The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected
to the comparison. :attr:`val1` can be either any prefix attribute or an
entire query dict. :attr:`val2` can be either the value you want to
compare the prefix attribute to, or an entire `query` dict.
The search options can also be used to limit the number of rows
returned or set an offset for the result.
The following options are available:
* :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`).
* :attr:`offset` - Offset the result list this many prefixes (default :data:`0`).
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full
understanding.
"""
if search_options is None:
search_options = {}
#
# sanitize search options and set default if option missing
#
# max_result
if 'max_result' not in search_options:
search_options['max_result'] = 50
else:
try:
search_options['max_result'] = int(search_options['max_result'])
except (ValueError, TypeError):
raise NipapValueError('Invalid value for option' +
''' 'max_result'. Only integer values allowed.''')
# offset
if 'offset' not in search_options:
search_options['offset'] = 0
else:
try:
search_options['offset'] = int(search_options['offset'])
except (ValueError, TypeError):
raise NipapValueError('Invalid value for option' +
''' 'offset'. Only integer values allowed.''')
self._logger.debug('search_asn search_options: %s' % unicode(search_options))
opt = None
sql = """ SELECT * FROM ip_net_asn """
# add where clause if we have any search terms
if query != {}:
where, opt = self._expand_asn_query(query)
sql += " WHERE " + where
sql += " ORDER BY asn LIMIT " + unicode(search_options['max_result'])
self._execute(sql, opt)
result = list()
for row in self._curs_pg:
result.append(dict(row))
return { 'search_options': search_options, 'result': result } |
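An illustrative query built from the format documented above; the backend instance and `auth` object in the commented call are placeholders:

query = {
    'operator': 'equals',
    'val1': 'asn',
    'val2': 64512,
}
# res = backend.search_asn(auth, query, {'max_result': 10, 'offset': 0})
# for row in res['result']:
#     print(row['asn'])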
def get_bitcoind_client():
"""
Connect to the bitcoind node
"""
bitcoind_opts = get_bitcoin_opts()
bitcoind_host = bitcoind_opts['bitcoind_server']
bitcoind_port = bitcoind_opts['bitcoind_port']
bitcoind_user = bitcoind_opts['bitcoind_user']
bitcoind_passwd = bitcoind_opts['bitcoind_passwd']
    return create_bitcoind_service_proxy(bitcoind_user, bitcoind_passwd, server=bitcoind_host, port=bitcoind_port) | Connect to the bitcoind node | Below is the instruction that describes the task:
### Input:
Connect to the bitcoind node
### Response:
def get_bitcoind_client():
"""
Connect to the bitcoind node
"""
bitcoind_opts = get_bitcoin_opts()
bitcoind_host = bitcoind_opts['bitcoind_server']
bitcoind_port = bitcoind_opts['bitcoind_port']
bitcoind_user = bitcoind_opts['bitcoind_user']
bitcoind_passwd = bitcoind_opts['bitcoind_passwd']
return create_bitcoind_service_proxy(bitcoind_user, bitcoind_passwd, server=bitcoind_host, port=bitcoind_port) |
def sum_distances(self, indices, distance_matrix):
"""Calculate combinatorial distance between a select group of
trajectories, indicated by indices
Arguments
---------
indices : tuple
distance_matrix : numpy.ndarray (M,M)
Returns
-------
numpy.ndarray
Notes
-----
This function can perhaps be quickened by calculating the sum of the
distances. The calculated distances, as they are right now,
are only used in a relative way. Purely summing distances would lead
to the same result, at a perhaps quicker rate.
"""
combs_tup = np.array(tuple(combinations(indices, 2)))
# Put indices from tuples into two-dimensional array.
combs = np.array([[i[0] for i in combs_tup],
[i[1] for i in combs_tup]])
# Calculate distance (vectorized)
dist = np.sqrt(
np.sum(np.square(distance_matrix[combs[0], combs[1]]), axis=0))
return dist | Calculate combinatorial distance between a select group of
trajectories, indicated by indices
Arguments
---------
indices : tuple
distance_matrix : numpy.ndarray (M,M)
Returns
-------
numpy.ndarray
Notes
-----
This function can perhaps be quickened by calculating the sum of the
distances. The calculated distances, as they are right now,
are only used in a relative way. Purely summing distances would lead
to the same result, at a perhaps quicker rate. | Below is the instruction that describes the task:
### Input:
Calculate combinatorial distance between a select group of
trajectories, indicated by indices
Arguments
---------
indices : tuple
distance_matrix : numpy.ndarray (M,M)
Returns
-------
numpy.ndarray
Notes
-----
This function can perhaps be quickened by calculating the sum of the
distances. The calculated distances, as they are right now,
are only used in a relative way. Purely summing distances would lead
to the same result, at a perhaps quicker rate.
### Response:
def sum_distances(self, indices, distance_matrix):
"""Calculate combinatorial distance between a select group of
trajectories, indicated by indices
Arguments
---------
indices : tuple
distance_matrix : numpy.ndarray (M,M)
Returns
-------
numpy.ndarray
Notes
-----
This function can perhaps be quickened by calculating the sum of the
distances. The calculated distances, as they are right now,
are only used in a relative way. Purely summing distances would lead
to the same result, at a perhaps quicker rate.
"""
combs_tup = np.array(tuple(combinations(indices, 2)))
# Put indices from tuples into two-dimensional array.
combs = np.array([[i[0] for i in combs_tup],
[i[1] for i in combs_tup]])
# Calculate distance (vectorized)
dist = np.sqrt(
np.sum(np.square(distance_matrix[combs[0], combs[1]]), axis=0))
return dist |
def os_path_to_client_path(self, os_path):
"""
Converts an operating system path into a client path by
replacing instances of os.path.sep with '/'.
Note: If the client path contains any instances of '/'
already, they will be replaced with '-'.
"""
if os.path.sep == '/':
return os_path
return os_path.replace('/', '-').replace(os.path.sep, '/') | Converts an operating system path into a client path by
replacing instances of os.path.sep with '/'.
Note: If the client path contains any instances of '/'
already, they will be replaced with '-'. | Below is the instruction that describes the task:
### Input:
Converts an operating system path into a client path by
replacing instances of os.path.sep with '/'.
Note: If the client path contains any instances of '/'
already, they will be replaced with '-'.
### Response:
def os_path_to_client_path(self, os_path):
"""
Converts an operating system path into a client path by
replacing instances of os.path.sep with '/'.
Note: If the client path contains any instances of '/'
already, they will be replaced with '-'.
"""
if os.path.sep == '/':
return os_path
return os_path.replace('/', '-').replace(os.path.sep, '/') |
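To illustrate the replacement order ('/' becomes '-' before the platform separator becomes '/'), a standalone mirror of the method with the separator made explicit; the paths are made up:

import os

def to_client_path(os_path, sep=os.path.sep):
    # Parametrised copy of os_path_to_client_path, for illustration only.
    if sep == '/':
        return os_path
    return os_path.replace('/', '-').replace(sep, '/')

assert to_client_path(r'logs\app/v1.txt', sep='\\') == 'logs/app-v1.txt'
assert to_client_path('logs/app.txt', sep='/') == 'logs/app.txt'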
def yank_last_arg(event):
"""
Like `yank_nth_arg`, but if no argument has been given, yank the last word
of each line.
"""
n = (event.arg if event.arg_present else None)
event.current_buffer.yank_last_arg(n) | Like `yank_nth_arg`, but if no argument has been given, yank the last word
of each line. | Below is the instruction that describes the task:
### Input:
Like `yank_nth_arg`, but if no argument has been given, yank the last word
of each line.
### Response:
def yank_last_arg(event):
"""
Like `yank_nth_arg`, but if no argument has been given, yank the last word
of each line.
"""
n = (event.arg if event.arg_present else None)
event.current_buffer.yank_last_arg(n) |
def do_not_track():
"""
Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data
"""
def decorator(f):
@functools.wraps(f)
def func(*args, **kwargs):
request.prom_do_not_track = True
return f(*args, **kwargs)
return func
return decorator | Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data | Below is the instruction that describes the task:
### Input:
Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data
### Response:
def do_not_track():
"""
Decorator to skip the default metrics collection for the method.
*Note*: explicit metrics decorators will still collect the data
"""
def decorator(f):
@functools.wraps(f)
def func(*args, **kwargs):
request.prom_do_not_track = True
return f(*args, **kwargs)
return func
return decorator |
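A usage sketch in the style of `prometheus_flask_exporter`; the Flask app and the `/health` endpoint are hypothetical:

from flask import Flask

app = Flask(__name__)

@app.route('/health')
@do_not_track()
def health():
    # Served normally, but skipped by the default per-request metrics.
    return 'ok'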
def num_samples(input_filepath):
'''
Show number of samples (0 if unavailable).
Parameters
----------
input_filepath : str
Path to audio file.
Returns
-------
n_samples : int
total number of samples in audio file.
Returns 0 if empty or unavailable
'''
validate_input_file(input_filepath)
output = soxi(input_filepath, 's')
if output == '0':
logger.warning("Number of samples unavailable for %s", input_filepath)
return int(output) | Show number of samples (0 if unavailable).
Parameters
----------
input_filepath : str
Path to audio file.
Returns
-------
n_samples : int
total number of samples in audio file.
Returns 0 if empty or unavailable | Below is the the instruction that describes the task:
### Input:
Show number of samples (0 if unavailable).
Parameters
----------
input_filepath : str
Path to audio file.
Returns
-------
n_samples : int
total number of samples in audio file.
Returns 0 if empty or unavailable
### Response:
def num_samples(input_filepath):
'''
Show number of samples (0 if unavailable).
Parameters
----------
input_filepath : str
Path to audio file.
Returns
-------
n_samples : int
total number of samples in audio file.
Returns 0 if empty or unavailable
'''
validate_input_file(input_filepath)
output = soxi(input_filepath, 's')
if output == '0':
logger.warning("Number of samples unavailable for %s", input_filepath)
return int(output) |
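A usage sketch for num_samples above. The helper reads like pysox's sox.file_info.num_samples, which shells out to SoX's soxi; that packaging, the import path and the file name are assumptions.

import sox   # assumes the pysox package plus a SoX binary on the PATH

n = sox.file_info.num_samples('take_01.wav')   # placeholder file name
if n == 0:
    print('sample count unavailable')
else:
    print('{} samples'.format(n))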
def decode_and_resize(image_str_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
# Note resize expects a batch_size, but tf_map suppresses that index,
# thus we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image | Decodes jpeg string, resizes it and returns a uint8 tensor. | Below is the the instruction that describes the task:
### Input:
Decodes jpeg string, resizes it and returns a uint8 tensor.
### Response:
def decode_and_resize(image_str_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
# Note resize expects a batch_size, but tf_map suppresses that index,
# thus we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image |
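A graph-mode usage sketch for decode_and_resize above, written against the TensorFlow 1.x API the record itself uses; the function is assumed to be in scope and the JPEG path is a placeholder.

import tensorflow as tf   # TF 1.x graph API, matching the record

jpeg_bytes = tf.placeholder(dtype=tf.string, shape=[])   # one encoded JPEG string
image_uint8 = decode_and_resize(jpeg_bytes)              # 299 x 299 x 3, uint8

with tf.Session() as sess:
    with open('example.jpg', 'rb') as f:                 # placeholder path
        pixels = sess.run(image_uint8, feed_dict={jpeg_bytes: f.read()})
print(pixels.shape, pixels.dtype)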
def get_variable_values(
schema, # type: GraphQLSchema
definition_asts, # type: List[VariableDefinition]
inputs, # type: Any
):
# type: (...) -> Dict[str, Any]
"""Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown."""
if inputs is None:
inputs = {}
values = {}
for def_ast in definition_asts:
var_name = def_ast.variable.name.value
var_type = type_from_ast(schema, def_ast.type)
value = inputs.get(var_name)
if not is_input_type(var_type):
raise GraphQLError(
'Variable "${var_name}" expected value of type "{var_type}" which cannot be used as an input type.'.format(
var_name=var_name, var_type=print_ast(def_ast.type)
),
[def_ast],
)
elif value is None:
if def_ast.default_value is not None:
values[var_name] = value_from_ast(
def_ast.default_value, var_type
) # type: ignore
if isinstance(var_type, GraphQLNonNull):
raise GraphQLError(
'Variable "${var_name}" of required type "{var_type}" was not provided.'.format(
var_name=var_name, var_type=var_type
),
[def_ast],
)
else:
errors = is_valid_value(value, var_type)
if errors:
message = u"\n" + u"\n".join(errors)
raise GraphQLError(
'Variable "${}" got invalid value {}.{}'.format(
var_name, json.dumps(value, sort_keys=True), message
),
[def_ast],
)
coerced_value = coerce_value(var_type, value)
if coerced_value is None:
raise Exception("Should have reported error.")
values[var_name] = coerced_value
return values | Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown. | Below is the the instruction that describes the task:
### Input:
Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown.
### Response:
def get_variable_values(
schema, # type: GraphQLSchema
definition_asts, # type: List[VariableDefinition]
inputs, # type: Any
):
# type: (...) -> Dict[str, Any]
"""Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown."""
if inputs is None:
inputs = {}
values = {}
for def_ast in definition_asts:
var_name = def_ast.variable.name.value
var_type = type_from_ast(schema, def_ast.type)
value = inputs.get(var_name)
if not is_input_type(var_type):
raise GraphQLError(
'Variable "${var_name}" expected value of type "{var_type}" which cannot be used as an input type.'.format(
var_name=var_name, var_type=print_ast(def_ast.type)
),
[def_ast],
)
elif value is None:
if def_ast.default_value is not None:
values[var_name] = value_from_ast(
def_ast.default_value, var_type
) # type: ignore
if isinstance(var_type, GraphQLNonNull):
raise GraphQLError(
'Variable "${var_name}" of required type "{var_type}" was not provided.'.format(
var_name=var_name, var_type=var_type
),
[def_ast],
)
else:
errors = is_valid_value(value, var_type)
if errors:
message = u"\n" + u"\n".join(errors)
raise GraphQLError(
'Variable "${}" got invalid value {}.{}'.format(
var_name, json.dumps(value, sort_keys=True), message
),
[def_ast],
)
coerced_value = coerce_value(var_type, value)
if coerced_value is None:
raise Exception("Should have reported error.")
values[var_name] = coerced_value
return values |
def array_info(self, dump=None, paths=None, attrs=True,
standardize_dims=True, pwd=None, use_rel_paths=True,
alternative_paths={}, ds_description={'fname', 'store'},
full_ds=True, copy=False, **kwargs):
"""
Get dimension information on your arrays
This method returns a dictionary containing information on the
array in this instance
Parameters
----------
dump: bool
If True and the dataset has not been dumped so far, it is dumped to
a temporary file or the one generated by `paths` is used. If it is
False or both, `dump` and `paths` are None, no data will be stored.
If it is None and `paths` is not None, `dump` is set to True.
%(get_filename_ds.parameters.no_ds|dump)s
attrs: bool, optional
If True (default), the :attr:`ArrayList.attrs` and
:attr:`xarray.DataArray.attrs` attributes are included in the
returning dictionary
standardize_dims: bool, optional
If True (default), the real dimension names in the dataset are
replaced by x, y, z and t to be more general.
pwd: str
Path to the working directory from where the data can be imported.
If None, use the current working directory.
use_rel_paths: bool, optional
If True (default), paths relative to the current working directory
are used. Otherwise absolute paths to `pwd` are used
ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'}
Keys to describe the datasets of the arrays. If all, all keys
are used. The key descriptions are
fname
the file name is inserted in the ``'fname'`` key
store
the data store class and module is inserted in the ``'store'``
key
ds
the dataset is inserted in the ``'ds'`` key
num
The unique number assigned to the dataset is inserted in the
``'num'`` key
arr
The array itself is inserted in the ``'arr'`` key
full_ds: bool
If True and ``'ds'`` is in `ds_description`, the entire dataset is
included. Otherwise, only the DataArray converted to a dataset is
included
copy: bool
If True, the arrays and datasets are deep copied
Other Parameters
----------------
%(get_filename_ds.other_parameters)s
Returns
-------
OrderedDict
An ordered mapping from array names to dimensions and filename
corresponding to the array
See Also
--------
from_dict"""
saved_ds = kwargs.pop('_saved_ds', {})
def get_alternative(f):
return next(filter(lambda t: osp.samefile(f, t[0]),
six.iteritems(alternative_paths)), [False, f])
if copy:
def copy_obj(obj):
# try to get the number of the dataset and create only one
# copy for each dataset
try:
num = obj.psy.num
except AttributeError:
pass
else:
try:
return saved_ds[num]
except KeyError:
saved_ds[num] = obj.psy.copy(True)
return saved_ds[num]
return obj.psy.copy(True)
else:
def copy_obj(obj):
return obj
ret = OrderedDict()
if ds_description == 'all':
ds_description = {'fname', 'ds', 'num', 'arr', 'store'}
if paths is not None:
if dump is None:
dump = True
paths = iter(paths)
elif dump is None:
dump = False
if pwd is None:
pwd = getcwd()
for arr in self:
if isinstance(arr, InteractiveList):
ret[arr.arr_name] = arr.array_info(
dump, paths, pwd=pwd, attrs=attrs,
standardize_dims=standardize_dims,
use_rel_paths=use_rel_paths, ds_description=ds_description,
alternative_paths=alternative_paths, copy=copy,
_saved_ds=saved_ds, **kwargs)
else:
if standardize_dims:
idims = arr.psy.decoder.standardize_dims(
next(arr.psy.iter_base_variables), arr.psy.idims)
else:
idims = arr.psy.idims
ret[arr.psy.arr_name] = d = {'dims': idims}
if 'variable' in arr.coords:
d['name'] = [list(arr.coords['variable'].values)]
else:
d['name'] = arr.name
if 'fname' in ds_description or 'store' in ds_description:
fname, store_mod, store_cls = get_filename_ds(
arr.psy.base, dump=dump, paths=paths, **kwargs)
if 'store' in ds_description:
d['store'] = (store_mod, store_cls)
if 'fname' in ds_description:
d['fname'] = []
for i, f in enumerate(safe_list(fname)):
if (f is None or utils.is_remote_url(f)):
d['fname'].append(f)
else:
found, f = get_alternative(f)
if use_rel_paths:
f = osp.relpath(f, pwd)
else:
f = osp.abspath(f)
d['fname'].append(f)
if fname is None or isinstance(fname,
six.string_types):
d['fname'] = d['fname'][0]
else:
d['fname'] = tuple(safe_list(fname))
if arr.psy.base.psy._concat_dim is not None:
d['concat_dim'] = arr.psy.base.psy._concat_dim
if 'ds' in ds_description:
if full_ds:
d['ds'] = copy_obj(arr.psy.base)
else:
d['ds'] = copy_obj(arr.to_dataset())
if 'num' in ds_description:
d['num'] = arr.psy.base.psy.num
if 'arr' in ds_description:
d['arr'] = copy_obj(arr)
if attrs:
d['attrs'] = arr.attrs
ret['attrs'] = self.attrs
return ret | Get dimension information on your arrays
This method returns a dictionary containing information on the
array in this instance
Parameters
----------
dump: bool
If True and the dataset has not been dumped so far, it is dumped to
a temporary file or the one generated by `paths` is used. If it is
False or both, `dump` and `paths` are None, no data will be stored.
If it is None and `paths` is not None, `dump` is set to True.
%(get_filename_ds.parameters.no_ds|dump)s
attrs: bool, optional
If True (default), the :attr:`ArrayList.attrs` and
:attr:`xarray.DataArray.attrs` attributes are included in the
returning dictionary
standardize_dims: bool, optional
If True (default), the real dimension names in the dataset are
replaced by x, y, z and t to be more general.
pwd: str
Path to the working directory from where the data can be imported.
If None, use the current working directory.
use_rel_paths: bool, optional
If True (default), paths relative to the current working directory
are used. Otherwise absolute paths to `pwd` are used
ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'}
Keys to describe the datasets of the arrays. If all, all keys
are used. The key descriptions are
fname
the file name is inserted in the ``'fname'`` key
store
the data store class and module is inserted in the ``'store'``
key
ds
the dataset is inserted in the ``'ds'`` key
num
The unique number assigned to the dataset is inserted in the
``'num'`` key
arr
The array itself is inserted in the ``'arr'`` key
full_ds: bool
If True and ``'ds'`` is in `ds_description`, the entire dataset is
included. Otherwise, only the DataArray converted to a dataset is
included
copy: bool
If True, the arrays and datasets are deep copied
Other Parameters
----------------
%(get_filename_ds.other_parameters)s
Returns
-------
OrderedDict
An ordered mapping from array names to dimensions and filename
corresponding to the array
See Also
--------
from_dict | Below is the the instruction that describes the task:
### Input:
Get dimension information on your arrays
This method returns a dictionary containing information on the
array in this instance
Parameters
----------
dump: bool
If True and the dataset has not been dumped so far, it is dumped to
a temporary file or the one generated by `paths` is used. If it is
False or both, `dump` and `paths` are None, no data will be stored.
If it is None and `paths` is not None, `dump` is set to True.
%(get_filename_ds.parameters.no_ds|dump)s
attrs: bool, optional
If True (default), the :attr:`ArrayList.attrs` and
:attr:`xarray.DataArray.attrs` attributes are included in the
returning dictionary
standardize_dims: bool, optional
If True (default), the real dimension names in the dataset are
replaced by x, y, z and t to be more general.
pwd: str
Path to the working directory from where the data can be imported.
If None, use the current working directory.
use_rel_paths: bool, optional
If True (default), paths relative to the current working directory
are used. Otherwise absolute paths to `pwd` are used
ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'}
Keys to describe the datasets of the arrays. If all, all keys
are used. The key descriptions are
fname
the file name is inserted in the ``'fname'`` key
store
the data store class and module is inserted in the ``'store'``
key
ds
the dataset is inserted in the ``'ds'`` key
num
The unique number assigned to the dataset is inserted in the
``'num'`` key
arr
The array itself is inserted in the ``'arr'`` key
full_ds: bool
If True and ``'ds'`` is in `ds_description`, the entire dataset is
included. Otherwise, only the DataArray converted to a dataset is
included
copy: bool
If True, the arrays and datasets are deep copied
Other Parameters
----------------
%(get_filename_ds.other_parameters)s
Returns
-------
OrderedDict
An ordered mapping from array names to dimensions and filename
corresponding to the array
See Also
--------
from_dict
### Response:
def array_info(self, dump=None, paths=None, attrs=True,
standardize_dims=True, pwd=None, use_rel_paths=True,
alternative_paths={}, ds_description={'fname', 'store'},
full_ds=True, copy=False, **kwargs):
"""
Get dimension information on your arrays
This method returns a dictionary containing information on the
array in this instance
Parameters
----------
dump: bool
If True and the dataset has not been dumped so far, it is dumped to
a temporary file or the one generated by `paths` is used. If it is
False or both, `dump` and `paths` are None, no data will be stored.
If it is None and `paths` is not None, `dump` is set to True.
%(get_filename_ds.parameters.no_ds|dump)s
attrs: bool, optional
If True (default), the :attr:`ArrayList.attrs` and
:attr:`xarray.DataArray.attrs` attributes are included in the
returning dictionary
standardize_dims: bool, optional
If True (default), the real dimension names in the dataset are
replaced by x, y, z and t to be more general.
pwd: str
Path to the working directory from where the data can be imported.
If None, use the current working directory.
use_rel_paths: bool, optional
If True (default), paths relative to the current working directory
are used. Otherwise absolute paths to `pwd` are used
ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'}
Keys to describe the datasets of the arrays. If all, all keys
are used. The key descriptions are
fname
the file name is inserted in the ``'fname'`` key
store
the data store class and module is inserted in the ``'store'``
key
ds
the dataset is inserted in the ``'ds'`` key
num
The unique number assigned to the dataset is inserted in the
``'num'`` key
arr
The array itself is inserted in the ``'arr'`` key
full_ds: bool
If True and ``'ds'`` is in `ds_description`, the entire dataset is
included. Otherwise, only the DataArray converted to a dataset is
included
copy: bool
If True, the arrays and datasets are deep copied
Other Parameters
----------------
%(get_filename_ds.other_parameters)s
Returns
-------
OrderedDict
An ordered mapping from array names to dimensions and filename
corresponding to the array
See Also
--------
from_dict"""
saved_ds = kwargs.pop('_saved_ds', {})
def get_alternative(f):
return next(filter(lambda t: osp.samefile(f, t[0]),
six.iteritems(alternative_paths)), [False, f])
if copy:
def copy_obj(obj):
# try to get the number of the dataset and create only one
# copy for each dataset
try:
num = obj.psy.num
except AttributeError:
pass
else:
try:
return saved_ds[num]
except KeyError:
saved_ds[num] = obj.psy.copy(True)
return saved_ds[num]
return obj.psy.copy(True)
else:
def copy_obj(obj):
return obj
ret = OrderedDict()
if ds_description == 'all':
ds_description = {'fname', 'ds', 'num', 'arr', 'store'}
if paths is not None:
if dump is None:
dump = True
paths = iter(paths)
elif dump is None:
dump = False
if pwd is None:
pwd = getcwd()
for arr in self:
if isinstance(arr, InteractiveList):
ret[arr.arr_name] = arr.array_info(
dump, paths, pwd=pwd, attrs=attrs,
standardize_dims=standardize_dims,
use_rel_paths=use_rel_paths, ds_description=ds_description,
alternative_paths=alternative_paths, copy=copy,
_saved_ds=saved_ds, **kwargs)
else:
if standardize_dims:
idims = arr.psy.decoder.standardize_dims(
next(arr.psy.iter_base_variables), arr.psy.idims)
else:
idims = arr.psy.idims
ret[arr.psy.arr_name] = d = {'dims': idims}
if 'variable' in arr.coords:
d['name'] = [list(arr.coords['variable'].values)]
else:
d['name'] = arr.name
if 'fname' in ds_description or 'store' in ds_description:
fname, store_mod, store_cls = get_filename_ds(
arr.psy.base, dump=dump, paths=paths, **kwargs)
if 'store' in ds_description:
d['store'] = (store_mod, store_cls)
if 'fname' in ds_description:
d['fname'] = []
for i, f in enumerate(safe_list(fname)):
if (f is None or utils.is_remote_url(f)):
d['fname'].append(f)
else:
found, f = get_alternative(f)
if use_rel_paths:
f = osp.relpath(f, pwd)
else:
f = osp.abspath(f)
d['fname'].append(f)
if fname is None or isinstance(fname,
six.string_types):
d['fname'] = d['fname'][0]
else:
d['fname'] = tuple(safe_list(fname))
if arr.psy.base.psy._concat_dim is not None:
d['concat_dim'] = arr.psy.base.psy._concat_dim
if 'ds' in ds_description:
if full_ds:
d['ds'] = copy_obj(arr.psy.base)
else:
d['ds'] = copy_obj(arr.to_dataset())
if 'num' in ds_description:
d['num'] = arr.psy.base.psy.num
if 'arr' in ds_description:
d['arr'] = copy_obj(arr)
if attrs:
d['attrs'] = arr.attrs
ret['attrs'] = self.attrs
return ret |
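A small call sketch for array_info above, assuming arrays is a psyplot ArrayList (for instance the arrays attribute of an open psyplot project); only parameters documented in the record are used.

from pprint import pprint

# dump=False keeps everything in memory; only file names and dataset numbers
# are requested in the description.
info = arrays.array_info(dump=False, attrs=False,
                         ds_description={'fname', 'num'})
pprint(info)   # OrderedDict: array name -> {'dims': ..., 'name': ..., 'fname': ..., 'num': ...}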
def phaseshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, tolerate_nans=True, random_state=None):
"""Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get shifted version of data
shifted_data = phase_randomize(data, random_state=prng)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=True,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
# Roll subject axis of phase-randomized data
shifted_data = np.rollaxis(shifted_data, 2, 0)
shifted_isc = []
for s, shifted_subject in enumerate(shifted_data):
# ISC of shifted left-out subject vs mean of N-1 subjects
nonshifted_mean = np.mean(np.delete(data, s, axis=2),
axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False, summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic, axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=0)
return observed, p, distribution | Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True | Below is the the instruction that describes the task:
### Input:
Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
### Response:
def phaseshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, tolerate_nans=True, random_state=None):
"""Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get shifted version of data
shifted_data = phase_randomize(data, random_state=prng)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=True,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
# Roll subject axis of phase-randomized data
shifted_data = np.rollaxis(shifted_data, 2, 0)
shifted_isc = []
for s, shifted_subject in enumerate(shifted_data):
# ISC of shifted left-out subject vs mean of N-1 subjects
nonshifted_mean = np.mean(np.delete(data, s, axis=2),
axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False, summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic, axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=0)
return observed, p, distribution |
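A self-contained sketch of calling phaseshift_isc above on synthetic data, assuming the function and the helpers it relies on (isc, phase_randomize, compute_summary_statistic, p_from_null) are importable from the same BrainIAK-style module; sizes and the shift count are chosen only to keep the example quick.

import numpy as np

rng = np.random.RandomState(0)
shared = rng.randn(200, 10)                                     # 200 TRs x 10 voxels
data = shared[..., np.newaxis] + 2.0 * rng.randn(200, 10, 8)    # 8 noisy subjects

observed, p, distribution = phaseshift_isc(data, pairwise=False,
                                            n_shifts=100, random_state=0)
# observed holds per-voxel ISCs, p the two-sided p-values, and distribution
# the null ISCs with one row per phase-randomized shift.
print(observed.shape, p.shape, distribution.shape)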
def get_lightcurve(self, star_id, return_1d=True):
"""Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out.
"""
filename = '{0}/{1}.dat'.format(self.dirname, star_id)
try:
data = np.loadtxt(self.data.extractfile(filename))
except KeyError:
raise ValueError("invalid star id: {0}".format(star_id))
RA = data[:, 0]
DEC = data[:, 1]
t = data[:, 2::3]
y = data[:, 3::3]
dy = data[:, 4::3]
nans = (y == -99.99)
t[nans] = np.nan
y[nans] = np.nan
dy[nans] = np.nan
if return_1d:
t, y, dy, filts = np.broadcast_arrays(t, y, dy,
['u', 'g', 'r', 'i', 'z'])
good = ~np.isnan(t)
return t[good], y[good], dy[good], filts[good]
else:
return t, y, dy | Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out. | Below is the the instruction that describes the task:
### Input:
Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out.
### Response:
def get_lightcurve(self, star_id, return_1d=True):
"""Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out.
"""
filename = '{0}/{1}.dat'.format(self.dirname, star_id)
try:
data = np.loadtxt(self.data.extractfile(filename))
except KeyError:
raise ValueError("invalid star id: {0}".format(star_id))
RA = data[:, 0]
DEC = data[:, 1]
t = data[:, 2::3]
y = data[:, 3::3]
dy = data[:, 4::3]
nans = (y == -99.99)
t[nans] = np.nan
y[nans] = np.nan
dy[nans] = np.nan
if return_1d:
t, y, dy, filts = np.broadcast_arrays(t, y, dy,
['u', 'g', 'r', 'i', 'z'])
good = ~np.isnan(t)
return t[good], y[good], dy[good], filts[good]
else:
return t, y, dy |
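A usage sketch for get_lightcurve above. The method reads like the RR Lyrae dataset loader in gatspy, so the construction call and the ids attribute below are assumptions; only the method signature comes from the record.

from gatspy import datasets   # assumed packaging of the loader class

rrlyrae = datasets.fetch_rrlyrae()
star_id = list(rrlyrae.ids)[0]
t, y, dy, filts = rrlyrae.get_lightcurve(star_id, return_1d=True)
print(t.shape, sorted(set(filts)))   # flat arrays plus the u/g/r/i/z filter labels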
def _pathway_feature_permutation(pathway_feature_tuples,
permutation_max_iters):
"""Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations, limit the number of attempts
we have to generate a permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation
"""
pathways, features = [list(elements_at_position)
for elements_at_position in
zip(*pathway_feature_tuples)]
original_pathways = pathways[:]
random.shuffle(pathways)
feature_block_locations = {}
i = 0
while i < len(pathways):
starting_index = i
current_feature = features[i]
pathway_set = set()
# input is grouped by feature, so we want to keep track of the start
# and end of a given "block" of the same feature--this corresponds
# to all the pathways overrepresented in that feature.
while i < len(pathways) and features[i] == current_feature:
# check the results of the permutation. if `pathway_set` does
# not contain the current pathway, we are maintaining the
# necessary invariants in our permutation thus far.
if pathways[i] not in pathway_set:
pathway_set.add(pathways[i])
else:
k = 0
random_pathway = None
while True:
# select another random pathway from the list
# and get the feature to which it belongs
j = random.choice(range(0, len(pathways)))
random_pathway = pathways[j]
random_feature = features[j]
if (random_pathway != pathways[i] and
random_pathway not in pathway_set):
# if this is a feature we have not already seen,
# we are done.
if random_feature not in feature_block_locations:
break
# otherwise, look at the indices that correspond
# to that feature's block of pathways
feature_block_start, feature_block_end = \
feature_block_locations[random_feature]
pathway_block = pathways[feature_block_start:
feature_block_end]
# make sure that the current pathway is not in
# that block--ensures that we maintain the invariant
# after the swap
if pathways[i] not in pathway_block:
break
k += 1
if k > permutation_max_iters:
print("Permutation step: reached the maximum "
"number of iterations {0}.".format(
permutation_max_iters))
return None
pathway_set.add(random_pathway)
pathways[j] = pathways[i]
pathways[i] = random_pathway
i += 1
ending_index = i
feature_block_locations[current_feature] = (
starting_index, ending_index)
if original_pathways == pathways:
return None
return list(zip(pathways, features)) | Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations, limit the number of attempts
we have to generate a permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation | Below is the the instruction that describes the task:
### Input:
Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations, limit the number of attempts
we have to generate a permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation
### Response:
def _pathway_feature_permutation(pathway_feature_tuples,
permutation_max_iters):
"""Permute the pathways across features for one side in the
network. Used in `permute_pathways_across_features`
Parameters
-----------
pathway_feature_tuples : list(tup(str, int))
a tuple list [(pathway, feature)] where the pathway, feature pairing
indicates that a pathway was overrepresented in that feature
permutation_max_iters : int
specify the maximum number of iterations, limit the number of attempts
we have to generate a permutation
Returns
-----------
list(tup(str, int)), the list of pathway, feature pairings after the
permutation
"""
pathways, features = [list(elements_at_position)
for elements_at_position in
zip(*pathway_feature_tuples)]
original_pathways = pathways[:]
random.shuffle(pathways)
feature_block_locations = {}
i = 0
while i < len(pathways):
starting_index = i
current_feature = features[i]
pathway_set = set()
# input is grouped by feature, so we want to keep track of the start
# and end of a given "block" of the same feature--this corresponds
# to all the pathways overrepresented in that feature.
while i < len(pathways) and features[i] == current_feature:
# check the results of the permutation. if `pathway_set` does
# not contain the current pathway, we are maintaining the
# necessary invariants in our permutation thus far.
if pathways[i] not in pathway_set:
pathway_set.add(pathways[i])
else:
k = 0
random_pathway = None
while True:
# select another random pathway from the list
# and get the feature to which it belongs
j = random.choice(range(0, len(pathways)))
random_pathway = pathways[j]
random_feature = features[j]
if (random_pathway != pathways[i] and
random_pathway not in pathway_set):
# if this is a feature we have not already seen,
# we are done.
if random_feature not in feature_block_locations:
break
# otherwise, look at the indices that correspond
# to that feature's block of pathways
feature_block_start, feature_block_end = \
feature_block_locations[random_feature]
pathway_block = pathways[feature_block_start:
feature_block_end]
# make sure that the current pathway is not in
# that block--ensures that we maintain the invariant
# after the swap
if pathways[i] not in pathway_block:
break
k += 1
if k > permutation_max_iters:
print("Permutation step: reached the maximum "
"number of iterations {0}.".format(
permutation_max_iters))
return None
pathway_set.add(random_pathway)
pathways[j] = pathways[i]
pathways[i] = random_pathway
i += 1
ending_index = i
feature_block_locations[current_feature] = (
starting_index, ending_index)
if original_pathways == pathways:
return None
return list(zip(pathways, features)) |
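A tiny driver for _pathway_feature_permutation above; the pathway and feature labels are invented, and the input is grouped by feature exactly as the helper expects.

import random

random.seed(7)   # the helper draws from the module-level random state

pairs = [('p1', 0), ('p2', 0), ('p3', 0),
         ('p1', 1), ('p4', 1),
         ('p2', 2), ('p3', 2), ('p4', 2)]
permuted = _pathway_feature_permutation(pairs, permutation_max_iters=1000)
# Either a reshuffled list of (pathway, feature) pairs with no pathway repeated
# within a feature block, or None if no valid permutation was found in time.
print(permuted)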
def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME,
fail_names=DEFAULT_FAIL_NAMES, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
"""
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
"""
report = load(path)
plot_report(report, success_name, fail_names, label, is_max_confidence,
linewidth, plot_upper_bound) | Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot | Below is the the instruction that describes the task:
### Input:
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
### Response:
def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME,
fail_names=DEFAULT_FAIL_NAMES, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
"""
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
"""
report = load(path)
plot_report(report, success_name, fail_names, label, is_max_confidence,
linewidth, plot_upper_bound) |
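A usage sketch for plot_report_from_path above; the report file names are placeholders for outputs of make_confidence_report*.py, and finishing the figure with pyplot is an assumption about how the surrounding plotting helpers are meant to be combined.

import matplotlib.pyplot as plt

plot_report_from_path('clean_report.joblib', label='clean model')
plot_report_from_path('adv_trained_report.joblib', label='adversarially trained')
plt.legend()
plt.show()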
def integer_list_file(cls, filename):
"""
Read a list of integers from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- integers can be specified as:
- decimal numbers ("100" is 100)
- hexadecimal numbers ("0x100" is 256)
- binary numbers ("0b100" is 4)
- octal numbers ("0100" is 64)
@type filename: str
@param filename: Name of the file to read.
@rtype: list( int )
@return: List of integers read from the file.
"""
count = 0
result = list()
fd = open(filename, 'r')
for line in fd:
count = count + 1
if '#' in line:
line = line[ : line.find('#') ]
line = line.strip()
if line:
try:
value = cls.integer(line)
except ValueError:
e = sys.exc_info()[1]
msg = "Error in line %d of %s: %s"
msg = msg % (count, filename, str(e))
raise ValueError(msg)
result.append(value)
return result | Read a list of integers from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- integers can be specified as:
- decimal numbers ("100" is 100)
- hexadecimal numbers ("0x100" is 256)
- binary numbers ("0b100" is 4)
- octal numbers ("0100" is 64)
@type filename: str
@param filename: Name of the file to read.
@rtype: list( int )
@return: List of integers read from the file. | Below is the the instruction that describes the task:
### Input:
Read a list of integers from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- integers can be specified as:
- decimal numbers ("100" is 100)
- hexadecimal numbers ("0x100" is 256)
- binary numbers ("0b100" is 4)
- octal numbers ("0100" is 64)
@type filename: str
@param filename: Name of the file to read.
@rtype: list( int )
@return: List of integers read from the file.
### Response:
def integer_list_file(cls, filename):
"""
Read a list of integers from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- integers can be specified as:
- decimal numbers ("100" is 100)
- hexadecimal numbers ("0x100" is 256)
- binary numbers ("0b100" is 4)
- octal numbers ("0100" is 64)
@type filename: str
@param filename: Name of the file to read.
@rtype: list( int )
@return: List of integers read from the file.
"""
count = 0
result = list()
fd = open(filename, 'r')
for line in fd:
count = count + 1
if '#' in line:
line = line[ : line.find('#') ]
line = line.strip()
if line:
try:
value = cls.integer(line)
except ValueError:
e = sys.exc_info()[1]
msg = "Error in line %d of %s: %s"
msg = msg % (count, filename, str(e))
raise ValueError(msg)
result.append(value)
return result |
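A usage sketch for integer_list_file above. The enclosing class name HexInput is a guess (the classmethod only needs a companion cls.integer parser), and the temporary file simply exercises each number format the docstring lists.

import os
import tempfile

lines = [
    '# one value per line; comments and blank lines are ignored',
    '100      # decimal',
    '0x100    # hexadecimal',
    '0b100    # binary',
    '0100     # octal',
    '',
]
path = os.path.join(tempfile.mkdtemp(), 'values.txt')
with open(path, 'w') as f:
    f.write('\n'.join(lines))

print(HexInput.integer_list_file(path))   # expected: [100, 256, 4, 64]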
def clear(*signals):
"""
Clears all callbacks for a particular signal or signals
"""
signals = signals if signals else receivers.keys()
for signal in signals:
receivers[signal].clear() | Clears all callbacks for a particular signal or signals | Below is the the instruction that describes the task:
### Input:
Clears all callbacks for a particular signal or signals
### Response:
def clear(*signals):
"""
Clears all callbacks for a particular signal or signals
"""
signals = signals if signals else receivers.keys()
for signal in signals:
receivers[signal].clear() |
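Illustrative calls to clear() above; the signal names are made up, and receivers is the module-level mapping from signal name to registered callbacks that the record manipulates.

clear('user_created')                     # drop callbacks for a single signal
clear('order_paid', 'order_refunded')     # or for several signals at once
clear()                                   # no arguments: clear every known signal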
def from_str(duration):
"""Parse a duration string to a datetime.timedelta"""
if duration in ("0", "+0", "-0"):
return datetime.timedelta()
pattern = re.compile('([\d\.]+)([a-zµμ]+)')
total = 0
sign = -1 if duration[0] == '-' else 1
matches = pattern.findall(duration)
if not len(matches):
raise Exception("Invalid duration {}".format(duration))
for (value, unit) in matches:
if unit not in units:
raise Exception(
"Unknown unit {} in duration {}".format(unit, duration))
try:
total += float(value) * units[unit]
except:
raise Exception(
"Invalid value {} in duration {}".format(value, duration))
microseconds = total / _microsecond_size
return datetime.timedelta(microseconds=sign * microseconds) | Parse a duration string to a datetime.timedelta | Below is the the instruction that describes the task:
### Input:
Parse a duration string to a datetime.timedelta
### Response:
def from_str(duration):
"""Parse a duration string to a datetime.timedelta"""
if duration in ("0", "+0", "-0"):
return datetime.timedelta()
pattern = re.compile('([\d\.]+)([a-zµμ]+)')
total = 0
sign = -1 if duration[0] == '-' else 1
matches = pattern.findall(duration)
if not len(matches):
raise Exception("Invalid duration {}".format(duration))
for (value, unit) in matches:
if unit not in units:
raise Exception(
"Unknown unit {} in duration {}".format(unit, duration))
try:
total += float(value) * units[unit]
except:
raise Exception(
"Invalid value {} in duration {}".format(value, duration))
microseconds = total / _microsecond_size
return datetime.timedelta(microseconds=sign * microseconds) |
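For illustration, a self-contained simplification of the same parsing approach; the unit table here is an assumption (the original module defines its own units and _microsecond_size), and only a handful of units are covered.

import re
import datetime

UNITS_US = {'us': 1, 'ms': 1000, 's': 10**6, 'm': 60 * 10**6, 'h': 3600 * 10**6}

def parse_duration(text):
    sign = -1 if text.startswith('-') else 1
    pairs = re.findall(r'([\d.]+)([a-z]+)', text)
    total_us = sum(float(value) * UNITS_US[unit] for value, unit in pairs)
    return datetime.timedelta(microseconds=sign * total_us)

print(parse_duration('1h30m'))   # 1:30:00
print(parse_duration('-2.5s'))   # -1 day, 23:59:57.500000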
def json(self) -> dict:
"""Returns json compatible state of the ButtonsFrame instance.
Returns json compatible state of the ButtonsFrame instance including
all nested buttons.
Returns:
control_json: Json representation of ButtonsFrame state.
"""
content = {}
if self.text:
content['text'] = self.text
content['controls'] = [control.json() for control in self.content]
self.control_json['content'] = content
return self.control_json | Returns json compatible state of the ButtonsFrame instance.
Returns json compatible state of the ButtonsFrame instance including
all nested buttons.
Returns:
control_json: Json representation of ButtonsFrame state. | Below is the the instruction that describes the task:
### Input:
Returns json compatible state of the ButtonsFrame instance.
Returns json compatible state of the ButtonsFrame instance including
all nested buttons.
Returns:
control_json: Json representation of ButtonsFrame state.
### Response:
def json(self) -> dict:
"""Returns json compatible state of the ButtonsFrame instance.
Returns json compatible state of the ButtonsFrame instance including
all nested buttons.
Returns:
control_json: Json representation of ButtonsFrame state.
"""
content = {}
if self.text:
content['text'] = self.text
content['controls'] = [control.json() for control in self.content]
self.control_json['content'] = content
return self.control_json |
def serialize_options(opts):
"""
A helper method to serialize and process the options dictionary.
"""
options = (opts or {}).copy()
for key in opts.keys():
if key not in DEFAULT_OPTIONS:
LOG.warning("Unknown option passed to Flask-CORS: %s", key)
# Ensure origins is a list of allowed origins with at least one entry.
options['origins'] = sanitize_regex_param(options.get('origins'))
options['allow_headers'] = sanitize_regex_param(options.get('allow_headers'))
# This is expressly forbidden by the spec. Raise a value error so people
# don't get burned in production.
if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']:
raise ValueError("Cannot use supports_credentials in conjunction with"
"an origin string of '*'. See: "
"http://www.w3.org/TR/cors/#resource-requests")
serialize_option(options, 'expose_headers')
serialize_option(options, 'methods', upper=True)
if isinstance(options.get('max_age'), timedelta):
options['max_age'] = str(int(options['max_age'].total_seconds()))
return options | A helper method to serialize and process the options dictionary. | Below is the the instruction that describes the task:
### Input:
A helper method to serialize and process the options dictionary.
### Response:
def serialize_options(opts):
"""
A helper method to serialize and process the options dictionary.
"""
options = (opts or {}).copy()
for key in opts.keys():
if key not in DEFAULT_OPTIONS:
LOG.warning("Unknown option passed to Flask-CORS: %s", key)
# Ensure origins is a list of allowed origins with at least one entry.
options['origins'] = sanitize_regex_param(options.get('origins'))
options['allow_headers'] = sanitize_regex_param(options.get('allow_headers'))
# This is expressly forbidden by the spec. Raise a value error so people
# don't get burned in production.
if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']:
raise ValueError("Cannot use supports_credentials in conjunction with"
"an origin string of '*'. See: "
"http://www.w3.org/TR/cors/#resource-requests")
serialize_option(options, 'expose_headers')
serialize_option(options, 'methods', upper=True)
if isinstance(options.get('max_age'), timedelta):
options['max_age'] = str(int(options['max_age'].total_seconds()))
return options |
def inq_compound(self, name):
"""
Return the number of fields and size (not yet) of a compound type.
"""
name = create_string_buffer(name)
self.library.inq_compound.argtypes = [c_char_p, POINTER(c_int)]
self.library.inq_compound.restype = None
nfields = c_int()
self.library.inq_compound(name, byref(nfields))
return nfields.value | Return the number of fields and size (not yet) of a compound type. | Below is the the instruction that describes the task:
### Input:
Return the number of fields and size (not yet) of a compound type.
### Response:
def inq_compound(self, name):
"""
Return the number of fields and size (not yet) of a compound type.
"""
name = create_string_buffer(name)
self.library.inq_compound.argtypes = [c_char_p, POINTER(c_int)]
self.library.inq_compound.restype = None
nfields = c_int()
self.library.inq_compound(name, byref(nfields))
return nfields.value |
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
"""
Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs) | Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str | Below is the the instruction that describes the task:
### Input:
Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
### Response:
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
"""
Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs) |
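For context, the hook above ultimately delegates to PyMongo's Collection.delete_many; a hedged usage sketch against a local server (the connection string, database, collection, and filter are made up for this example and assume a running MongoDB).

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')   # assumed connection string
events = client['mydb']['events']
result = events.delete_many({'status': 'stale'})
print(result.deleted_count)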
def authenticate_credentials(self, userargs, password, request=None):
"""
Authenticate the userargs and password against Django auth backends.
The "userargs" string may be just the username, or a querystring-encoded set of params.
"""
credentials = {
'password': password
}
if "=" not in userargs:
# if it doesn't seem to be in querystring format, just use it as the username
credentials[get_user_model().USERNAME_FIELD] = userargs
else:
# parse out the user args from querystring format into the credentials dict
for arg in userargs.split("&"):
key, val = arg.split("=")
credentials[key] = val
# authenticate the user via Django's auth backends
user = authenticate(**credentials)
if user is None:
raise exceptions.AuthenticationFailed('Invalid credentials.')
if not user.is_active:
raise exceptions.AuthenticationFailed('User inactive or deleted.')
return (user, None) | Authenticate the userargs and password against Django auth backends.
The "userargs" string may be just the username, or a querystring-encoded set of params. | Below is the the instruction that describes the task:
### Input:
Authenticate the userargs and password against Django auth backends.
The "userargs" string may be just the username, or a querystring-encoded set of params.
### Response:
def authenticate_credentials(self, userargs, password, request=None):
"""
Authenticate the userargs and password against Django auth backends.
The "userargs" string may be just the username, or a querystring-encoded set of params.
"""
credentials = {
'password': password
}
if "=" not in userargs:
# if it doesn't seem to be in querystring format, just use it as the username
credentials[get_user_model().USERNAME_FIELD] = userargs
else:
# parse out the user args from querystring format into the credentials dict
for arg in userargs.split("&"):
key, val = arg.split("=")
credentials[key] = val
# authenticate the user via Django's auth backends
user = authenticate(**credentials)
if user is None:
raise exceptions.AuthenticationFailed('Invalid credentials.')
if not user.is_active:
raise exceptions.AuthenticationFailed('User inactive or deleted.')
return (user, None) |
def download_mp4(from_idx, to_idx, _params):
"""
download mp4s
"""
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
save_folder = '{src_path}/{nm}'.format(src_path=_params['src_path'], nm=name)
if idx == 0 or os.path.isdir(save_folder):
continue
script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/video/{nm}.mpg_vcd.zip".format( \
nm=name)
down_sc = 'cd {src_path} && curl {script} --output {nm}.mpg_vcd.zip && \
unzip {nm}.mpg_vcd.zip'.format(script=script,
nm=name,
src_path=_params['src_path'])
try:
print(down_sc)
os.system(down_sc)
succ.add(idx)
except OSError as error:
print(error)
fail.add(idx)
return (succ, fail) | download mp4s | Below is the the instruction that describes the task:
### Input:
download mp4s
### Response:
def download_mp4(from_idx, to_idx, _params):
"""
download mp4s
"""
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
save_folder = '{src_path}/{nm}'.format(src_path=_params['src_path'], nm=name)
if idx == 0 or os.path.isdir(save_folder):
continue
script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/video/{nm}.mpg_vcd.zip".format( \
nm=name)
down_sc = 'cd {src_path} && curl {script} --output {nm}.mpg_vcd.zip && \
unzip {nm}.mpg_vcd.zip'.format(script=script,
nm=name,
src_path=_params['src_path'])
try:
print(down_sc)
os.system(down_sc)
succ.add(idx)
except OSError as error:
print(error)
fail.add(idx)
return (succ, fail) |
def _parse_prefix_as_idd(idd_pattern, number):
"""Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped
"""
match = idd_pattern.match(number)
if match:
match_end = match.end()
# Only strip this if the first digit after the match is not a 0, since
# country calling codes cannot begin with 0.
digit_match = _CAPTURING_DIGIT_PATTERN.search(number[match_end:])
if digit_match:
normalized_group = normalize_digits_only(digit_match.group(1))
if normalized_group == U_ZERO:
return (False, number)
return (True, number[match_end:])
return (False, number) | Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped | Below is the the instruction that describes the task:
### Input:
Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped
### Response:
def _parse_prefix_as_idd(idd_pattern, number):
"""Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped
"""
match = idd_pattern.match(number)
if match:
match_end = match.end()
# Only strip this if the first digit after the match is not a 0, since
# country calling codes cannot begin with 0.
digit_match = _CAPTURING_DIGIT_PATTERN.search(number[match_end:])
if digit_match:
normalized_group = normalize_digits_only(digit_match.group(1))
if normalized_group == U_ZERO:
return (False, number)
return (True, number[match_end:])
return (False, number) |
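A simplified, self-contained illustration of the same check (it skips the digit normalization done by the real helper); the "00" IDD pattern is an assumption chosen for the example.

import re

CAPTURING_DIGIT = re.compile(r'(\d)')

def strip_idd(idd_pattern, number):
    match = idd_pattern.match(number)
    if not match:
        return (False, number)
    rest = number[match.end():]
    digit = CAPTURING_DIGIT.search(rest)
    if digit and digit.group(1) == '0':
        return (False, number)   # country calling codes never start with 0
    return (True, rest)

idd = re.compile('00')
print(strip_idd(idd, '0049301234567'))   # (True, '49301234567')
print(strip_idd(idd, '0004912345'))      # (False, '0004912345')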
def request_all_data(cls, time, pressure=None, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, None, pressure, **kwargs)
return df | Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data | Below is the the instruction that describes the task:
### Input:
Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
### Response:
def request_all_data(cls, time, pressure=None, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, None, pressure, **kwargs)
return df |
def getTokensForText(self, body, POStags=None):
"""Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
Returns:
list of str
Raises:
CorticalioException: if the request was not successful
"""
return self._text.getTokensForText(self._retina, body, POStags) | Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
Returns:
list of str
Raises:
CorticalioException: if the request was not successful | Below is the the instruction that describes the task:
### Input:
Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
Returns:
list of str
Raises:
CorticalioException: if the request was not successful
### Response:
def getTokensForText(self, body, POStags=None):
"""Get tokenized input text
Args:
body, str: The text to be tokenized (required)
POStags, str: Specify desired POS types (optional)
Returns:
list of str
Raises:
CorticalioException: if the request was not successful
"""
return self._text.getTokensForText(self._retina, body, POStags) |
def pretty (self):
'''This returns a copy of the screen as a unicode string with an ASCII
text box around the screen border. This is similar to
__str__/__unicode__ except that it adds a box.'''
top_bot = u'+' + u'-'*self.cols + u'+\n'
return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot | This returns a copy of the screen as a unicode string with an ASCII
text box around the screen border. This is similar to
__str__/__unicode__ except that it adds a box. | Below is the the instruction that describes the task:
### Input:
This returns a copy of the screen as a unicode string with an ASCII
text box around the screen border. This is similar to
__str__/__unicode__ except that it adds a box.
### Response:
def pretty (self):
'''This returns a copy of the screen as a unicode string with an ASCII
text box around the screen border. This is similar to
__str__/__unicode__ except that it adds a box.'''
top_bot = u'+' + u'-'*self.cols + u'+\n'
return top_bot + u'\n'.join([u'|'+line+u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot |
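The same boxing idea in a Python 3 flavour (the entry above targets Python 2's unicode type); a minimal sketch that pads each line to the column width.

def boxed(text, cols):
    top = '+' + '-' * cols + '+'
    body = '\n'.join('|' + line.ljust(cols) + '|' for line in text.split('\n'))
    return top + '\n' + body + '\n' + top

print(boxed('hello\nworld', 7))
# +-------+
# |hello  |
# |world  |
# +-------+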
def convexHull(actor_or_list, alphaConstant=0):
"""
Create a 3D Delaunay triangulation of input points.
:param actor_or_list: can be either an ``Actor`` or a list of 3D points.
:param float alphaConstant: For a non-zero alpha value, only verts, edges, faces,
or tetra contained within the circumsphere (of radius alpha) will be output.
Otherwise, only tetrahedra will be output.
.. hint:: |convexHull| |convexHull.py|_
"""
if vu.isSequence(actor_or_list):
actor = vs.Points(actor_or_list)
else:
actor = actor_or_list
apoly = actor.clean().polydata()
triangleFilter = vtk.vtkTriangleFilter()
triangleFilter.SetInputData(apoly)
triangleFilter.Update()
poly = triangleFilter.GetOutput()
delaunay = vtk.vtkDelaunay3D() # Create the convex hull of the pointcloud
if alphaConstant:
delaunay.SetAlpha(alphaConstant)
delaunay.SetInputData(poly)
delaunay.Update()
surfaceFilter = vtk.vtkDataSetSurfaceFilter()
surfaceFilter.SetInputConnection(delaunay.GetOutputPort())
surfaceFilter.Update()
chuact = Actor(surfaceFilter.GetOutput())
return chuact | Create a 3D Delaunay triangulation of input points.
:param actor_or_list: can be either an ``Actor`` or a list of 3D points.
:param float alphaConstant: For a non-zero alpha value, only verts, edges, faces,
or tetra contained within the circumsphere (of radius alpha) will be output.
Otherwise, only tetrahedra will be output.
.. hint:: |convexHull| |convexHull.py|_ | Below is the the instruction that describes the task:
### Input:
Create a 3D Delaunay triangulation of input points.
:param actor_or_list: can be either an ``Actor`` or a list of 3D points.
:param float alphaConstant: For a non-zero alpha value, only verts, edges, faces,
or tetra contained within the circumsphere (of radius alpha) will be output.
Otherwise, only tetrahedra will be output.
.. hint:: |convexHull| |convexHull.py|_
### Response:
def convexHull(actor_or_list, alphaConstant=0):
"""
Create a 3D Delaunay triangulation of input points.
:param actor_or_list: can be either an ``Actor`` or a list of 3D points.
:param float alphaConstant: For a non-zero alpha value, only verts, edges, faces,
or tetra contained within the circumsphere (of radius alpha) will be output.
Otherwise, only tetrahedra will be output.
.. hint:: |convexHull| |convexHull.py|_
"""
if vu.isSequence(actor_or_list):
actor = vs.Points(actor_or_list)
else:
actor = actor_or_list
apoly = actor.clean().polydata()
triangleFilter = vtk.vtkTriangleFilter()
triangleFilter.SetInputData(apoly)
triangleFilter.Update()
poly = triangleFilter.GetOutput()
delaunay = vtk.vtkDelaunay3D() # Create the convex hull of the pointcloud
if alphaConstant:
delaunay.SetAlpha(alphaConstant)
delaunay.SetInputData(poly)
delaunay.Update()
surfaceFilter = vtk.vtkDataSetSurfaceFilter()
surfaceFilter.SetInputConnection(delaunay.GetOutputPort())
surfaceFilter.Update()
chuact = Actor(surfaceFilter.GetOutput())
return chuact |
def select_day(self, day):
"""选取日期(一般用于分钟线)
Arguments:
day {[type]} -- [description]
Raises:
ValueError -- [description]
Returns:
[type] -- [description]
"""
def _select_day(day):
return self.data.loc[day, slice(None)]
try:
return self.new(_select_day(day), self.type, self.if_fq)
except:
raise ValueError('QA CANNOT GET THIS Day {} '.format(day)) | Select a given date (generally used for minute-bar data)
Arguments:
day {[type]} -- [description]
Raises:
ValueError -- [description]
Returns:
[type] -- [description] | Below is the the instruction that describes the task:
### Input:
Select a given date (generally used for minute-bar data)
Arguments:
day {[type]} -- [description]
Raises:
ValueError -- [description]
Returns:
[type] -- [description]
### Response:
def select_day(self, day):
"""选取日期(一般用于分钟线)
Arguments:
day {[type]} -- [description]
Raises:
ValueError -- [description]
Returns:
[type] -- [description]
"""
def _select_day(day):
return self.data.loc[day, slice(None)]
try:
return self.new(_select_day(day), self.type, self.if_fq)
except:
raise ValueError('QA CANNOT GET THIS Day {} '.format(day)) |
def mergebydepth(args):
"""
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
"""
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth required")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + ".d{0}".format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, "--minaccn={0}".format(mindepth),
"--outfile={0}".format(bedgraphfiltered)])
merged = bedgraphfiltered + ".merge.fasta"
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True) | %prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth. | Below is the the instruction that describes the task:
### Input:
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
### Response:
def mergebydepth(args):
"""
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
"""
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth required")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + ".d{0}".format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, "--minaccn={0}".format(mindepth),
"--outfile={0}".format(bedgraphfiltered)])
merged = bedgraphfiltered + ".merge.fasta"
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True) |
def generate_common(env):
"""Add internal Builders and construction variables for LaTeX to an Environment."""
# Add OSX system paths so TeX tools can be found
# when a list of tools is given the exists() method is not called
generate_darwin(env)
# A generic tex file Action, sufficient for all tex files.
global TeXAction
if TeXAction is None:
TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
# An Action to build a latex file. This might be needed more
# than once if we are dealing with labels and bibtex.
global LaTeXAction
if LaTeXAction is None:
LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
# Define an action to run BibTeX on a file.
global BibTeXAction
if BibTeXAction is None:
BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
# Define an action to run Biber on a file.
global BiberAction
if BiberAction is None:
BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR")
# Define an action to run MakeIndex on a file.
global MakeIndexAction
if MakeIndexAction is None:
MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
# Define an action to run MakeIndex on a file for nomenclatures.
global MakeNclAction
if MakeNclAction is None:
MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
# Define an action to run MakeIndex on a file for glossaries.
global MakeGlossaryAction
if MakeGlossaryAction is None:
MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
# Define an action to run MakeIndex on a file for acronyms.
global MakeAcronymsAction
if MakeAcronymsAction is None:
MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Some Linux platforms have pdflatex set up in a way
# that requires that the HOME environment variable be set.
# Add it here if defined.
v = os.environ.get('HOME')
if v:
environ['HOME'] = v
CDCOM = 'cd '
if platform.system() == 'Windows':
# allow cd command to change drives on Windows
CDCOM = 'cd /D '
env['TEX'] = 'tex'
env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
env['LATEX'] = 'latex'
env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 4
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['BIBTEX'] = 'bibtex'
env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
env['BIBER'] = 'biber'
env['BIBERFLAGS'] = SCons.Util.CLVar('')
env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'
env['MAKEINDEX'] = 'makeindex'
env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
env['MAKEGLOSSARY'] = 'makeindex'
env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
env['MAKEACRONYMS'] = 'makeindex'
env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
env['MAKENCL'] = 'makeindex'
env['MAKENCLSTYLE'] = 'nomencl.ist'
env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
env['MAKENEWGLOSSARY'] = 'makeindex'
env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY ' | Add internal Builders and construction variables for LaTeX to an Environment. | Below is the the instruction that describes the task:
### Input:
Add internal Builders and construction variables for LaTeX to an Environment.
### Response:
def generate_common(env):
"""Add internal Builders and construction variables for LaTeX to an Environment."""
# Add OSX system paths so TeX tools can be found
# when a list of tools is given the exists() method is not called
generate_darwin(env)
# A generic tex file Action, sufficient for all tex files.
global TeXAction
if TeXAction is None:
TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
# An Action to build a latex file. This might be needed more
# than once if we are dealing with labels and bibtex.
global LaTeXAction
if LaTeXAction is None:
LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
# Define an action to run BibTeX on a file.
global BibTeXAction
if BibTeXAction is None:
BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
# Define an action to run Biber on a file.
global BiberAction
if BiberAction is None:
BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR")
# Define an action to run MakeIndex on a file.
global MakeIndexAction
if MakeIndexAction is None:
MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
# Define an action to run MakeIndex on a file for nomenclatures.
global MakeNclAction
if MakeNclAction is None:
MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
# Define an action to run MakeIndex on a file for glossaries.
global MakeGlossaryAction
if MakeGlossaryAction is None:
MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
# Define an action to run MakeIndex on a file for acronyms.
global MakeAcronymsAction
if MakeAcronymsAction is None:
MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Some Linux platforms have pdflatex set up in a way
# that requires that the HOME environment variable be set.
# Add it here if defined.
v = os.environ.get('HOME')
if v:
environ['HOME'] = v
CDCOM = 'cd '
if platform.system() == 'Windows':
# allow cd command to change drives on Windows
CDCOM = 'cd /D '
env['TEX'] = 'tex'
env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
env['LATEX'] = 'latex'
env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 4
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['BIBTEX'] = 'bibtex'
env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
env['BIBER'] = 'biber'
env['BIBERFLAGS'] = SCons.Util.CLVar('')
env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'
env['MAKEINDEX'] = 'makeindex'
env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
env['MAKEGLOSSARY'] = 'makeindex'
env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
env['MAKEACRONYMS'] = 'makeindex'
env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
env['MAKENCL'] = 'makeindex'
env['MAKENCLSTYLE'] = 'nomencl.ist'
env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
env['MAKENEWGLOSSARY'] = 'makeindex'
env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY ' |
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
"""Run GNOM on the dataset.
Inputs:
curve: an instance of sastool.classes2.Curve or anything which has a
save() method, saving the scattering curve to a given .dat file,
in q=4*pi*sin(theta)/lambda [1/nm] units
Rmax: the estimated maximum extent of the scattering object, in nm.
outputfilename: the preferred name of the output file. If not given,
the .out file produced by gnom will be lost.
Npoints_realspace: the expected number of points in the real space
initial_alpha: the initial value of the regularization parameter.
Outputs:
the same as of read_gnom_pr()
"""
with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
curve.save(os.path.join(td, 'curve.dat'))
print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
if Npoints_realspace is None:
Npoints_realspace = ""
else:
Npoints_realspace = str(Npoints_realspace)
if initial_alpha is None:
initial_alpha = ""
else:
initial_alpha = str(initial_alpha)
# GNOM questions and our answers:
# Printer type [ postscr ] : <ENTER>
# Input data, first file : <curve.dat in the temporary directory><ENTER>
# Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
# No of start points to skip [ 0 ] : 0<ENTER>
# ... (just GNOM output)
# ... (just GNOM output)
# Input data, second file [ none ] : <ENTER>
# No of end points to omit [ 0 ] : 0<ENTER>
# ... (just GNOM output)
# ... (just GNOM output)
# Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
# Plot input dataa (Y/N) [ Yes ] : N<ENTER>
# File containing expert parameters [ none ] : <ENTER>
# Kernel already calculated (Y/N) [ No ] : N<ENTER>
# Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
# Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
# Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
# -- Arbitrary monodisperse system --
# Rmin=0, Rmax is maximum particle diameter
# Rmax for evaluating p(r) : <Rmax * 10><ENTER>
# Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
# Kernel-storage file name [ kern.bin ] : <ENTER>
# Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
# Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
# Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
# Plot results (Y/N) [ Yes ] : N<ENTER>
# ... solution ...
# Your choice : <ENTER>
# Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
# Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
# Next data set (Yes/No/Same) [ No ] : N<ENTER>
gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
input=gnominput.encode('utf-8'))
pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
pr[:, 0] /= 10
metadata['q'] *= 10
metadata['qj'] *= 10
metadata['qmin'] *= 10
metadata['qmax'] *= 10
metadata['dmax'] /= 10
metadata['dmin'] /= 10
metadata['Rg_guinier'] /= 10
metadata['Rg_gnom'] /= 10
if outputfilename is not None:
shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
return pr, metadata | Run GNOM on the dataset.
Inputs:
curve: an instance of sastool.classes2.Curve or anything which has a
save() method, saving the scattering curve to a given .dat file,
in q=4*pi*sin(theta)/lambda [1/nm] units
Rmax: the estimated maximum extent of the scattering object, in nm.
outputfilename: the preferred name of the output file. If not given,
the .out file produced by gnom will be lost.
Npoints_realspace: the expected number of points in the real space
initial_alpha: the initial value of the regularization parameter.
Outputs:
the same as of read_gnom_pr() | Below is the the instruction that describes the task:
### Input:
Run GNOM on the dataset.
Inputs:
curve: an instance of sastool.classes2.Curve or anything which has a
save() method, saving the scattering curve to a given .dat file,
in q=4*pi*sin(theta)/lambda [1/nm] units
Rmax: the estimated maximum extent of the scattering object, in nm.
outputfilename: the preferred name of the output file. If not given,
the .out file produced by gnom will be lost.
Npoints_realspace: the expected number of points in the real space
initial_alpha: the initial value of the regularization parameter.
Outputs:
the same as of read_gnom_pr()
### Response:
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
"""Run GNOM on the dataset.
Inputs:
curve: an instance of sastool.classes2.Curve or anything which has a
save() method, saving the scattering curve to a given .dat file,
in q=4*pi*sin(theta)/lambda [1/nm] units
Rmax: the estimated maximum extent of the scattering object, in nm.
outputfilename: the preferred name of the output file. If not given,
the .out file produced by gnom will be lost.
Npoints_realspace: the expected number of points in the real space
initial_alpha: the initial value of the regularization parameter.
Outputs:
the same as of read_gnom_pr()
"""
with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
curve.save(os.path.join(td, 'curve.dat'))
print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
if Npoints_realspace is None:
Npoints_realspace = ""
else:
Npoints_realspace = str(Npoints_realspace)
if initial_alpha is None:
initial_alpha = ""
else:
initial_alpha = str(initial_alpha)
# GNOM questions and our answers:
# Printer type [ postscr ] : <ENTER>
# Input data, first file : <curve.dat in the temporary directory><ENTER>
# Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
# No of start points to skip [ 0 ] : 0<ENTER>
# ... (just GNOM output)
# ... (just GNOM output)
# Input data, second file [ none ] : <ENTER>
# No of end points to omit [ 0 ] : 0<ENTER>
# ... (just GNOM output)
# ... (just GNOM output)
# Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
# Plot input dataa (Y/N) [ Yes ] : N<ENTER>
# File containing expert parameters [ none ] : <ENTER>
# Kernel already calculated (Y/N) [ No ] : N<ENTER>
# Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
# Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
# Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
# -- Arbitrary monodisperse system --
# Rmin=0, Rmax is maximum particle diameter
# Rmax for evaluating p(r) : <Rmax * 10><ENTER>
# Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
# Kernel-storage file name [ kern.bin ] : <ENTER>
# Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
# Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
# Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
# Plot results (Y/N) [ Yes ] : N<ENTER>
# ... solution ...
# Your choice : <ENTER>
# Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
# Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
# Next data set (Yes/No/Same) [ No ] : N<ENTER>
gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
input=gnominput.encode('utf-8'))
pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
pr[:, 0] /= 10
metadata['q'] *= 10
metadata['qj'] *= 10
metadata['qmin'] *= 10
metadata['qmax'] *= 10
metadata['dmax'] /= 10
metadata['dmin'] /= 10
metadata['Rg_guinier'] /= 10
metadata['Rg_gnom'] /= 10
if outputfilename is not None:
shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
return pr, metadata |
def info(path):
"""info(path)
path is a string
"""
fh = None
try:
fh = open(path,'rb')
return __readHeader(fh)
finally:
if fh:
fh.close()
return None | info(path)
path is a string | Below is the the instruction that describes the task:
### Input:
info(path)
path is a string
### Response:
def info(path):
"""info(path)
path is a string
"""
fh = None
try:
fh = open(path,'rb')
return __readHeader(fh)
finally:
if fh:
fh.close()
return None |
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):
""" Simple, multi-purpose depth-first search.
Visits all the nodes connected to the root, depth-first.
The visit function is called on each node.
Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
Note: node._visited is expected to be False for all nodes.
"""
stop = visit(root)
root._visited = True
for node in root.links:
if stop: return True
if not traversable(root, root.links.edge(node)): continue
if not node._visited:
stop = depth_first_search(node, visit, traversable)
return stop | Simple, multi-purpose depth-first search.
Visits all the nodes connected to the root, depth-first.
The visit function is called on each node.
Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
Note: node._visited is expected to be False for all nodes. | Below is the the instruction that describes the task:
### Input:
Simple, multi-purpose depth-first search.
Visits all the nodes connected to the root, depth-first.
The visit function is called on each node.
Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
Note: node._visited is expected to be False for all nodes.
### Response:
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):
""" Simple, multi-purpose depth-first search.
Visits all the nodes connected to the root, depth-first.
The visit function is called on each node.
Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
Note: node._visited is expected to be False for all nodes.
"""
stop = visit(root)
root._visited = True
for node in root.links:
if stop: return True
if not traversable(root, root.links.edge(node)): continue
if not node._visited:
stop = depth_first_search(node, visit, traversable)
return stop |
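A generic, self-contained variant of the same pattern for a plain adjacency-dict graph (the entry above assumes a graph library with node.links and edge objects); the visit and traversable callbacks behave the same way.

def dfs(graph, root, visit=lambda n: False, traversable=lambda n, m: True, seen=None):
    seen = set() if seen is None else seen
    seen.add(root)
    stop = visit(root)
    for nxt in graph.get(root, ()):
        if stop:
            return True
        if not traversable(root, nxt) or nxt in seen:
            continue
        stop = dfs(graph, nxt, visit, traversable, seen)
    return stop

g = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
order = []
dfs(g, 'a', visit=lambda n: (order.append(n), False)[1])
print(order)   # ['a', 'b', 'd', 'c']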
def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype="uint32")
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0
else:
v2[i] = 1
return v2 | Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector | Below is the the instruction that describes the task:
### Input:
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
### Response:
def corruptVector(v1, noiseLevel, numActiveCols):
"""
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits.
@param v1 (array) binary vector whose copy will be corrupted
@param noiseLevel (float) amount of noise to be applied on the new vector
@param numActiveCols (int) number of sparse columns that represent an input
@return v2 (array) corrupted binary vector
"""
size = len(v1)
v2 = np.zeros(size, dtype="uint32")
bitsToSwap = int(noiseLevel * numActiveCols)
# Copy the contents of v1 into v2
for i in range(size):
v2[i] = v1[i]
for _ in range(bitsToSwap):
i = random.randrange(size)
if v2[i] == 1:
v2[i] = 0
else:
v2[i] = 1
return v2 |
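A NumPy sketch of the same noise idea; unlike the row above it samples flip positions without replacement, so the number of changed bits is exactly int(noiseLevel * numActiveCols).

import numpy as np

rng = np.random.default_rng(0)

def corrupt(v, noise_level, num_active):
    out = v.copy()
    flips = rng.choice(len(v), size=int(noise_level * num_active), replace=False)
    out[flips] ^= 1   # invert the chosen bits
    return out

v = np.zeros(20, dtype=np.uint32)
v[:5] = 1                          # 5 active bits
noisy = corrupt(v, noise_level=0.4, num_active=5)
print(int((v != noisy).sum()))     # 2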
def value(self):
"""gets the color value"""
return {
"type" : self._type,
"style" : self._style,
"color" : self._color.value,
"width" : self._width
} | gets the color value | Below is the the instruction that describes the task:
### Input:
gets the color value
### Response:
def value(self):
"""gets the color value"""
return {
"type" : self._type,
"style" : self._style,
"color" : self._color.value,
"width" : self._width
} |
def weight(w, sparsity):
"""Weight-level magnitude pruning."""
w_shape = common_layers.shape_list(w)
k = int(np.prod(w_shape[:-1]))
count = tf.to_int32(k * sparsity)
mask = common_layers.weight_targeting(w, count)
return (1 - mask) * w | Weight-level magnitude pruning. | Below is the the instruction that describes the task:
### Input:
Weight-level magnitude pruning.
### Response:
def weight(w, sparsity):
"""Weight-level magnitude pruning."""
w_shape = common_layers.shape_list(w)
k = int(np.prod(w_shape[:-1]))
count = tf.to_int32(k * sparsity)
mask = common_layers.weight_targeting(w, count)
return (1 - mask) * w |
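A NumPy sketch of weight-level magnitude pruning (the row above relies on TensorFlow helpers from its surrounding codebase): the smallest-magnitude entries are zeroed until the requested sparsity is reached; ties at the threshold are all dropped.

import numpy as np

def prune_by_magnitude(w, sparsity):
    k = int(w.size * sparsity)      # how many weights to zero out
    if k == 0:
        return w.copy()
    threshold = np.partition(np.abs(w).ravel(), k - 1)[k - 1]
    return np.where(np.abs(w) <= threshold, 0.0, w)

w = np.array([[0.1, -2.0, 0.03], [1.5, -0.2, 0.7]])
print(prune_by_magnitude(w, 0.5))   # zeroes 0.1, 0.03 and -0.2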
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
if model['type'] != 'notebook':
return super(TextFileContentsManager, self).save(model, path)
nbk = model['content']
try:
metadata = nbk.get('metadata')
rearrange_jupytext_metadata(metadata)
jupytext_metadata = metadata.setdefault('jupytext', {})
jupytext_formats = jupytext_metadata.get('formats') or self.default_formats(path)
if not jupytext_formats:
text_representation = jupytext_metadata.get('text_representation', {})
ext = os.path.splitext(path)[1]
fmt = {'extension': ext}
if ext == text_representation.get('extension') and text_representation.get('format_name'):
fmt['format_name'] = text_representation.get('format_name')
jupytext_formats = [fmt]
jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata)
# Set preferred formats if not format name is given yet
jupytext_formats = [preferred_format(fmt, self.preferred_jupytext_formats_save) for fmt in jupytext_formats]
base, fmt = find_base_path_and_format(path, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
self.set_default_format_options(jupytext_metadata)
if not jupytext_metadata:
metadata.pop('jupytext')
# Save as ipynb first
return_value = None
value = None
for fmt in jupytext_formats[::-1]:
if fmt['extension'] != '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
self.log.info("Saving %s", os.path.basename(alt_path))
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# And then to the other formats, in reverse order so that
# the first format is the most recent
for fmt in jupytext_formats[::-1]:
if fmt['extension'] == '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
if 'format_name' in fmt and fmt['extension'] not in ['.Rmd', '.md']:
self.log.info("Saving %s in format %s:%s",
os.path.basename(alt_path), fmt['extension'][1:], fmt['format_name'])
else:
self.log.info("Saving %s", os.path.basename(alt_path))
with mock.patch('nbformat.writes', _jupytext_writes(fmt)):
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# Update modified timestamp to match that of the pair #207
return_value['last_modified'] = value['last_modified']
return return_value
except Exception as err:
raise HTTPError(400, str(err)) | Save the file model and return the model with no content. | Below is the the instruction that describes the task:
### Input:
Save the file model and return the model with no content.
### Response:
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
if model['type'] != 'notebook':
return super(TextFileContentsManager, self).save(model, path)
nbk = model['content']
try:
metadata = nbk.get('metadata')
rearrange_jupytext_metadata(metadata)
jupytext_metadata = metadata.setdefault('jupytext', {})
jupytext_formats = jupytext_metadata.get('formats') or self.default_formats(path)
if not jupytext_formats:
text_representation = jupytext_metadata.get('text_representation', {})
ext = os.path.splitext(path)[1]
fmt = {'extension': ext}
if ext == text_representation.get('extension') and text_representation.get('format_name'):
fmt['format_name'] = text_representation.get('format_name')
jupytext_formats = [fmt]
jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata)
# Set preferred formats if not format name is given yet
jupytext_formats = [preferred_format(fmt, self.preferred_jupytext_formats_save) for fmt in jupytext_formats]
base, fmt = find_base_path_and_format(path, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
self.set_default_format_options(jupytext_metadata)
if not jupytext_metadata:
metadata.pop('jupytext')
# Save as ipynb first
return_value = None
value = None
for fmt in jupytext_formats[::-1]:
if fmt['extension'] != '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
self.log.info("Saving %s", os.path.basename(alt_path))
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# And then to the other formats, in reverse order so that
# the first format is the most recent
for fmt in jupytext_formats[::-1]:
if fmt['extension'] == '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
if 'format_name' in fmt and fmt['extension'] not in ['.Rmd', '.md']:
self.log.info("Saving %s in format %s:%s",
os.path.basename(alt_path), fmt['extension'][1:], fmt['format_name'])
else:
self.log.info("Saving %s", os.path.basename(alt_path))
with mock.patch('nbformat.writes', _jupytext_writes(fmt)):
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# Update modified timestamp to match that of the pair #207
return_value['last_modified'] = value['last_modified']
return return_value
except Exception as err:
raise HTTPError(400, str(err)) |
def send(self, command, _id=None, result={}, frames=[], threads=None,
error_messages=[], warning_messages=[], info_messages=[],
exception=None):
""" Build a message from parameters and send it to debugger.
:param command: The command sent to the debugger client.
:type command: str
:param _id: Unique id of the sent message. Right now, it's always `None`
for messages by debugger to client.
:type _id: int
:param result: Used to send `exit_code` and updated `executionStatus`
to debugger client.
:type result: dict
:param frames: contains the complete stack frames when debugger sends
the `programBreak` message.
:type frames: list
:param error_messages: A list of error messages the debugger client must
display to the user.
:type error_messages: list of str
:param warning_messages: A list of warning messages the debugger client
must display to the user.
:type warning_messages: list of str
:param info_messages: A list of info messages the debugger client must
display to the user.
:type info_messages: list of str
:param exception: If the debugger encounters an exception, this dict contains
2 keys: `type` and `info` (the latter is the message).
:type exception: dict
"""
with self._connection_lock:
payload = {
'_id': _id,
'command': command,
'result': result,
'commandExecStatus': 'ok',
'frames': frames,
'info_messages': info_messages,
'warning_messages': warning_messages,
'error_messages': error_messages,
'exception': exception
}
if threads:
payload['threads'] = threads
msg = self.encode(payload)
if self._connection:
msg_bytes = bytearray(msg, 'utf-8')
send_bytes_count = self._connection.sendall(msg_bytes)
self.log_sent(msg)
return send_bytes_count
raise IKPdbConnectionError("Connection lost!") | Build a message from parameters and send it to debugger.
:param command: The command sent to the debugger client.
:type command: str
:param _id: Unique id of the sent message. Right now, it's always `None`
for messages by debugger to client.
:type _id: int
:param result: Used to send `exit_code` and updated `executionStatus`
to debugger client.
:type result: dict
:param frames: contains the complete stack frames when debugger sends
the `programBreak` message.
:type frames: list
:param error_messages: A list of error messages the debugger client must
display to the user.
:type error_messages: list of str
:param warning_messages: A list of warning messages the debugger client
must display to the user.
:type warning_messages: list of str
:param info_messages: A list of info messages the debugger client must
display to the user.
:type info_messages: list of str
:param exception: If the debugger encounters an exception, this dict contains
2 keys: `type` and `info` (the latter is the message).
:type exception: dict | Below is the the instruction that describes the task:
### Input:
Build a message from parameters and send it to debugger.
:param command: The command sent to the debugger client.
:type command: str
:param _id: Unique id of the sent message. Right now, it's always `None`
for messages by debugger to client.
:type _id: int
:param result: Used to send `exit_code` and updated `executionStatus`
to debugger client.
:type result: dict
:param frames: contains the complete stack frames when debugger sends
the `programBreak` message.
:type frames: list
:param error_messages: A list of error messages the debugger client must
display to the user.
:type error_messages: list of str
:param warning_messages: A list of warning messages the debugger client
must display to the user.
:type warning_messages: list of str
:param info_messages: A list of info messages the debugger client must
display to the user.
:type info_messages: list of str
:param exception: If the debugger encounters an exception, this dict contains
2 keys: `type` and `info` (the latter is the message).
:type exception: dict
### Response:
def send(self, command, _id=None, result={}, frames=[], threads=None,
error_messages=[], warning_messages=[], info_messages=[],
exception=None):
""" Build a message from parameters and send it to debugger.
:param command: The command sent to the debugger client.
:type command: str
:param _id: Unique id of the sent message. Right now, it's always `None`
for messages by debugger to client.
:type _id: int
:param result: Used to send `exit_code` and updated `executionStatus`
to debugger client.
:type result: dict
:param frames: contains the complete stack frames when debugger sends
the `programBreak` message.
:type frames: list
:param error_messages: A list of error messages the debugger client must
display to the user.
:type error_messages: list of str
:param warning_messages: A list of warning messages the debugger client
must display to the user.
:type warning_messages: list of str
:param info_messages: A list of info messages the debugger client must
display to the user.
:type info_messages: list of str
:param exception: If the debugger encounters an exception, this dict contains
2 keys: `type` and `info` (the latter is the message).
:type exception: dict
"""
with self._connection_lock:
payload = {
'_id': _id,
'command': command,
'result': result,
'commandExecStatus': 'ok',
'frames': frames,
'info_messages': info_messages,
'warning_messages': warning_messages,
'error_messages': error_messages,
'exception': exception
}
if threads:
payload['threads'] = threads
msg = self.encode(payload)
if self._connection:
msg_bytes = bytearray(msg, 'utf-8')
send_bytes_count = self._connection.sendall(msg_bytes)
self.log_sent(msg)
return send_bytes_count
raise IKPdbConnectionError("Connection lost!") |
def calculate_one_hot_encoder_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, 1] ---> [N, C']
C' is the total number of categorical values.
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
if operator.inputs[0].type.shape[1] != 1 or len(operator.inputs[0].type.shape) > 2:
raise RuntimeError('Input must be [N, 1]-tensor')
int_categories = operator.raw_operator.oneHotEncoder.int64Categories.vector
str_categories = operator.raw_operator.oneHotEncoder.stringCategories.vector
N = operator.inputs[0].type.shape[0]
if len(int_categories) > 0:
operator.outputs[0].type = FloatTensorType([N, len(int_categories)],
doc_string=operator.outputs[0].type.doc_string)
elif len(str_categories) > 0 and type(operator.inputs[0].type) == StringTensorType:
operator.outputs[0].type = FloatTensorType([N, len(str_categories)],
doc_string=operator.outputs[0].type.doc_string)
else:
raise ValueError('Categorical indexes are missing') | Allowed input/output patterns are
1. [N, 1] ---> [N, C']
C' is the total number of categorical values. | Below is the the instruction that describes the task:
### Input:
Allowed input/output patterns are
1. [N, 1] ---> [N, C']
C' is the total number of categorical values.
### Response:
def calculate_one_hot_encoder_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, 1] ---> [N, C']
C' is the total number of categorical values.
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
if operator.inputs[0].type.shape[1] != 1 or len(operator.inputs[0].type.shape) > 2:
raise RuntimeError('Input must be [N, 1]-tensor')
int_categories = operator.raw_operator.oneHotEncoder.int64Categories.vector
str_categories = operator.raw_operator.oneHotEncoder.stringCategories.vector
N = operator.inputs[0].type.shape[0]
if len(int_categories) > 0:
operator.outputs[0].type = FloatTensorType([N, len(int_categories)],
doc_string=operator.outputs[0].type.doc_string)
elif len(str_categories) > 0 and type(operator.inputs[0].type) == StringTensorType:
operator.outputs[0].type = FloatTensorType([N, len(str_categories)],
doc_string=operator.outputs[0].type.doc_string)
else:
raise ValueError('Categorical indexes are missing') |
def format_lp(nodes, constraints_x, qa, constraints_y, qb):
"""
Maximize
4 x1 + 2 x2 + 3 x3 + x4
Subject To
x1 + x2 <= 1
End
"""
lp_handle = cStringIO.StringIO()
lp_handle.write("Maximize\n ")
records = 0
for i, score in nodes:
lp_handle.write("+ %d x%d " % (score, i))
# SCIP does not like really long string per row
records += 1
if records % 10 == 0:
lp_handle.write("\n")
lp_handle.write("\n")
num_of_constraints = 0
lp_handle.write("Subject To\n")
for c in constraints_x:
additions = " + ".join("x%d" % (x+1) for x in c)
lp_handle.write(" %s <= %d\n" % (additions, qa))
num_of_constraints += len(constraints_x)
# non-self
if not (constraints_x is constraints_y):
for c in constraints_y:
additions = " + ".join("x%d" % (x+1) for x in c)
lp_handle.write(" %s <= %d\n" % (additions, qb))
num_of_constraints += len(constraints_y)
print("number of variables (%d), number of constraints (%d)" %
(len(nodes), num_of_constraints), file=sys.stderr)
lp_handle.write("Binary\n")
for i, score in nodes:
lp_handle.write(" x%d\n" % i)
lp_handle.write("End\n")
lp_data = lp_handle.getvalue()
lp_handle.close()
return lp_data | Maximize
4 x1 + 2 x2 + 3 x3 + x4
Subject To
x1 + x2 <= 1
End | Below is the the instruction that describes the task:
### Input:
Maximize
4 x1 + 2 x2 + 3 x3 + x4
Subject To
x1 + x2 <= 1
End
### Response:
def format_lp(nodes, constraints_x, qa, constraints_y, qb):
"""
Maximize
4 x1 + 2 x2 + 3 x3 + x4
Subject To
x1 + x2 <= 1
End
"""
lp_handle = cStringIO.StringIO()
lp_handle.write("Maximize\n ")
records = 0
for i, score in nodes:
lp_handle.write("+ %d x%d " % (score, i))
# SCIP does not like really long string per row
records += 1
if records % 10 == 0:
lp_handle.write("\n")
lp_handle.write("\n")
num_of_constraints = 0
lp_handle.write("Subject To\n")
for c in constraints_x:
additions = " + ".join("x%d" % (x+1) for x in c)
lp_handle.write(" %s <= %d\n" % (additions, qa))
num_of_constraints += len(constraints_x)
# non-self
if not (constraints_x is constraints_y):
for c in constraints_y:
additions = " + ".join("x%d" % (x+1) for x in c)
lp_handle.write(" %s <= %d\n" % (additions, qb))
num_of_constraints += len(constraints_y)
print("number of variables (%d), number of constraints (%d)" %
(len(nodes), num_of_constraints), file=sys.stderr)
lp_handle.write("Binary\n")
for i, score in nodes:
lp_handle.write(" x%d\n" % i)
lp_handle.write("End\n")
lp_data = lp_handle.getvalue()
lp_handle.close()
return lp_data |
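The format_lp row above targets Python 2 (cStringIO, print redirection); a minimal Python 3 re-sketch of the same LP layout, with hypothetical sample nodes and constraints, could look like this:

import io

def format_lp_sketch(nodes, constraints, q):
    # nodes: iterable of (index, score); constraints: iterable of 0-based index tuples.
    buf = io.StringIO()
    buf.write("Maximize\n ")
    for i, score in nodes:
        buf.write("+ %d x%d " % (score, i))
    buf.write("\nSubject To\n")
    for c in constraints:
        buf.write(" %s <= %d\n" % (" + ".join("x%d" % (x + 1) for x in c), q))
    buf.write("Binary\n")
    for i, _ in nodes:
        buf.write(" x%d\n" % i)
    buf.write("End\n")
    return buf.getvalue()

print(format_lp_sketch(nodes=[(1, 4), (2, 2)], constraints=[(0, 1)], q=1))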
def load_loops(directory, loops_path=None):
"""
Return a list of tuples indicating the start and end points of the loops
that were sampled in the given directory.
"""
if loops_path is None:
workspace = workspace_from_dir(directory)
loops_path = workspace.loops_path
from klab.rosetta.input_files import LoopsFile
loops_parser = LoopsFile.from_filepath(loops_path)
# We have to account for some weird indexing behavior in the loops file
# parser that I don't really understand. It seems to shrink the loop by
# one residue on each side. At first I thought it might be trying to
# convert the indices to python indexing, but on second thought I have no
# idea what it's trying to do.
return [(x-1, y+1) for x, y in loops_parser.get_distinct_segments()] | Return a list of tuples indicating the start and end points of the loops
that were sampled in the given directory. | Below is the the instruction that describes the task:
### Input:
Return a list of tuples indicating the start and end points of the loops
that were sampled in the given directory.
### Response:
def load_loops(directory, loops_path=None):
"""
Return a list of tuples indicating the start and end points of the loops
that were sampled in the given directory.
"""
if loops_path is None:
workspace = workspace_from_dir(directory)
loops_path = workspace.loops_path
from klab.rosetta.input_files import LoopsFile
loops_parser = LoopsFile.from_filepath(loops_path)
# We have to account for some weird indexing behavior in the loops file
# parser that I don't really understand. It seems to shrink the loop by
# one residue on each side. At first I thought it might be trying to
# convert the indices to python indexing, but on second thought I have no
# idea what it's trying to do.
return [(x-1, y+1) for x, y in loops_parser.get_distinct_segments()] |
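The (x-1, y+1) step simply widens each parsed segment by one residue on both sides; with hypothetical segment values:

segments = [(24, 42), (88, 95)]               # hypothetical distinct segments
print([(x - 1, y + 1) for x, y in segments])  # [(23, 43), (87, 96)]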
def register(key, initializer: callable, param=None):
'''Adds resolver to global container'''
get_current_scope().container.register(key, initializer, param) | Adds resolver to global container | Below is the the instruction that describes the task:
### Input:
Adds resolver to global container
### Response:
def register(key, initializer: callable, param=None):
'''Adds resolver to global container'''
get_current_scope().container.register(key, initializer, param) |
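get_current_scope() and its container are not shown in this row, so the toy registry below only illustrates the register/resolve idea; it is not the library's actual API.

class ToyContainer:
    def __init__(self):
        self._resolvers = {}

    def register(self, key, initializer, param=None):
        # Store the factory and its optional parameter under the key.
        self._resolvers[key] = (initializer, param)

    def resolve(self, key):
        initializer, param = self._resolvers[key]
        return initializer(param) if param is not None else initializer()

container = ToyContainer()
container.register("greeting", lambda: "hello")
print(container.resolve("greeting"))  # hello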
def add_match(self, entity, *traits):
"""
Add a matching entity to the index.
We have to maintain the constraints of the data layout:
- `self.mismatch_unknown` must still contain all matched entities
- each key of the index must mismatch all known matching entities except those this particular key
explicitly includes
For data layout description, see the class-level docstring.
:param collections.Hashable entity: an object to be matching the values of `traits_indexed_by`
:param list traits: a list of hashable values to index the object with
"""
# The index traits of `traits_indexed_by` might have already been used to index some other entities. Those
# relations are to be preserved. If the trait was not used to index any entity, we initialize them to mismatch
# all matching entities known so far.
for trait in traits:
if trait not in self.index:
self.index[trait] = self.mismatch_unknown.copy()
# Now each known trait this entity is not matching, will explicitly mismatch currently added entity.
for existing_trait in self.index:
if existing_trait not in traits:
self.index[existing_trait].add(entity)
# From now on, any new matching or mismatching index will mismatch this entity by default.
self.mismatch_unknown.add(entity) | Add a matching entity to the index.
We have to maintain the constraints of the data layout:
- `self.mismatch_unknown` must still contain all matched entities
- each key of the index must mismatch all known matching entities except those this particular key
explicitly includes
For data layout description, see the class-level docstring.
:param collections.Hashable entity: an object to be matching the values of `traits_indexed_by`
:param list traits: a list of hashable values to index the object with | Below is the the instruction that describes the task:
### Input:
Add a matching entity to the index.
We have to maintain the constraints of the data layout:
- `self.mismatch_unknown` must still contain all matched entities
- each key of the index must mismatch all known matching entities except those this particular key
explicitly includes
For data layout description, see the class-level docstring.
:param collections.Hashable entity: an object to be matching the values of `traits_indexed_by`
:param list traits: a list of hashable values to index the object with
### Response:
def add_match(self, entity, *traits):
"""
Add a matching entity to the index.
We have to maintain the constraints of the data layout:
- `self.mismatch_unknown` must still contain all matched entities
- each key of the index must mismatch all known matching entities except those this particular key
explicitly includes
For data layout description, see the class-level docstring.
:param collections.Hashable entity: an object to be matching the values of `traits_indexed_by`
:param list traits: a list of hashable values to index the object with
"""
# The index traits of `traits_indexed_by` might have already been used to index some other entities. Those
# relations are to be preserved. If the trait was not used to index any entity, we initialize them to mismatch
# all matching entities known so far.
for trait in traits:
if trait not in self.index:
self.index[trait] = self.mismatch_unknown.copy()
# Now each known trait this entity is not matching, will explicitly mismatch currently added entity.
for existing_trait in self.index:
if existing_trait not in traits:
self.index[existing_trait].add(entity)
# From now on, any new matching or mismatching index will mismatch this entity by default.
self.mismatch_unknown.add(entity) |
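A self-contained, simplified version of the invariant described in that docstring (only the pieces visible above are modeled; the real class has more to it):

class MatchIndexSketch:
    def __init__(self):
        self.index = {}                 # trait -> set of entities that mismatch it
        self.mismatch_unknown = set()   # entities every future trait mismatches by default

    def add_match(self, entity, *traits):
        for trait in traits:
            if trait not in self.index:
                self.index[trait] = set(self.mismatch_unknown)
        for existing_trait in self.index:
            if existing_trait not in traits:
                self.index[existing_trait].add(entity)
        self.mismatch_unknown.add(entity)

idx = MatchIndexSketch()
idx.add_match("rule-a", "linux")
idx.add_match("rule-b", "windows")
print(idx.index["linux"])    # {'rule-b'} -- rule-b mismatches the 'linux' trait
print(idx.index["windows"])  # {'rule-a'}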
def _remove_buffers(state):
"""Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
"""
buffer_paths, buffers = [], []
state = _separate_buffers(state, [], buffer_paths, buffers)
return state, buffer_paths, buffers | Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>]) | Below is the the instruction that describes the task:
### Input:
Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
### Response:
def _remove_buffers(state):
"""Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
"""
buffer_paths, buffers = [], []
state = _separate_buffers(state, [], buffer_paths, buffers)
return state, buffer_paths, buffers |
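_separate_buffers is not shown in this row; the standalone sketch below reproduces the documented behaviour for dict-nested buffers only, as an illustration rather than the widget library's actual helper.

def remove_buffers_sketch(state, path=(), buffer_paths=None, buffers=None):
    if buffer_paths is None:
        buffer_paths, buffers = [], []
    out = {}
    for key, value in state.items():
        if isinstance(value, (bytes, bytearray, memoryview)):
            # Record where the buffer lived and strip it from the state copy.
            buffer_paths.append(list(path) + [key])
            buffers.append(value)
        elif isinstance(value, dict):
            out[key], _, _ = remove_buffers_sketch(value, path + (key,), buffer_paths, buffers)
        else:
            out[key] = value
    return out, buffer_paths, buffers

state = {'plain': [0, 'text'], 'x': {'ar': memoryview(b'\x01\x02')}}
print(remove_buffers_sketch(state))
# ({'plain': [0, 'text'], 'x': {}}, [['x', 'ar']], [<memory at 0x...>])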
def collect_blame_info(cls, matches):
"""Runs git blame on files, for the specified sets of line ranges.
If no line range tuples are provided, it will do all lines.
"""
old_area = None
for filename, ranges in matches:
area, name = os.path.split(filename)
if not area:
area = '.'
if area != old_area:
print("\n\n%s/\n" % area)
old_area = area
print("%s " % name, end="")
filter = cls.build_line_range_filter(ranges)
command = ['git', 'blame', '--line-porcelain'] + filter + [name]
os.chdir(area)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err:
print(" <<<<<<<<<< Unable to collect 'git blame' info:", err)
else:
yield out | Runs git blame on files, for the specified sets of line ranges.
If no line range tuples are provided, it will do all lines. | Below is the the instruction that describes the task:
### Input:
Runs git blame on files, for the specified sets of line ranges.
If no line range tuples are provided, it will do all lines.
### Response:
def collect_blame_info(cls, matches):
"""Runs git blame on files, for the specified sets of line ranges.
If no line range tuples are provided, it will do all lines.
"""
old_area = None
for filename, ranges in matches:
area, name = os.path.split(filename)
if not area:
area = '.'
if area != old_area:
print("\n\n%s/\n" % area)
old_area = area
print("%s " % name, end="")
filter = cls.build_line_range_filter(ranges)
command = ['git', 'blame', '--line-porcelain'] + filter + [name]
os.chdir(area)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err:
print(" <<<<<<<<<< Unable to collect 'git blame' info:", err)
else:
yield out |
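build_line_range_filter is not shown in this row; assuming it produces git's own -L start,end options, a hedged standalone sketch of the blame call could look like this (the file name in the commented call is hypothetical):

import subprocess

def blame_ranges_sketch(filename, ranges):
    line_filter = []
    for start, end in ranges:
        line_filter += ["-L", "%d,%d" % (start, end)]
    cmd = ["git", "blame", "--line-porcelain"] + line_filter + [filename]
    return subprocess.run(cmd, capture_output=True, text=True).stdout

# Only meaningful inside a git checkout, e.g.:
# print(blame_ranges_sketch("setup.py", [(1, 5)]))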
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = OrderedDict(self.variables)
else:
# don't align because we already called xarray.align
variables = expand_and_merge_variables(
[self.variables, other.variables])
return variables | For use with binary arithmetic. | Below is the the instruction that describes the task:
### Input:
For use with binary arithmetic.
### Response:
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = OrderedDict(self.variables)
else:
# don't align because we already called xarray.align
variables = expand_and_merge_variables(
[self.variables, other.variables])
return variables |
def restore(self):
"""
Restore the snapshot
"""
yield from self._project.delete_on_computes()
# We don't send close notif to clients because the close / open dance is purely internal
yield from self._project.close(ignore_notification=True)
self._project.controller.notification.emit("snapshot.restored", self.__json__())
try:
if os.path.exists(os.path.join(self._project.path, "project-files")):
shutil.rmtree(os.path.join(self._project.path, "project-files"))
with open(self._path, "rb") as f:
project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path)
except (OSError, PermissionError) as e:
raise aiohttp.web.HTTPConflict(text=str(e))
yield from project.open()
return project | Restore the snapshot | Below is the the instruction that describes the task:
### Input:
Restore the snapshot
### Response:
def restore(self):
"""
Restore the snapshot
"""
yield from self._project.delete_on_computes()
# We don't send close notif to clients because the close / open dance is purely internal
yield from self._project.close(ignore_notification=True)
self._project.controller.notification.emit("snapshot.restored", self.__json__())
try:
if os.path.exists(os.path.join(self._project.path, "project-files")):
shutil.rmtree(os.path.join(self._project.path, "project-files"))
with open(self._path, "rb") as f:
project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path)
except (OSError, PermissionError) as e:
raise aiohttp.web.HTTPConflict(text=str(e))
yield from project.open()
return project |
def loaded(self, request, *args, **kwargs):
"""Return a list of loaded Packs.
"""
serializer = self.get_serializer(list(Pack.objects.all()),
many=True)
return Response(serializer.data) | Return a list of loaded Packs. | Below is the the instruction that describes the task:
### Input:
Return a list of loaded Packs.
### Response:
def loaded(self, request, *args, **kwargs):
"""Return a list of loaded Packs.
"""
serializer = self.get_serializer(list(Pack.objects.all()),
many=True)
return Response(serializer.data) |
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
trunk_list_src_port = ET.SubElement(trunk_list_member, "trunk-list-src-port")
trunk_list_src_port.text = kwargs.pop('trunk_list_src_port')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
trunk_list_src_port = ET.SubElement(trunk_list_member, "trunk-list-src-port")
trunk_list_src_port.text = kwargs.pop('trunk_list_src_port')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
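Rebuilt standalone with ElementTree, the request body this auto-generated method assembles looks like the following (the port value is a made-up example):

import xml.etree.ElementTree as ET

root = ET.Element("show_fabric_trunk_info")
output = ET.SubElement(root, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
member = ET.SubElement(groups, "trunk-list-member")
src_port = ET.SubElement(member, "trunk-list-src-port")
src_port.text = "1/0/5"  # hypothetical source port
print(ET.tostring(root).decode())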
def generate_enums_info(enums, msgs):
"""Add camel case swift names for enums an entries, descriptions and sort enums alphabetically"""
for enum in enums:
enum.swift_name = camel_case_from_underscores(enum.name)
enum.raw_value_type = get_enum_raw_type(enum, msgs)
enum.formatted_description = ""
if enum.description:
enum.description = " ".join(enum.description.split())
enum.formatted_description = "\n/**\n %s\n*/\n" % enum.description
all_entities = []
entities_info = []
for entry in enum.entry:
name = entry.name.replace(enum.name + '_', '')
"""Ensure that enums entry name does not start from digit"""
if name[0].isdigit():
name = "MAV_" + name
entry.swift_name = camel_case_from_underscores(name)
entry.formatted_description = ""
if entry.description:
entry.description = " ".join(entry.description.split())
entry.formatted_description = "\n\t/// " + entry.description + "\n"
all_entities.append(entry.swift_name)
entities_info.append('("%s", "%s")' % (entry.name, entry.description.replace('"','\\"')))
enum.all_entities = ", ".join(all_entities)
enum.entities_info = ", ".join(entities_info)
enum.entity_description = enum.description.replace('"','\\"')
    enums.sort(key = lambda enum : enum.swift_name) | Add camel case swift names for enums and entries, descriptions and sort enums alphabetically | Below is the the instruction that describes the task:
### Input:
Add camel case swift names for enums and entries, descriptions and sort enums alphabetically
### Response:
def generate_enums_info(enums, msgs):
"""Add camel case swift names for enums an entries, descriptions and sort enums alphabetically"""
for enum in enums:
enum.swift_name = camel_case_from_underscores(enum.name)
enum.raw_value_type = get_enum_raw_type(enum, msgs)
enum.formatted_description = ""
if enum.description:
enum.description = " ".join(enum.description.split())
enum.formatted_description = "\n/**\n %s\n*/\n" % enum.description
all_entities = []
entities_info = []
for entry in enum.entry:
name = entry.name.replace(enum.name + '_', '')
"""Ensure that enums entry name does not start from digit"""
if name[0].isdigit():
name = "MAV_" + name
entry.swift_name = camel_case_from_underscores(name)
entry.formatted_description = ""
if entry.description:
entry.description = " ".join(entry.description.split())
entry.formatted_description = "\n\t/// " + entry.description + "\n"
all_entities.append(entry.swift_name)
entities_info.append('("%s", "%s")' % (entry.name, entry.description.replace('"','\\"')))
enum.all_entities = ", ".join(all_entities)
enum.entities_info = ", ".join(entities_info)
enum.entity_description = enum.description.replace('"','\\"')
enums.sort(key = lambda enum : enum.swift_name) |
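camel_case_from_underscores is assumed by this row but not shown; a plausible minimal version, for illustration only, could be:

def camel_case_from_underscores(name):
    # Lower-case the words and capitalize every word after the first.
    parts = name.lower().split("_")
    return parts[0] + "".join(p.capitalize() for p in parts[1:])

print(camel_case_from_underscores("MAV_CMD_NAV_TAKEOFF"))  # mavCmdNavTakeoff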
def _configure_manager(self):
"""
Creates a manager to handle the instances, and another
to handle flavors.
"""
self._manager = CloudDatabaseManager(self,
resource_class=CloudDatabaseInstance, response_key="instance",
uri_base="instances")
self._flavor_manager = BaseManager(self,
resource_class=CloudDatabaseFlavor, response_key="flavor",
uri_base="flavors")
self._backup_manager = CloudDatabaseBackupManager(self,
resource_class=CloudDatabaseBackup, response_key="backup",
uri_base="backups") | Creates a manager to handle the instances, and another
to handle flavors. | Below is the the instruction that describes the task:
### Input:
Creates a manager to handle the instances, and another
to handle flavors.
### Response:
def _configure_manager(self):
"""
Creates a manager to handle the instances, and another
to handle flavors.
"""
self._manager = CloudDatabaseManager(self,
resource_class=CloudDatabaseInstance, response_key="instance",
uri_base="instances")
self._flavor_manager = BaseManager(self,
resource_class=CloudDatabaseFlavor, response_key="flavor",
uri_base="flavors")
self._backup_manager = CloudDatabaseBackupManager(self,
resource_class=CloudDatabaseBackup, response_key="backup",
uri_base="backups") |
def enum_subpattern(p):
subpattern_id, d = p
patterns = list(enum_gen(d))
'''
if subpattern_id:
subpat_iter = EnumSubpatternIterator(subpattern_id, patterns)
SUBPATTERNS[subpattern_id] = subpat_iter
return subpat_iter
else:
return patterns
'''
return patterns | if subpattern_id:
subpat_iter = EnumSubpatternIterator(subpattern_id, patterns)
SUBPATTERNS[subpattern_id] = subpat_iter
return subpat_iter
else:
return patterns | Below is the the instruction that describes the task:
### Input:
if subpattern_id:
subpat_iter = EnumSubpatternIterator(subpattern_id, patterns)
SUBPATTERNS[subpattern_id] = subpat_iter
return subpat_iter
else:
return patterns
### Response:
def enum_subpattern(p):
subpattern_id, d = p
patterns = list(enum_gen(d))
'''
if subpattern_id:
subpat_iter = EnumSubpatternIterator(subpattern_id, patterns)
SUBPATTERNS[subpattern_id] = subpat_iter
return subpat_iter
else:
return patterns
'''
return patterns |
def _print_sql_with_error(self, sql, error_line):
"""
    Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs.
"""
if os.linesep in sql:
lines = sql.split(os.linesep)
digits = math.ceil(math.log(len(lines) + 1, 10))
i = 1
for line in lines:
if i == error_line:
self._io.text('<error>{0:{width}} {1}</error>'.format(i, line, width=digits, ))
else:
self._io.text('{0:{width}} {1}'.format(i, line, width=digits, ))
i += 1
else:
            self._io.text(sql) | Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs. | Below is the the instruction that describes the task:
### Input:
Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs.
### Response:
def _print_sql_with_error(self, sql, error_line):
"""
    Writes a SQL statement with a syntax error to the output. The line where the error occurs is highlighted.
:param str sql: The SQL statement.
:param int error_line: The line where the error occurs.
"""
if os.linesep in sql:
lines = sql.split(os.linesep)
digits = math.ceil(math.log(len(lines) + 1, 10))
i = 1
for line in lines:
if i == error_line:
self._io.text('<error>{0:{width}} {1}</error>'.format(i, line, width=digits, ))
else:
self._io.text('{0:{width}} {1}'.format(i, line, width=digits, ))
i += 1
else:
self._io.text(sql) |
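A standalone sketch of the same numbering-and-highlight idea, printing to stdout instead of the io abstraction used above (a >> marker stands in for the <error> tag):

import math

def print_sql_with_error_sketch(sql, error_line):
    lines = sql.splitlines()
    digits = math.ceil(math.log(len(lines) + 1, 10))
    for i, line in enumerate(lines, start=1):
        marker = ">>" if i == error_line else "  "
        print("{0} {1:{width}} {2}".format(marker, i, line, width=digits))

print_sql_with_error_sketch("SELECT *\nFROM t1\nWHERE ???;", error_line=3)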
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
unitary = _unitary(norm)
output = mkl_fft.ifftn(a, s, axes)
if unitary:
output *= sqrt(_tot_size(output, axes))
return output | Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
    If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show() | Below is the the instruction that describes the task:
### Input:
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
    If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
### Response:
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
unitary = _unitary(norm)
output = mkl_fft.ifftn(a, s, axes)
if unitary:
output *= sqrt(_tot_size(output, axes))
return output |
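The wrapper above is intended to match NumPy's semantics, so the quick sanity check below uses plain numpy.fft: the round trip holds with and without norm="ortho", and the unitary scaling preserves energy (Parseval).

import numpy as np

a = np.random.rand(4, 4)
# Unnormalized round trip: ifftn undoes fftn.
assert np.allclose(np.fft.ifftn(np.fft.fftn(a)), a)

# With norm="ortho" both directions carry a 1/sqrt(N) factor, so the round trip still holds.
F = np.fft.fftn(a, norm="ortho")
assert np.allclose(np.fft.ifftn(F, norm="ortho"), a)
assert np.isclose(np.sum(np.abs(F) ** 2), np.sum(a ** 2))  # Parseval under unitary scaling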
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Truncate all existing tables in current keyspace.
:returns: an empty list
"""
for table in tables:
qs = "TRUNCATE {}".format(table)
self.connection.connection.execute(qs)
return [] | Truncate all existing tables in current keyspace.
:returns: an empty list | Below is the the instruction that describes the task:
### Input:
Truncate all existing tables in current keyspace.
:returns: an empty list
### Response:
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Truncate all existing tables in current keyspace.
:returns: an empty list
"""
for table in tables:
qs = "TRUNCATE {}".format(table)
self.connection.connection.execute(qs)
return [] |
def update_time_login(u_name):
'''
Update the login time for user.
'''
entry = TabMember.update(
time_login=tools.timestamp()
).where(
TabMember.user_name == u_name
)
entry.execute() | Update the login time for user. | Below is the the instruction that describes the task:
### Input:
Update the login time for user.
### Response:
def update_time_login(u_name):
'''
Update the login time for user.
'''
entry = TabMember.update(
time_login=tools.timestamp()
).where(
TabMember.user_name == u_name
)
entry.execute() |