code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k)
---|---|---|
def create_key(key_name, save_path, region=None, key=None, keyid=None,
profile=None):
'''
Creates a key and saves it to a given path.
Returns the private key.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.create_key mykey /root/
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
key = conn.create_key_pair(key_name)
log.debug("the key to return is : %s", key)
key.save(save_path)
return key.material
except boto.exception.BotoServerError as e:
log.debug(e)
return False | Creates a key and saves it to a given path.
Returns the private key.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.create_key mykey /root/ | Below is the instruction that describes the task:
### Input:
Creates a key and saves it to a given path.
Returns the private key.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.create_key mykey /root/
### Response:
def create_key(key_name, save_path, region=None, key=None, keyid=None,
profile=None):
'''
Creates a key and saves it to a given path.
Returns the private key.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.create_key mykey /root/
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
key = conn.create_key_pair(key_name)
log.debug("the key to return is : %s", key)
key.save(save_path)
return key.material
except boto.exception.BotoServerError as e:
log.debug(e)
return False |
def save(self, designName=""):
# type: (ASaveDesign) -> None
"""Save the current design to file"""
self.try_stateful_function(
ss.SAVING, ss.READY, self.do_save, designName) | Save the current design to file | Below is the instruction that describes the task:
### Input:
Save the current design to file
### Response:
def save(self, designName=""):
# type: (ASaveDesign) -> None
"""Save the current design to file"""
self.try_stateful_function(
ss.SAVING, ss.READY, self.do_save, designName) |
def registerInternalRouters(app):
"""
These are the endpoints which are used to display measurements in the
flask-profiler dashboard.
Note: these should be defined after wrapping user defined endpoints
via wrapAppEndpoints()
:param app: Flask application instance
:return:
"""
urlPath = CONF.get("endpointRoot", "flask-profiler")
fp = Blueprint(
'flask-profiler', __name__,
url_prefix="/" + urlPath,
static_folder="static/dist/", static_url_path='/static/dist')
@fp.route("/".format(urlPath))
@auth.login_required
def index():
return fp.send_static_file("index.html")
@fp.route("/api/measurements/".format(urlPath))
@auth.login_required
def filterMeasurements():
args = dict(request.args.items())
measurements = collection.filter(args)
return jsonify({"measurements": list(measurements)})
@fp.route("/api/measurements/grouped".format(urlPath))
@auth.login_required
def getMeasurementsSummary():
args = dict(request.args.items())
measurements = collection.getSummary(args)
return jsonify({"measurements": list(measurements)})
@fp.route("/api/measurements/<measurementId>".format(urlPath))
@auth.login_required
def getContext(measurementId):
return jsonify(collection.get(measurementId))
@fp.route("/api/measurements/timeseries/".format(urlPath))
@auth.login_required
def getRequestsTimeseries():
args = dict(request.args.items())
return jsonify({"series": collection.getTimeseries(args)})
@fp.route("/api/measurements/methodDistribution/".format(urlPath))
@auth.login_required
def getMethodDistribution():
args = dict(request.args.items())
return jsonify({
"distribution": collection.getMethodDistribution(args)})
@fp.route("/db/dumpDatabase")
@auth.login_required
def dumpDatabase():
response = jsonify({
"summary": collection.getSummary()})
response.headers["Content-Disposition"] = "attachment; filename=dump.json"
return response
@fp.route("/db/deleteDatabase")
@auth.login_required
def deleteDatabase():
response = jsonify({
"status": collection.truncate()})
return response
@fp.after_request
def x_robots_tag_header(response):
response.headers['X-Robots-Tag'] = 'noindex, nofollow'
return response
app.register_blueprint(fp) | These are the endpoints which are used to display measurements in the
flask-profiler dashboard.
Note: these should be defined after wrapping user defined endpoints
via wrapAppEndpoints()
:param app: Flask application instance
:return: | Below is the instruction that describes the task:
### Input:
These are the endpoints which are used to display measurements in the
flask-profiler dashboard.
Note: these should be defined after wrapping user defined endpoints
via wrapAppEndpoints()
:param app: Flask application instance
:return:
### Response:
def registerInternalRouters(app):
"""
These are the endpoints which are used to display measurements in the
flask-profiler dashboard.
Note: these should be defined after wrapping user defined endpoints
via wrapAppEndpoints()
:param app: Flask application instance
:return:
"""
urlPath = CONF.get("endpointRoot", "flask-profiler")
fp = Blueprint(
'flask-profiler', __name__,
url_prefix="/" + urlPath,
static_folder="static/dist/", static_url_path='/static/dist')
@fp.route("/".format(urlPath))
@auth.login_required
def index():
return fp.send_static_file("index.html")
@fp.route("/api/measurements/".format(urlPath))
@auth.login_required
def filterMeasurements():
args = dict(request.args.items())
measurements = collection.filter(args)
return jsonify({"measurements": list(measurements)})
@fp.route("/api/measurements/grouped".format(urlPath))
@auth.login_required
def getMeasurementsSummary():
args = dict(request.args.items())
measurements = collection.getSummary(args)
return jsonify({"measurements": list(measurements)})
@fp.route("/api/measurements/<measurementId>".format(urlPath))
@auth.login_required
def getContext(measurementId):
return jsonify(collection.get(measurementId))
@fp.route("/api/measurements/timeseries/".format(urlPath))
@auth.login_required
def getRequestsTimeseries():
args = dict(request.args.items())
return jsonify({"series": collection.getTimeseries(args)})
@fp.route("/api/measurements/methodDistribution/".format(urlPath))
@auth.login_required
def getMethodDistribution():
args = dict(request.args.items())
return jsonify({
"distribution": collection.getMethodDistribution(args)})
@fp.route("/db/dumpDatabase")
@auth.login_required
def dumpDatabase():
response = jsonify({
"summary": collection.getSummary()})
response.headers["Content-Disposition"] = "attachment; filename=dump.json"
return response
@fp.route("/db/deleteDatabase")
@auth.login_required
def deleteDatabase():
response = jsonify({
"status": collection.truncate()})
return response
@fp.after_request
def x_robots_tag_header(response):
response.headers['X-Robots-Tag'] = 'noindex, nofollow'
return response
app.register_blueprint(fp) |
def _parse_pool_transaction_file(
ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
ledger_size=None):
"""
helper function for parseLedgerForHaAndKeys
"""
for _, txn in ledger.getAllTxn(to=ledger_size):
if get_type(txn) == NODE:
txn_data = get_payload_data(txn)
nodeName = txn_data[DATA][ALIAS]
clientStackName = nodeName + CLIENT_STACK_SUFFIX
nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \
if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \
else None
cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \
if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \
else None
if nHa:
nodeReg[nodeName] = HA(*nHa)
if cHa:
cliNodeReg[clientStackName] = HA(*cHa)
try:
# TODO: Need to handle abbreviated verkey
key_type = 'verkey'
verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
key_type = 'identifier'
cryptonymToHex(get_from(txn))
except ValueError:
logger.exception(
'Invalid {}. Rebuild pool transactions.'.format(key_type))
exit('Invalid {}. Rebuild pool transactions.'.format(key_type))
nodeKeys[nodeName] = verkey
services = txn_data[DATA].get(SERVICES)
if isinstance(services, list):
if VALIDATOR in services:
activeValidators.add(nodeName)
else:
activeValidators.discard(nodeName) | helper function for parseLedgerForHaAndKeys | Below is the instruction that describes the task:
### Input:
helper function for parseLedgerForHaAndKeys
### Response:
def _parse_pool_transaction_file(
ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
ledger_size=None):
"""
helper function for parseLedgerForHaAndKeys
"""
for _, txn in ledger.getAllTxn(to=ledger_size):
if get_type(txn) == NODE:
txn_data = get_payload_data(txn)
nodeName = txn_data[DATA][ALIAS]
clientStackName = nodeName + CLIENT_STACK_SUFFIX
nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \
if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \
else None
cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \
if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \
else None
if nHa:
nodeReg[nodeName] = HA(*nHa)
if cHa:
cliNodeReg[clientStackName] = HA(*cHa)
try:
# TODO: Need to handle abbreviated verkey
key_type = 'verkey'
verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
key_type = 'identifier'
cryptonymToHex(get_from(txn))
except ValueError:
logger.exception(
'Invalid {}. Rebuild pool transactions.'.format(key_type))
exit('Invalid {}. Rebuild pool transactions.'.format(key_type))
nodeKeys[nodeName] = verkey
services = txn_data[DATA].get(SERVICES)
if isinstance(services, list):
if VALIDATOR in services:
activeValidators.add(nodeName)
else:
activeValidators.discard(nodeName) |
def RunValidationFromOptions(feed, options):
"""Validate feed, run in profiler if in options, and return an exit code."""
if options.performance:
return ProfileRunValidationOutputFromOptions(feed, options)
else:
return RunValidationOutputFromOptions(feed, options) | Validate feed, run in profiler if in options, and return an exit code. | Below is the instruction that describes the task:
### Input:
Validate feed, run in profiler if in options, and return an exit code.
### Response:
def RunValidationFromOptions(feed, options):
"""Validate feed, run in profiler if in options, and return an exit code."""
if options.performance:
return ProfileRunValidationOutputFromOptions(feed, options)
else:
return RunValidationOutputFromOptions(feed, options) |
def discard(self, changeset_id: uuid.UUID) -> None:
"""
Throws away all journaled data starting at the given changeset
"""
self._validate_changeset(changeset_id)
self.journal.pop_changeset(changeset_id) | Throws away all journaled data starting at the given changeset | Below is the instruction that describes the task:
### Input:
Throws away all journaled data starting at the given changeset
### Response:
def discard(self, changeset_id: uuid.UUID) -> None:
"""
Throws away all journaled data starting at the given changeset
"""
self._validate_changeset(changeset_id)
self.journal.pop_changeset(changeset_id) |
def textalign(text, maxlength, align='left'):
"""
Align Text When Given Full Length
"""
if align == 'left':
return text
elif align == 'centre' or align == 'center':
spaces = ' ' * (int((maxlength - len(text)) / 2))
elif align == 'right':
spaces = ' ' * (maxlength - len(text))
else:
raise ValueError("Invalid alignment specified.")
return spaces + text | Align Text When Given Full Length | Below is the instruction that describes the task:
### Input:
Align Text When Given Full Length
### Response:
def textalign(text, maxlength, align='left'):
"""
Align Text When Given Full Length
"""
if align == 'left':
return text
elif align == 'centre' or align == 'center':
spaces = ' ' * (int((maxlength - len(text)) / 2))
elif align == 'right':
spaces = ' ' * (maxlength - len(text))
else:
raise ValueError("Invalid alignment specified.")
return spaces + text |
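The textalign helper above is self-contained, so its padding behaviour is easy to check directly. A minimal usage sketch, assuming the function is importable from the module shown:

```python
# Minimal usage sketch of textalign; expected values follow from the padding arithmetic.
print(repr(textalign("hi", 6, align='left')))    # 'hi'     - returned unchanged
print(repr(textalign("hi", 6, align='right')))   # '    hi' - padded with 4 leading spaces
print(repr(textalign("hi", 6, align='center')))  # '  hi'   - half the padding, rounded down
```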
def integrate(self,t,pot,method='symplec4_c',dt=None):
"""
NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
error message number (get the actual orbit using getOrbit())
HISTORY:
2010-07-20
"""
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
thispot= RZToplanarPotential(pot)
self.t= nu.array(t)
self._pot= thispot
self.orbit, msg= _integrateROrbit(self.vxvv,thispot,t,method,dt)
return msg | NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
error message number (get the actual orbit using getOrbit())
HISTORY:
2010-07-20 | Below is the instruction that describes the task:
### Input:
NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
error message number (get the actual orbit using getOrbit())
HISTORY:
2010-07-20
### Response:
def integrate(self,t,pot,method='symplec4_c',dt=None):
"""
NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
error message number (get the actual orbit using getOrbit())
HISTORY:
2010-07-20
"""
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
thispot= RZToplanarPotential(pot)
self.t= nu.array(t)
self._pot= thispot
self.orbit, msg= _integrateROrbit(self.vxvv,thispot,t,method,dt)
return msg |
def INDEX_OF_CP(string_expression, substring_expression, start=None, end=None):
"""
Searches a string for an occurrence of a substring and returns the UTF-8 code point index (zero-based) of the first occurrence.
If the substring is not found, returns -1.
https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/
for more details
:param string_expression: The string or expression of string
:param substring_expression: The string or expression of substring
:param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search.
:param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search.
:return: Aggregation operator
"""
res = [string_expression, substring_expression]
if start is not None:
res.append(start)
if end is not None:
res.append(end)
return {'$indexOfCP': res} | Searches a string for an occurrence of a substring and returns the UTF-8 code point index (zero-based) of the first occurrence.
If the substring is not found, returns -1.
https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/
for more details
:param string_expression: The string or expression of string
:param substring_expression: The string or expression of substring
:param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search.
:param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search.
:return: Aggregation operator | Below is the instruction that describes the task:
### Input:
Searches a string for an occurrence of a substring and returns the UTF-8 code point index (zero-based) of the first occurrence.
If the substring is not found, returns -1.
https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/
for more details
:param string_expression: The string or expression of string
:param substring_expression: The string or expression of substring
:param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search.
:param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search.
:return: Aggregation operator
### Response:
def INDEX_OF_CP(string_expression, substring_expression, start=None, end=None):
"""
Searches a string for an occurrence of a substring and returns the UTF-8 code point index (zero-based) of the first occurrence.
If the substring is not found, returns -1.
https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/
for more details
:param string_expression: The string or expression of string
:param substring_expression: The string or expression of substring
:param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search.
:param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search.
:return: Aggregation operator
"""
res = [string_expression, substring_expression]
if start is not None:
res.append(start)
if end is not None:
res.append(end)
return {'$indexOfCP': res} |
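Because INDEX_OF_CP only assembles the aggregation operator document, its output can be inspected without a database round trip. A small sketch (the field name $item is hypothetical):

```python
# Build the $indexOfCP operator for a hypothetical field "item".
op = INDEX_OF_CP('$item', 'foo', start=2)
assert op == {'$indexOfCP': ['$item', 'foo', 2]}

# Without start/end only the two string expressions are emitted.
assert INDEX_OF_CP('$item', 'foo') == {'$indexOfCP': ['$item', 'foo']}
```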
def getFieldsForActiveJobsOfType(self, jobType, fields=[]):
""" Helper function for querying the models table including relevant job
info where the job type matches the specified jobType. Only records for
which there is a matching jobId in both tables is returned, and only the
requested fields are returned in each result, assuming that there is not
a conflict. This function is useful, for example, in querying a cluster
for a list of actively running production models (according to the state
of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
enumerations.
Parameters:
----------------------------------------------------------------
jobType: jobType enum
fields: list of fields to return
Returns: List of tuples containing the jobId and requested field values
"""
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = \
'SELECT DISTINCT %s ' \
'FROM %s j ' \
'LEFT JOIN %s m USING(job_id) '\
'WHERE j.status != %%s ' \
'AND _eng_job_type = %%s' % (dbFieldsStr, self.jobsTableName,
self.modelsTableName)
conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType])
return conn.cursor.fetchall() | Helper function for querying the models table including relevant job
info where the job type matches the specified jobType. Only records for
which there is a matching jobId in both tables is returned, and only the
requested fields are returned in each result, assuming that there is not
a conflict. This function is useful, for example, in querying a cluster
for a list of actively running production models (according to the state
of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
enumerations.
Parameters:
----------------------------------------------------------------
jobType: jobType enum
fields: list of fields to return
Returns: List of tuples containing the jobId and requested field values | Below is the instruction that describes the task:
### Input:
Helper function for querying the models table including relevant job
info where the job type matches the specified jobType. Only records for
which there is a matching jobId in both tables is returned, and only the
requested fields are returned in each result, assuming that there is not
a conflict. This function is useful, for example, in querying a cluster
for a list of actively running production models (according to the state
of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
enumerations.
Parameters:
----------------------------------------------------------------
jobType: jobType enum
fields: list of fields to return
Returns: List of tuples containing the jobId and requested field values
### Response:
def getFieldsForActiveJobsOfType(self, jobType, fields=[]):
""" Helper function for querying the models table including relevant job
info where the job type matches the specified jobType. Only records for
which there is a matching jobId in both tables is returned, and only the
requested fields are returned in each result, assuming that there is not
a conflict. This function is useful, for example, in querying a cluster
for a list of actively running production models (according to the state
of the client jobs database). jobType must be one of the JOB_TYPE_XXXX
enumerations.
Parameters:
----------------------------------------------------------------
jobType: jobType enum
fields: list of fields to return
Returns: List of tuples containing the jobId and requested field values
"""
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = \
'SELECT DISTINCT %s ' \
'FROM %s j ' \
'LEFT JOIN %s m USING(job_id) '\
'WHERE j.status != %%s ' \
'AND _eng_job_type = %%s' % (dbFieldsStr, self.jobsTableName,
self.modelsTableName)
conn.cursor.execute(query, [self.STATUS_COMPLETED, jobType])
return conn.cursor.fetchall() |
def _is_updated(old_conf, new_conf):
'''
Compare the API results to the current statefile data
'''
changed = {}
# Dirty json hacking to get parameters in the same format
new_conf = _json_to_unicode(salt.utils.json.loads(
salt.utils.json.dumps(new_conf, ensure_ascii=False)))
old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False))
for key, value in old_conf.items():
oldval = six.text_type(value).lower()
if key in new_conf:
newval = six.text_type(new_conf[key]).lower()
if oldval == 'null' or oldval == 'none':
oldval = ''
if key in new_conf and newval != oldval:
changed[key] = {'old': oldval, 'new': newval}
return changed | Compare the API results to the current statefile data | Below is the instruction that describes the task:
### Input:
Compare the API results to the current statefile data
### Response:
def _is_updated(old_conf, new_conf):
'''
Compare the API results to the current statefile data
'''
changed = {}
# Dirty json hacking to get parameters in the same format
new_conf = _json_to_unicode(salt.utils.json.loads(
salt.utils.json.dumps(new_conf, ensure_ascii=False)))
old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False))
for key, value in old_conf.items():
oldval = six.text_type(value).lower()
if key in new_conf:
newval = six.text_type(new_conf[key]).lower()
if oldval == 'null' or oldval == 'none':
oldval = ''
if key in new_conf and newval != oldval:
changed[key] = {'old': oldval, 'new': newval}
return changed |
def field_value(self, value):
"""Validate against NodeType.
"""
if not self.is_array:
return self.field_type(value)
if isinstance(value, (list, tuple, set)):
return [self.field_type(item) for item in value]
return self.field_type(value) | Validate against NodeType. | Below is the instruction that describes the task:
### Input:
Validate against NodeType.
### Response:
def field_value(self, value):
"""Validate against NodeType.
"""
if not self.is_array:
return self.field_type(value)
if isinstance(value, (list, tuple, set)):
return [self.field_type(item) for item in value]
return self.field_type(value) |
def get_monitor_value(self):
"Pick the monitored value."
if self.monitor=='trn_loss' and len(self.learn.recorder.losses) == 0: return None
elif len(self.learn.recorder.val_losses) == 0: return None
values = {'train_loss':self.learn.recorder.losses[-1].cpu().numpy(),
'valid_loss':self.learn.recorder.val_losses[-1]}
if values['valid_loss'] is None: return
if self.learn.recorder.metrics:
for m, n in zip(self.learn.recorder.metrics[-1],self.learn.recorder.names[3:-1]):
values[n] = m
if values.get(self.monitor) is None:
warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {", ".join(map(str, self.learn.recorder.names[1:-1]))}')
return values.get(self.monitor) | Pick the monitored value. | Below is the instruction that describes the task:
### Input:
Pick the monitored value.
### Response:
def get_monitor_value(self):
"Pick the monitored value."
if self.monitor=='trn_loss' and len(self.learn.recorder.losses) == 0: return None
elif len(self.learn.recorder.val_losses) == 0: return None
values = {'train_loss':self.learn.recorder.losses[-1].cpu().numpy(),
'valid_loss':self.learn.recorder.val_losses[-1]}
if values['valid_loss'] is None: return
if self.learn.recorder.metrics:
for m, n in zip(self.learn.recorder.metrics[-1],self.learn.recorder.names[3:-1]):
values[n] = m
if values.get(self.monitor) is None:
warn(f'{self.__class__} conditioned on metric `{self.monitor}` which is not available. Available metrics are: {", ".join(map(str, self.learn.recorder.names[1:-1]))}')
return values.get(self.monitor) |
def ndcg(truth, recommend, k=None):
"""Normalized Discounted Cumulative Grain (NDCG).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
k (int): Top-k items in `recommend` will be recommended.
Returns:
float: NDCG.
"""
if k is None:
k = len(recommend)
def idcg(n_possible_truth):
res = 0.
for n in range(n_possible_truth):
res += 1. / np.log2(n + 2)
return res
dcg = 0.
for n, r in enumerate(recommend[:k]):
if r not in truth:
continue
dcg += 1. / np.log2(n + 2)
res_idcg = idcg(np.min([truth.size, k]))
if res_idcg == 0.:
return 0.
return dcg / res_idcg | Normalized Discounted Cumulative Gain (NDCG).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
k (int): Top-k items in `recommend` will be recommended.
Returns:
float: NDCG. | Below is the instruction that describes the task:
### Input:
Normalized Discounted Cumulative Gain (NDCG).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
k (int): Top-k items in `recommend` will be recommended.
Returns:
float: NDCG.
### Response:
def ndcg(truth, recommend, k=None):
"""Normalized Discounted Cumulative Grain (NDCG).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
k (int): Top-k items in `recommend` will be recommended.
Returns:
float: NDCG.
"""
if k is None:
k = len(recommend)
def idcg(n_possible_truth):
res = 0.
for n in range(n_possible_truth):
res += 1. / np.log2(n + 2)
return res
dcg = 0.
for n, r in enumerate(recommend[:k]):
if r not in truth:
continue
dcg += 1. / np.log2(n + 2)
res_idcg = idcg(np.min([truth.size, k]))
if res_idcg == 0.:
return 0.
return dcg / res_idcg |
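A small worked example of the metric (the inputs are hypothetical, and the function is assumed importable as defined above; the comments spell out which ranks contribute to DCG and IDCG):

```python
import numpy as np

truth = np.array([1, 2, 3])          # relevant items
recommend = np.array([3, 5, 1, 2])   # ranked recommendations

# Hits at ranks 1, 3 and 4: DCG  = 1/log2(2) + 1/log2(4) + 1/log2(5)
# Perfect top-3 ranking:    IDCG = 1/log2(2) + 1/log2(3) + 1/log2(4)
print(ndcg(truth, recommend))        # roughly 0.91
print(ndcg(truth, recommend, k=2))   # only the top-2 recommendations are scored
```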
def bind(self, source, dest, destmeth):
"""Guild compatible version of :py:meth:`connect`.
This allows Pyctools compound components to be used in `Guild
<https://github.com/sparkslabs/guild>`_ pipelines.
"""
self.connect(source, getattr(dest, destmeth)) | Guild compatible version of :py:meth:`connect`.
This allows Pyctools compound components to be used in `Guild
<https://github.com/sparkslabs/guild>`_ pipelines. | Below is the instruction that describes the task:
### Input:
Guild compatible version of :py:meth:`connect`.
This allows Pyctools compound components to be used in `Guild
<https://github.com/sparkslabs/guild>`_ pipelines.
### Response:
def bind(self, source, dest, destmeth):
"""Guild compatible version of :py:meth:`connect`.
This allows Pyctools compound components to be used in `Guild
<https://github.com/sparkslabs/guild>`_ pipelines.
"""
self.connect(source, getattr(dest, destmeth)) |
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain | Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length. | Below is the instruction that describes the task:
### Input:
Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
### Response:
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain |
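The method is used on a client instance before connecting, in the paho-mqtt style. A minimal sketch (the client class name, broker host and topic are hypothetical):

```python
# Hypothetical client exposing will_set(); register the will before connect().
client = Client()
client.will_set("devices/sensor1/status", payload="offline", qos=1, retain=True)
client.connect("broker.example.com", 1883)
# If this client later drops off without a clean disconnect, the broker publishes
# the retained "offline" message on devices/sensor1/status on its behalf.
```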
def Close(self):
"""Close and destroy the object.
This is similar to Flush, but does not maintain object validity. Hence the
object should not be interacted with after Close().
Raises:
LockError: The lease for this object has expired.
"""
if self.locked and self.CheckLease() == 0:
raise LockError("Can not update lease that has already expired.")
self._WriteAttributes()
# Releasing this lock allows another thread to own it.
if self.locked:
self.transaction.Release()
if self.parent:
self.parent.Close()
# Interacting with a closed object is a bug. We need to catch this ASAP so
# we remove all mode permissions from this object.
self.mode = "" | Close and destroy the object.
This is similar to Flush, but does not maintain object validity. Hence the
object should not be interacted with after Close().
Raises:
LockError: The lease for this object has expired. | Below is the instruction that describes the task:
### Input:
Close and destroy the object.
This is similar to Flush, but does not maintain object validity. Hence the
object should not be interacted with after Close().
Raises:
LockError: The lease for this object has expired.
### Response:
def Close(self):
"""Close and destroy the object.
This is similar to Flush, but does not maintain object validity. Hence the
object should not be interacted with after Close().
Raises:
LockError: The lease for this object has expired.
"""
if self.locked and self.CheckLease() == 0:
raise LockError("Can not update lease that has already expired.")
self._WriteAttributes()
# Releasing this lock allows another thread to own it.
if self.locked:
self.transaction.Release()
if self.parent:
self.parent.Close()
# Interacting with a closed object is a bug. We need to catch this ASAP so
# we remove all mode permissions from this object.
self.mode = "" |
def rle_decode(mask_rle:str, shape:Tuple[int,int])->NPArrayMask:
"Return an image array from run-length encoded string `mask_rle` with `shape`."
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint)
for low, up in zip(starts, ends): img[low:up] = 1
return img.reshape(shape) | Return an image array from run-length encoded string `mask_rle` with `shape`. | Below is the instruction that describes the task:
### Input:
Return an image array from run-length encoded string `mask_rle` with `shape`.
### Response:
def rle_decode(mask_rle:str, shape:Tuple[int,int])->NPArrayMask:
"Return an image array from run-length encoded string `mask_rle` with `shape`."
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint)
for low, up in zip(starts, ends): img[low:up] = 1
return img.reshape(shape) |
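A short decode example (a sketch assuming rle_decode and numpy are importable; note that the RLE string uses 1-based start positions, which is why the code subtracts 1 from starts):

```python
# "1 3 10 2": three pixels on from position 1, two more from position 10 (1-based).
mask = rle_decode("1 3 10 2", (4, 4))
# Flattened pixels 0-2 and 9-10 are set, so the 4x4 mask is:
# [[1 1 1 0]
#  [0 0 0 0]
#  [0 1 1 0]
#  [0 0 0 0]]
print(mask)
```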
def retired(self):
"""
Function for generating retired languages. Returns a dict('code', (datetime, [language, ...], 'description')).
"""
def gen():
import csv
import re
from datetime import datetime
from pkg_resources import resource_filename
with open(resource_filename(__package__, 'iso-639-3_Retirements.tab')) as rf:
rtd = list(csv.reader(rf, delimiter='\t'))[1:]
rc = [r[0] for r in rtd]
for i, _, _, m, s, d in rtd:
d = datetime.strptime(d, '%Y-%m-%d')
if not m:
m = re.findall('\[([a-z]{3})\]', s)
if m:
m = [m] if isinstance(m, str) else m
yield i, (d, [self.get(part3=x) for x in m if x not in rc], s)
else:
yield i, (d, [], s)
yield 'sh', self.get(part3='hbs') # Add 'sh' as deprecated
return dict(gen()) | Function for generating retired languages. Returns a dict('code', (datetime, [language, ...], 'description')). | Below is the instruction that describes the task:
### Input:
Function for generating retired languages. Returns a dict('code', (datetime, [language, ...], 'description')).
### Response:
def retired(self):
"""
Function for generating retired languages. Returns a dict('code', (datetime, [language, ...], 'description')).
"""
def gen():
import csv
import re
from datetime import datetime
from pkg_resources import resource_filename
with open(resource_filename(__package__, 'iso-639-3_Retirements.tab')) as rf:
rtd = list(csv.reader(rf, delimiter='\t'))[1:]
rc = [r[0] for r in rtd]
for i, _, _, m, s, d in rtd:
d = datetime.strptime(d, '%Y-%m-%d')
if not m:
m = re.findall('\[([a-z]{3})\]', s)
if m:
m = [m] if isinstance(m, str) else m
yield i, (d, [self.get(part3=x) for x in m if x not in rc], s)
else:
yield i, (d, [], s)
yield 'sh', self.get(part3='hbs') # Add 'sh' as deprecated
return dict(gen()) |
def add_mvn(self, name, input_name, output_name, across_channels = True, normalize_variance = True, epsilon = 1e-5):
"""
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.mvn
spec_layer_params.acrossChannels = across_channels
spec_layer_params.normalizeVariance = normalize_variance
spec_layer_params.epsilon = epsilon | Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn | Below is the instruction that describes the task:
### Input:
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn
### Response:
def add_mvn(self, name, input_name, output_name, across_channels = True, normalize_variance = True, epsilon = 1e-5):
"""
Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
across_channels: boolean
If False, each channel plane is normalized separately
If True, mean/variance is computed across all C, H and W dimensions
normalize_variance: boolean
If False, only mean subtraction is performed.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_l2_normalize, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.mvn
spec_layer_params.acrossChannels = across_channels
spec_layer_params.normalizeVariance = normalize_variance
spec_layer_params.epsilon = epsilon |
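This builder method matches add_mvn on coremltools' NeuralNetworkBuilder; assuming that is the class in question, a minimal sketch of wiring an MVN layer into a model looks like this (input/output names and shapes are hypothetical):

```python
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# Hypothetical 3x32x32 input, normalized across all of C, H and W.
builder = NeuralNetworkBuilder(
    input_features=[('data', datatypes.Array(3, 32, 32))],
    output_features=[('out', datatypes.Array(3, 32, 32))])
builder.add_mvn(name='mvn_1', input_name='data', output_name='out',
                across_channels=True, normalize_variance=True, epsilon=1e-5)
```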
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
"""A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
"""
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument, kwargs) | A variant that specifies the time as a relative time.
This is actually the more commonly used interface. | Below is the instruction that describes the task:
### Input:
A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
### Response:
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
"""A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
"""
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument, kwargs) |
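The behaviour mirrors the standard library's sched.scheduler.enter, which this code appears to follow; assuming that, a runnable sketch of the relative-delay form:

```python
import sched
import time

s = sched.scheduler(time.monotonic, time.sleep)
# Relative form: internally this becomes enterabs(timefunc() + 2, ...).
s.enter(2, 1, print, argument=('fired after ~2 seconds',))
s.run()
```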
def _alignment(elist, flist, e2f, f2e):
'''
elist, flist
wordlist for each language
e2f
translation alignment from e to f
alignment is
[(e, f)]
f2e
translation alignment from f to e
alignment is
[(e, f)]
return
alignment: {(f, e)}
flist
-----------------
e | |
l | |
i | |
s | |
t | |
-----------------
'''
neighboring = {(-1, 0), (0, -1), (1, 0), (0, 1),
(-1, -1), (-1, 1), (1, -1), (1, 1)}
e2f = set(e2f)
f2e = set(f2e)
m = len(elist)
n = len(flist)
alignment = e2f.intersection(f2e)
# marge with neighborhood
while True:
set_len = len(alignment)
for e_word in range(1, m+1):
for f_word in range(1, n+1):
if (e_word, f_word) in alignment:
for (e_diff, f_diff) in neighboring:
e_new = e_word + e_diff
f_new = f_word + f_diff
if not alignment:
if (e_new, f_new) in e2f.union(f2e):
alignment.add((e_new, f_new))
else:
if ((e_new not in list(zip(*alignment))[0]
or f_new not in list(zip(*alignment))[1])
and (e_new, f_new) in e2f.union(f2e)):
alignment.add((e_new, f_new))
if set_len == len(alignment):
break
# finalize
for e_word in range(1, m+1):
for f_word in range(1, n+1):
# for alignment = set([])
if not alignment:
if (e_word, f_word) in e2f.union(f2e):
alignment.add((e_word, f_word))
else:
if ((e_word not in list(zip(*alignment))[0]
or f_word not in list(zip(*alignment))[1])
and (e_word, f_word) in e2f.union(f2e)):
alignment.add((e_word, f_word))
return alignment | elist, flist
wordlist for each language
e2f
translation alignment from e to f
alignment is
[(e, f)]
f2e
translation alignment from f to e
alignment is
[(e, f)]
return
alignment: {(f, e)}
flist
-----------------
e | |
l | |
i | |
s | |
t | |
----------------- | Below is the instruction that describes the task:
### Input:
elist, flist
wordlist for each language
e2f
translation alignment from e to f
alignment is
[(e, f)]
f2e
translation alignment from f to e
alignment is
[(e, f)]
return
alignment: {(f, e)}
flist
-----------------
e | |
l | |
i | |
s | |
t | |
-----------------
### Response:
def _alignment(elist, flist, e2f, f2e):
'''
elist, flist
wordlist for each language
e2f
translation alignment from e to f
alignment is
[(e, f)]
f2e
translation alignment from f to e
alignment is
[(e, f)]
return
alignment: {(f, e)}
flist
-----------------
e | |
l | |
i | |
s | |
t | |
-----------------
'''
neighboring = {(-1, 0), (0, -1), (1, 0), (0, 1),
(-1, -1), (-1, 1), (1, -1), (1, 1)}
e2f = set(e2f)
f2e = set(f2e)
m = len(elist)
n = len(flist)
alignment = e2f.intersection(f2e)
# merge with neighborhood
while True:
set_len = len(alignment)
for e_word in range(1, m+1):
for f_word in range(1, n+1):
if (e_word, f_word) in alignment:
for (e_diff, f_diff) in neighboring:
e_new = e_word + e_diff
f_new = f_word + f_diff
if not alignment:
if (e_new, f_new) in e2f.union(f2e):
alignment.add((e_new, f_new))
else:
if ((e_new not in list(zip(*alignment))[0]
or f_new not in list(zip(*alignment))[1])
and (e_new, f_new) in e2f.union(f2e)):
alignment.add((e_new, f_new))
if set_len == len(alignment):
break
# finalize
for e_word in range(1, m+1):
for f_word in range(1, n+1):
# for alignment = set([])
if not alignment:
if (e_word, f_word) in e2f.union(f2e):
alignment.add((e_word, f_word))
else:
if ((e_word not in list(zip(*alignment))[0]
or f_word not in list(zip(*alignment))[1])
and (e_word, f_word) in e2f.union(f2e)):
alignment.add((e_word, f_word))
return alignment |
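A tiny symmetrization example (a hypothetical two-word sentence pair; word indices are 1-based, as in the loops above). The pair (2, 2) is only present in the e-to-f direction, but it is adjacent to the intersection point (1, 1), so the grow step keeps it:

```python
elist = ['the', 'house']
flist = ['das', 'Haus']
e2f = [(1, 1), (2, 2)]   # e -> f alignment
f2e = [(1, 1)]           # f -> e alignment misses the second pair
print(_alignment(elist, flist, e2f, f2e))
# {(1, 1), (2, 2)}  (set ordering may vary)
```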
def fix_header(pofile):
"""
Replace default headers with edX headers
"""
# By default, django-admin.py makemessages creates this header:
#
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
pofile.metadata_is_fuzzy = [] # remove [u'fuzzy']
header = pofile.header
fixes = (
('SOME DESCRIPTIVE TITLE', EDX_MARKER),
('Translations template for PROJECT.', EDX_MARKER),
('YEAR', str(datetime.utcnow().year)),
('ORGANIZATION', 'edX'),
("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
(
'This file is distributed under the same license as the PROJECT project.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
(
'This file is distributed under the same license as the PACKAGE package.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),
)
for src, dest in fixes:
header = header.replace(src, dest)
pofile.header = header | Replace default headers with edX headers | Below is the instruction that describes the task:
### Input:
Replace default headers with edX headers
### Response:
def fix_header(pofile):
"""
Replace default headers with edX headers
"""
# By default, django-admin.py makemessages creates this header:
#
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
pofile.metadata_is_fuzzy = [] # remove [u'fuzzy']
header = pofile.header
fixes = (
('SOME DESCRIPTIVE TITLE', EDX_MARKER),
('Translations template for PROJECT.', EDX_MARKER),
('YEAR', str(datetime.utcnow().year)),
('ORGANIZATION', 'edX'),
("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
(
'This file is distributed under the same license as the PROJECT project.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
(
'This file is distributed under the same license as the PACKAGE package.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),
)
for src, dest in fixes:
header = header.replace(src, dest)
pofile.header = header |
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0):
"""
Compares two parameter values.
:param a: First parameter
:param b: Second parameter
:param rel_tol: Relative tolerance
:param abs_tol: Absolute tolerance
:return: Boolean telling whether or not the parameters are close enough to be the same
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) | Compares two parameter values.
:param a: First parameter
:param b: Second parameter
:param rel_tol: Relative tolerance
:param abs_tol: Absolute tolerance
:return: Boolean telling whether or not the parameters are close enough to be the same | Below is the instruction that describes the task:
### Input:
Compares two parameter values.
:param a: First parameter
:param b: Second parameter
:param rel_tol: Relative tolerance
:param abs_tol: Absolute tolerance
:return: Boolean telling whether or not the parameters are close enough to be the same
### Response:
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0):
"""
Compares two parameter values.
:param a: First parameter
:param b: Second parameter
:param rel_tol: Relative tolerance
:param abs_tol: Absolute tolerance
:return: Boolean telling whether or not the parameters are close enough to be the same
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) |
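A few quick checks of the tolerance logic (values chosen so the comparison is unambiguous, assuming isclose is importable as defined above):

```python
# A 1e-12 error near 1.0 is within rel_tol * max(|a|, |b|) = 1e-10.
print(isclose(1.0, 1.0 + 1e-12))           # True
print(isclose(1.0, 1.000001))              # False: 1e-6 exceeds the relative tolerance
# Near zero the relative term vanishes, so abs_tol decides.
print(isclose(0.0, 1e-12, abs_tol=1e-9))   # True
```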
def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
style = d.style or '@style/Widget.DeviceDefault.PopupMenu'
self.window = PopupWindow(self.get_context(), None, 0, style)
self.showing = False | Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent. | Below is the instruction that describes the task:
### Input:
Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
### Response:
def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
style = d.style or '@style/Widget.DeviceDefault.PopupMenu'
self.window = PopupWindow(self.get_context(), None, 0, style)
self.showing = False |
def revoke_role(self, role_name, principal_name, principal_type):
"""
Parameters:
- role_name
- principal_name
- principal_type
"""
self.send_revoke_role(role_name, principal_name, principal_type)
return self.recv_revoke_role() | Parameters:
- role_name
- principal_name
- principal_type | Below is the instruction that describes the task:
### Input:
Parameters:
- role_name
- principal_name
- principal_type
### Response:
def revoke_role(self, role_name, principal_name, principal_type):
"""
Parameters:
- role_name
- principal_name
- principal_type
"""
self.send_revoke_role(role_name, principal_name, principal_type)
return self.recv_revoke_role() |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EndecaDgraphCollector, self).get_default_config()
config.update({
'path': 'endeca.dgraph',
'host': 'localhost',
'port': 8080,
'timeout': 1,
})
return config | Returns the default collector settings | Below is the the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(EndecaDgraphCollector, self).get_default_config()
config.update({
'path': 'endeca.dgraph',
'host': 'localhost',
'port': 8080,
'timeout': 1,
})
return config |
def get_registry_value(key, default=None):
"""
Gets the utility for IRegistry and returns the value for the key passed in.
If there is no value for the key passed in, returns default value
:param key: the key in the registry to look for
:param default: default value if the key is not registered
:return: value in the registry for the key passed in
"""
registry = queryUtility(IRegistry)
return registry.get(key, default) | Gets the utility for IRegistry and returns the value for the key passed in.
If there is no value for the key passed in, returns default value
:param key: the key in the registry to look for
:param default: default value if the key is not registered
:return: value in the registry for the key passed in | Below is the instruction that describes the task:
### Input:
Gets the utility for IRegistry and returns the value for the key passed in.
If there is no value for the key passed in, returns default value
:param key: the key in the registry to look for
:param default: default value if the key is not registered
:return: value in the registry for the key passed in
### Response:
def get_registry_value(key, default=None):
"""
Gets the utility for IRegistry and returns the value for the key passed in.
If there is no value for the key passed in, returns default value
:param key: the key in the registry to look for
:param default: default value if the key is not registered
:return: value in the registry for the key passed in
"""
registry = queryUtility(IRegistry)
return registry.get(key, default) |
def parse_size_name(type_name):
"""Calculate size and encoding from a type name.
This method takes a C-style type string like uint8_t[10] and returns
- the total size in bytes
- the unit size of each member (if it's an array)
- the scruct.{pack,unpack} format code for decoding the base type
- whether it is an array.
"""
if ' ' in type_name:
raise ArgumentError("There should not be a space in config variable type specifier", specifier=type_name)
variable = False
count = 1
base_type = type_name
if type_name[-1] == ']':
variable = True
start_index = type_name.find('[')
if start_index == -1:
raise ArgumentError("Could not find matching [ for ] character", specifier=type_name)
count = int(type_name[start_index+1:-1], 0)
base_type = type_name[:start_index]
matched_type = TYPE_CODES.get(base_type)
if matched_type is None:
raise ArgumentError("Could not find base type name", base_type=base_type, type_string=type_name)
base_size = struct.calcsize("<%s" % matched_type)
total_size = base_size*count
return total_size, base_size, matched_type, variable | Calculate size and encoding from a type name.
This method takes a C-style type string like uint8_t[10] and returns
- the total size in bytes
- the unit size of each member (if it's an array)
- the scruct.{pack,unpack} format code for decoding the base type
- whether it is an array. | Below is the instruction that describes the task:
### Input:
Calculate size and encoding from a type name.
This method takes a C-style type string like uint8_t[10] and returns
- the total size in bytes
- the unit size of each member (if it's an array)
- the scruct.{pack,unpack} format code for decoding the base type
- whether it is an array.
### Response:
def parse_size_name(type_name):
"""Calculate size and encoding from a type name.
This method takes a C-style type string like uint8_t[10] and returns
- the total size in bytes
- the unit size of each member (if it's an array)
- the scruct.{pack,unpack} format code for decoding the base type
- whether it is an array.
"""
if ' ' in type_name:
raise ArgumentError("There should not be a space in config variable type specifier", specifier=type_name)
variable = False
count = 1
base_type = type_name
if type_name[-1] == ']':
variable = True
start_index = type_name.find('[')
if start_index == -1:
raise ArgumentError("Could not find matching [ for ] character", specifier=type_name)
count = int(type_name[start_index+1:-1], 0)
base_type = type_name[:start_index]
matched_type = TYPE_CODES.get(base_type)
if matched_type is None:
raise ArgumentError("Could not find base type name", base_type=base_type, type_string=type_name)
base_size = struct.calcsize("<%s" % matched_type)
total_size = base_size*count
return total_size, base_size, matched_type, variable |
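Assuming TYPE_CODES maps C-style type names to struct format characters (for example 'uint8_t' to 'B' and 'uint16_t' to 'H'; this mapping is an assumption, since the table itself is not shown here), the parser returns:

```python
# Hypothetical examples; the exact TYPE_CODES contents are an assumption.
total, unit, fmt, is_array = parse_size_name('uint8_t[10]')
# total == 10, unit == 1, fmt == 'B', is_array == True

total, unit, fmt, is_array = parse_size_name('uint16_t')
# total == 2, unit == 2, fmt == 'H', is_array == False
```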
def encrypt_to_file(contents, filename):
"""
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
"""
if not filename.endswith('.enc'):
raise ValueError("%s does not end with .enc" % filename)
key = Fernet.generate_key()
fer = Fernet(key)
encrypted_file = fer.encrypt(contents)
with open(filename, 'wb') as f:
f.write(encrypted_file)
return key | Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`. | Below is the instruction that describes the task:
### Input:
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
### Response:
def encrypt_to_file(contents, filename):
"""
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
"""
if not filename.endswith('.enc'):
raise ValueError("%s does not end with .enc" % filename)
key = Fernet.generate_key()
fer = Fernet(key)
encrypted_file = fer.encrypt(contents)
with open(filename, 'wb') as f:
f.write(encrypted_file)
return key |
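A short round-trip sketch with the cryptography package's Fernet, which the function above relies on; the decrypt half mirrors what the referenced decrypt_file is documented to do (no files involved here, just bytes in memory):
from cryptography.fernet import Fernet
key = Fernet.generate_key()
token = Fernet(key).encrypt(b'some secret bytes')
# with the same key, the ciphertext decrypts back to the original plaintext
assert Fernet(key).decrypt(token) == b'some secret bytes'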
def _call_yum(args, **kwargs):
'''
Call yum/dnf.
'''
params = {'output_loglevel': 'trace',
'python_shell': False,
'env': salt.utils.environment.get_module_environment(globals())}
params.update(kwargs)
cmd = []
if salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.append(_yum())
cmd.extend(args)
return __salt__['cmd.run_all'](cmd, **params) | Call yum/dnf. | Below is the the instruction that describes the task:
### Input:
Call yum/dnf.
### Response:
def _call_yum(args, **kwargs):
'''
Call yum/dnf.
'''
params = {'output_loglevel': 'trace',
'python_shell': False,
'env': salt.utils.environment.get_module_environment(globals())}
params.update(kwargs)
cmd = []
if salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.append(_yum())
cmd.extend(args)
return __salt__['cmd.run_all'](cmd, **params) |
def filter_data(self, min_len, max_len):
"""
Preserves only samples which satisfy the following inequality:
min_len <= sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length
"""
logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
initial_len = len(self.src)
filtered_src = []
for src in self.src:
if min_len <= len(src) <= max_len:
filtered_src.append(src)
self.src = filtered_src
filtered_len = len(self.src)
logging.info(f'Pairs before: {initial_len}, after: {filtered_len}') | Preserves only samples which satisfy the following inequality:
min_len <= sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length | Below is the the instruction that describes the task:
### Input:
Preserves only samples which satisfy the following inequality:
min_len <= sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length
### Response:
def filter_data(self, min_len, max_len):
"""
Preserves only samples which satisfy the following inequality:
min_len <= sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length
"""
logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
initial_len = len(self.src)
filtered_src = []
for src in self.src:
if min_len <= len(src) <= max_len:
filtered_src.append(src)
self.src = filtered_src
filtered_len = len(self.src)
logging.info(f'Pairs before: {initial_len}, after: {filtered_len}') |
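Stripped of the class and logging plumbing, the filtering step is just a length check over the token sequences; a minimal sketch with made-up data:
sequences = [[1, 2], [3, 4, 5, 6, 7], [8, 9, 10]]
min_len, max_len = 2, 3
filtered = [seq for seq in sequences if min_len <= len(seq) <= max_len]
print(filtered)  # [[1, 2], [8, 9, 10]]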
def clone(id, path):
"""
- Download all files in a dataset or from a Job output
Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/
Using /output will download the files that are saved at the end of the job.
Note: This will download the files that are saved at
the end of the job.
- Download a directory from a dataset or from Job output
Specify the path to a directory and download all its files and subdirectories.
Eg: --path models/checkpoint1
"""
data_source = get_data_object(id, use_data_config=False)
if not data_source:
if 'output' in id:
floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.")
sys.exit()
if path:
# Download a directory from Dataset or Files
# Get the type of data resource from the id (foo/projects/bar/ or foo/datasets/bar/)
if '/datasets/' in id:
resource_type = 'data'
resource_id = data_source.id
else:
resource_type = 'files'
try:
experiment = ExperimentClient().get(normalize_job_name(id, use_config=False))
except FloydException:
experiment = ExperimentClient().get(id)
resource_id = experiment.id
data_url = "{}/api/v1/download/artifacts/{}/{}?is_dir=true&path={}".format(floyd.floyd_host,
resource_type,
resource_id,
path)
else:
# Download the full Dataset
data_url = "{}/api/v1/resources/{}?content=true&download=true".format(floyd.floyd_host,
data_source.resource_id)
DataClient().download_tar(url=data_url,
untar=True,
delete_after_untar=True) | - Download all files in a dataset or from a Job output
Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/
Using /output will download the files that are saved at the end of the job.
Note: This will download the files that are saved at
the end of the job.
- Download a directory from a dataset or from Job output
Specify the path to a directory and download all its files and subdirectories.
Eg: --path models/checkpoint1 | Below is the the instruction that describes the task:
### Input:
- Download all files in a dataset or from a Job output
Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/
Using /output will download the files that are saved at the end of the job.
Note: This will download the files that are saved at
the end of the job.
- Download a directory from a dataset or from Job output
Specify the path to a directory and download all its files and subdirectories.
Eg: --path models/checkpoint1
### Response:
def clone(id, path):
"""
- Download all files in a dataset or from a Job output
Eg: alice/projects/mnist/1/files, alice/projects/mnist/1/output or alice/dataset/mnist-data/1/
Using /output will download the files that are saved at the end of the job.
Note: This will download the files that are saved at
the end of the job.
- Download a directory from a dataset or from Job output
Specify the path to a directory and download all its files and subdirectories.
Eg: --path models/checkpoint1
"""
data_source = get_data_object(id, use_data_config=False)
if not data_source:
if 'output' in id:
floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.")
sys.exit()
if path:
# Download a directory from Dataset or Files
# Get the type of data resource from the id (foo/projects/bar/ or foo/datasets/bar/)
if '/datasets/' in id:
resource_type = 'data'
resource_id = data_source.id
else:
resource_type = 'files'
try:
experiment = ExperimentClient().get(normalize_job_name(id, use_config=False))
except FloydException:
experiment = ExperimentClient().get(id)
resource_id = experiment.id
data_url = "{}/api/v1/download/artifacts/{}/{}?is_dir=true&path={}".format(floyd.floyd_host,
resource_type,
resource_id,
path)
else:
# Download the full Dataset
data_url = "{}/api/v1/resources/{}?content=true&download=true".format(floyd.floyd_host,
data_source.resource_id)
DataClient().download_tar(url=data_url,
untar=True,
delete_after_untar=True) |
def remove_useless(self):
"""Returns a new grammar containing just useful rules."""
if not self.is_contextfree():
raise ValueError("grammar must be context-free")
by_lhs = collections.defaultdict(list)
by_rhs = collections.defaultdict(list)
for [lhs], rhs in self.rules:
by_lhs[lhs].append((lhs, rhs))
for y in rhs:
if y in self.nonterminals:
by_rhs[y].append((lhs, rhs))
agenda = collections.deque([self.start])
reachable = set()
while len(agenda) > 0:
x = agenda.popleft()
if x in reachable: continue
reachable.add(x)
for _, rhs in by_lhs[x]:
for y in rhs:
if y in by_lhs:
agenda.append(y)
agenda = collections.deque()
productive = set()
for [lhs], rhs in self.rules:
if all(y not in self.nonterminals for y in rhs):
agenda.append(lhs)
while len(agenda) > 0:
y = agenda.popleft()
if y in productive: continue
productive.add(y)
for lhs, rhs in by_rhs[y]:
if all(y not in self.nonterminals or y in productive for y in rhs):
agenda.append(lhs)
g = Grammar()
g.set_start(self.start)
for [lhs], rhs in self.rules:
if (lhs in reachable & productive and
all(y not in self.nonterminals or y in reachable & productive for y in rhs)):
g.add_rule([lhs], rhs)
return g | Returns a new grammar containing just useful rules. | Below is the the instruction that describes the task:
### Input:
Returns a new grammar containing just useful rules.
### Response:
def remove_useless(self):
"""Returns a new grammar containing just useful rules."""
if not self.is_contextfree():
raise ValueError("grammar must be context-free")
by_lhs = collections.defaultdict(list)
by_rhs = collections.defaultdict(list)
for [lhs], rhs in self.rules:
by_lhs[lhs].append((lhs, rhs))
for y in rhs:
if y in self.nonterminals:
by_rhs[y].append((lhs, rhs))
agenda = collections.deque([self.start])
reachable = set()
while len(agenda) > 0:
x = agenda.popleft()
if x in reachable: continue
reachable.add(x)
for _, rhs in by_lhs[x]:
for y in rhs:
if y in by_lhs:
agenda.append(y)
agenda = collections.deque()
productive = set()
for [lhs], rhs in self.rules:
if all(y not in self.nonterminals for y in rhs):
agenda.append(lhs)
while len(agenda) > 0:
y = agenda.popleft()
if y in productive: continue
productive.add(y)
for lhs, rhs in by_rhs[y]:
if all(y not in self.nonterminals or y in productive for y in rhs):
agenda.append(lhs)
g = Grammar()
g.set_start(self.start)
for [lhs], rhs in self.rules:
if (lhs in reachable & productive and
all(y not in self.nonterminals or y in reachable & productive for y in rhs)):
g.add_rule([lhs], rhs)
return g |
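The reachable/productive pruning is easiest to see on a tiny hypothetical grammar; the standalone sketch below (it does not reuse the Grammar class above) computes both sets by fixed point and keeps only rules built entirely from useful symbols. With these made-up rules, B never derives a terminal string and C is never reached from S, so only the S -> 'a' rule survives:
# rules as (lhs, rhs) pairs; uppercase symbols are the nonterminals of this toy grammar
rules = [('S', ['a']), ('S', ['B']), ('B', ['B', 'b']), ('C', ['c'])]
nonterminals = {'S', 'B', 'C'}
start = 'S'
productive = set()   # symbols that can derive a terminal-only string
changed = True
while changed:
    changed = False
    for lhs, rhs in rules:
        if lhs not in productive and all(y not in nonterminals or y in productive for y in rhs):
            productive.add(lhs)
            changed = True
reachable = {start}  # symbols that appear in some derivation from the start symbol
changed = True
while changed:
    changed = False
    for lhs, rhs in rules:
        if lhs in reachable:
            for y in rhs:
                if y in nonterminals and y not in reachable:
                    reachable.add(y)
                    changed = True
useful = reachable & productive
kept = [(lhs, rhs) for lhs, rhs in rules
        if lhs in useful and all(y not in nonterminals or y in useful for y in rhs)]
print(kept)  # [('S', ['a'])]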
def load_project(cls, fname, auto_update=None, make_plot=True,
draw=False, alternative_axes=None, main=False,
encoding=None, enable_post=False, new_fig=True,
clear=None, **kwargs):
"""
Load a project from a file or dict
This classmethod allows to load a project that has been stored using
the :meth:`save_project` method and reads all the data and creates the
figures.
Since the data is stored in external files when saving a project,
make sure that the data is accessible under the relative paths
as stored in the file `fname` or from the current working directory
if `fname` is a dictionary. Alternatively use the `alternative_paths`
parameter or the `pwd` parameter
Parameters
----------
fname: str or dict
The string might be the path to a file created with the
:meth:`save_project` method, or it might be a dictionary from this
method
%(InteractiveBase.parameters.auto_update)s
%(Project._add_data.parameters.make_plot)s
%(InteractiveBase.start_update.parameters.draw)s
alternative_axes: dict, None or list
alternative axes instances to use
- If it is None, the axes and figures from the saving point will be
reproduced.
- a dictionary should map from array names in the created
project to matplotlib axes instances
- a list should contain axes instances that will be used for
iteration
main: bool, optional
If True, a new main project is created and returned.
Otherwise (by default) the data is added to the current
main project.
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. Only set this
parameter to ``True`` if you know you can trust the information in
`fname`
new_fig: bool
If True (default) and `alternative_axes` is None, new figures are
created if the figure already exists
%(Project._add_data.parameters.clear)s
pwd: str or None, optional
Path to the working directory from where the data can be imported.
If None and `fname` is the path to a file, `pwd` is set to the
directory of this file. Otherwise the current working directory is
used.
%(ArrayList.from_dict.parameters.no_d|pwd)s
Other Parameters
----------------
%(ArrayList.from_dict.parameters)s
Returns
-------
Project
The project in the state of the saving point"""
from pkg_resources import iter_entry_points
def get_ax_base(name, alternatives):
ax_base = next(iter(obj(arr_name=name).axes), None)
if ax_base is None:
ax_base = next(iter(obj(arr_name=alternatives).axes), None)
if ax_base is not None:
alternatives.difference_update(obj(ax=ax_base).arr_names)
return ax_base
pwd = kwargs.pop('pwd', None)
if isinstance(fname, six.string_types):
with open(fname, 'rb') as f:
pickle_kws = {} if not encoding else {'encoding': encoding}
d = pickle.load(f, **pickle_kws)
pwd = pwd or os.path.dirname(fname)
else:
d = dict(fname)
pwd = pwd or getcwd()
# check for patches of plugins
for ep in iter_entry_points('psyplot', name='patches'):
patches = ep.load()
for arr_d in d.get('arrays').values():
plotter_cls = arr_d.get('plotter', {}).get('cls')
if plotter_cls is not None and plotter_cls in patches:
# apply the patch
patches[plotter_cls](arr_d['plotter'],
d.get('versions', {}))
fig_map = {}
if alternative_axes is None:
for fig_dict in six.itervalues(d.get('figs', {})):
orig_num = fig_dict.get('num') or 1
fig_map[orig_num] = _ProjectLoader.load_figure(
fig_dict, new_fig=new_fig).number
elif not isinstance(alternative_axes, dict):
alternative_axes = cycle(iter(alternative_axes))
obj = cls.from_dict(d['arrays'], pwd=pwd, **kwargs)
if main:
# we create a new project with the project factory to make sure
# that everything is handled correctly
obj = project(None, obj)
axes = {}
arr_names = obj.arr_names
sharex = defaultdict(set)
sharey = defaultdict(set)
for arr, (arr_name, arr_dict) in zip(
obj, filter(lambda t: t[0] in arr_names,
six.iteritems(d['arrays']))):
if not arr_dict.get('plotter'):
continue
plot_dict = arr_dict['plotter']
plotter_cls = getattr(
import_module(plot_dict['cls'][0]), plot_dict['cls'][1])
ax = None
if alternative_axes is not None:
if isinstance(alternative_axes, dict):
ax = alternative_axes.get(arr.arr_name)
else:
ax = next(alternative_axes, None)
if ax is None and 'ax' in plot_dict:
already_opened = plot_dict['ax'].get(
'shared', set()).intersection(axes)
if already_opened:
ax = axes[next(iter(already_opened))]
else:
plot_dict['ax'].pop('shared', None)
plot_dict['ax']['fig'] = fig_map[
plot_dict['ax'].get('fig') or 1]
if plot_dict['ax'].get('sharex'):
sharex[plot_dict['ax'].pop('sharex')].add(
arr.psy.arr_name)
if plot_dict['ax'].get('sharey'):
sharey[plot_dict['ax'].pop('sharey')].add(
arr.psy.arr_name)
axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes(
plot_dict['ax'])
plotter_cls(
arr, make_plot=False, draw=False, clear=False,
ax=ax, project=obj.main, enable_post=enable_post,
**plot_dict['fmt'])
# handle shared x and y-axes
for key, names in sharex.items():
ax_base = get_ax_base(key, names)
if ax_base is not None:
ax_base.get_shared_x_axes().join(
ax_base, *obj(arr_name=names).axes)
for ax in obj(arr_name=names).axes:
ax._sharex = ax_base
for key, names in sharey.items():
ax_base = get_ax_base(key, names)
if ax_base is not None:
ax_base.get_shared_y_axes().join(
ax_base, *obj(arr_name=names).axes)
for ax in obj(arr_name=names).axes:
ax._sharey = ax_base
for arr in obj.with_plotter:
shared = d['arrays'][arr.psy.arr_name]['plotter'].get('shared', {})
for key, arr_names in six.iteritems(shared):
arr.psy.plotter.share(obj(arr_name=arr_names).plotters,
keys=[key])
if make_plot:
for plotter in obj.plotters:
plotter.reinit(
draw=False,
clear=clear or (
clear is None and
plotter_cls._get_sample_projection() is not None))
if draw is None:
draw = rcParams['auto_draw']
if draw:
obj.draw()
if rcParams['auto_show']:
obj.show()
if auto_update is None:
auto_update = rcParams['lists.auto_update']
if not main:
obj._main = gcp(True)
obj.main.extend(obj, new_name=True)
obj.no_auto_update = not auto_update
scp(obj)
return obj | Load a project from a file or dict
This classmethod allows to load a project that has been stored using
the :meth:`save_project` method and reads all the data and creates the
figures.
Since the data is stored in external files when saving a project,
make sure that the data is accessible under the relative paths
as stored in the file `fname` or from the current working directory
if `fname` is a dictionary. Alternatively use the `alternative_paths`
parameter or the `pwd` parameter
Parameters
----------
fname: str or dict
The string might be the path to a file created with the
:meth:`save_project` method, or it might be a dictionary from this
method
%(InteractiveBase.parameters.auto_update)s
%(Project._add_data.parameters.make_plot)s
%(InteractiveBase.start_update.parameters.draw)s
alternative_axes: dict, None or list
alternative axes instances to use
- If it is None, the axes and figures from the saving point will be
reproduced.
- a dictionary should map from array names in the created
project to matplotlib axes instances
- a list should contain axes instances that will be used for
iteration
main: bool, optional
If True, a new main project is created and returned.
Otherwise (by default) the data is added to the current
main project.
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. Only set this
parameter to ``True`` if you know you can trust the information in
`fname`
new_fig: bool
If True (default) and `alternative_axes` is None, new figures are
created if the figure already exists
%(Project._add_data.parameters.clear)s
pwd: str or None, optional
Path to the working directory from where the data can be imported.
If None and `fname` is the path to a file, `pwd` is set to the
directory of this file. Otherwise the current working directory is
used.
%(ArrayList.from_dict.parameters.no_d|pwd)s
Other Parameters
----------------
%(ArrayList.from_dict.parameters)s
Returns
-------
Project
The project in the state of the saving point | Below is the the instruction that describes the task:
### Input:
Load a project from a file or dict
This classmethod allows to load a project that has been stored using
the :meth:`save_project` method and reads all the data and creates the
figures.
Since the data is stored in external files when saving a project,
make sure that the data is accessible under the relative paths
as stored in the file `fname` or from the current working directory
if `fname` is a dictionary. Alternatively use the `alternative_paths`
parameter or the `pwd` parameter
Parameters
----------
fname: str or dict
The string might be the path to a file created with the
:meth:`save_project` method, or it might be a dictionary from this
method
%(InteractiveBase.parameters.auto_update)s
%(Project._add_data.parameters.make_plot)s
%(InteractiveBase.start_update.parameters.draw)s
alternative_axes: dict, None or list
alternative axes instances to use
- If it is None, the axes and figures from the saving point will be
reproduced.
- a dictionary should map from array names in the created
project to matplotlib axes instances
- a list should contain axes instances that will be used for
iteration
main: bool, optional
If True, a new main project is created and returned.
Otherwise (by default) the data is added to the current
main project.
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. Only set this
parameter to ``True`` if you know you can trust the information in
`fname`
new_fig: bool
If True (default) and `alternative_axes` is None, new figures are
created if the figure already exists
%(Project._add_data.parameters.clear)s
pwd: str or None, optional
Path to the working directory from where the data can be imported.
If None and `fname` is the path to a file, `pwd` is set to the
directory of this file. Otherwise the current working directory is
used.
%(ArrayList.from_dict.parameters.no_d|pwd)s
Other Parameters
----------------
%(ArrayList.from_dict.parameters)s
Returns
-------
Project
The project in the state of the saving point
### Response:
def load_project(cls, fname, auto_update=None, make_plot=True,
draw=False, alternative_axes=None, main=False,
encoding=None, enable_post=False, new_fig=True,
clear=None, **kwargs):
"""
Load a project from a file or dict
This classmethod allows to load a project that has been stored using
the :meth:`save_project` method and reads all the data and creates the
figures.
Since the data is stored in external files when saving a project,
make sure that the data is accessible under the relative paths
as stored in the file `fname` or from the current working directory
if `fname` is a dictionary. Alternatively use the `alternative_paths`
parameter or the `pwd` parameter
Parameters
----------
fname: str or dict
The string might be the path to a file created with the
:meth:`save_project` method, or it might be a dictionary from this
method
%(InteractiveBase.parameters.auto_update)s
%(Project._add_data.parameters.make_plot)s
%(InteractiveBase.start_update.parameters.draw)s
alternative_axes: dict, None or list
alternative axes instances to use
- If it is None, the axes and figures from the saving point will be
reproduced.
- a dictionary should map from array names in the created
project to matplotlib axes instances
- a list should contain axes instances that will be used for
iteration
main: bool, optional
If True, a new main project is created and returned.
Otherwise (by default) the data is added to the current
main project.
encoding: str
The encoding to use for loading the project. If None, it is
automatically determined by pickle. Note: Set this to ``'latin1'``
if using a project created with python2 on python3.
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. Only set this
parameter to ``True`` if you know you can trust the information in
`fname`
new_fig: bool
If True (default) and `alternative_axes` is None, new figures are
created if the figure already exists
%(Project._add_data.parameters.clear)s
pwd: str or None, optional
Path to the working directory from where the data can be imported.
If None and `fname` is the path to a file, `pwd` is set to the
directory of this file. Otherwise the current working directory is
used.
%(ArrayList.from_dict.parameters.no_d|pwd)s
Other Parameters
----------------
%(ArrayList.from_dict.parameters)s
Returns
-------
Project
The project in the state of the saving point"""
from pkg_resources import iter_entry_points
def get_ax_base(name, alternatives):
ax_base = next(iter(obj(arr_name=name).axes), None)
if ax_base is None:
ax_base = next(iter(obj(arr_name=alternatives).axes), None)
if ax_base is not None:
alternatives.difference_update(obj(ax=ax_base).arr_names)
return ax_base
pwd = kwargs.pop('pwd', None)
if isinstance(fname, six.string_types):
with open(fname, 'rb') as f:
pickle_kws = {} if not encoding else {'encoding': encoding}
d = pickle.load(f, **pickle_kws)
pwd = pwd or os.path.dirname(fname)
else:
d = dict(fname)
pwd = pwd or getcwd()
# check for patches of plugins
for ep in iter_entry_points('psyplot', name='patches'):
patches = ep.load()
for arr_d in d.get('arrays').values():
plotter_cls = arr_d.get('plotter', {}).get('cls')
if plotter_cls is not None and plotter_cls in patches:
# apply the patch
patches[plotter_cls](arr_d['plotter'],
d.get('versions', {}))
fig_map = {}
if alternative_axes is None:
for fig_dict in six.itervalues(d.get('figs', {})):
orig_num = fig_dict.get('num') or 1
fig_map[orig_num] = _ProjectLoader.load_figure(
fig_dict, new_fig=new_fig).number
elif not isinstance(alternative_axes, dict):
alternative_axes = cycle(iter(alternative_axes))
obj = cls.from_dict(d['arrays'], pwd=pwd, **kwargs)
if main:
# we create a new project with the project factory to make sure
# that everything is handled correctly
obj = project(None, obj)
axes = {}
arr_names = obj.arr_names
sharex = defaultdict(set)
sharey = defaultdict(set)
for arr, (arr_name, arr_dict) in zip(
obj, filter(lambda t: t[0] in arr_names,
six.iteritems(d['arrays']))):
if not arr_dict.get('plotter'):
continue
plot_dict = arr_dict['plotter']
plotter_cls = getattr(
import_module(plot_dict['cls'][0]), plot_dict['cls'][1])
ax = None
if alternative_axes is not None:
if isinstance(alternative_axes, dict):
ax = alternative_axes.get(arr.arr_name)
else:
ax = next(alternative_axes, None)
if ax is None and 'ax' in plot_dict:
already_opened = plot_dict['ax'].get(
'shared', set()).intersection(axes)
if already_opened:
ax = axes[next(iter(already_opened))]
else:
plot_dict['ax'].pop('shared', None)
plot_dict['ax']['fig'] = fig_map[
plot_dict['ax'].get('fig') or 1]
if plot_dict['ax'].get('sharex'):
sharex[plot_dict['ax'].pop('sharex')].add(
arr.psy.arr_name)
if plot_dict['ax'].get('sharey'):
sharey[plot_dict['ax'].pop('sharey')].add(
arr.psy.arr_name)
axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes(
plot_dict['ax'])
plotter_cls(
arr, make_plot=False, draw=False, clear=False,
ax=ax, project=obj.main, enable_post=enable_post,
**plot_dict['fmt'])
# handle shared x and y-axes
for key, names in sharex.items():
ax_base = get_ax_base(key, names)
if ax_base is not None:
ax_base.get_shared_x_axes().join(
ax_base, *obj(arr_name=names).axes)
for ax in obj(arr_name=names).axes:
ax._sharex = ax_base
for key, names in sharey.items():
ax_base = get_ax_base(key, names)
if ax_base is not None:
ax_base.get_shared_y_axes().join(
ax_base, *obj(arr_name=names).axes)
for ax in obj(arr_name=names).axes:
ax._sharey = ax_base
for arr in obj.with_plotter:
shared = d['arrays'][arr.psy.arr_name]['plotter'].get('shared', {})
for key, arr_names in six.iteritems(shared):
arr.psy.plotter.share(obj(arr_name=arr_names).plotters,
keys=[key])
if make_plot:
for plotter in obj.plotters:
plotter.reinit(
draw=False,
clear=clear or (
clear is None and
plotter_cls._get_sample_projection() is not None))
if draw is None:
draw = rcParams['auto_draw']
if draw:
obj.draw()
if rcParams['auto_show']:
obj.show()
if auto_update is None:
auto_update = rcParams['lists.auto_update']
if not main:
obj._main = gcp(True)
obj.main.extend(obj, new_name=True)
obj.no_auto_update = not auto_update
scp(obj)
return obj |
def turnstile_command(conf_file, command, arguments=[], channel=None,
debug=False):
"""
Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command.
"""
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
control_channel = conf['control'].get('channel', 'control')
# Now, set up the command
command = command.lower()
ts_conv = False
if command == 'ping':
# We handle 'ping' specially; first, figure out the channel
if arguments:
channel = arguments[0]
else:
channel = str(uuid.uuid4())
arguments = [channel]
# Next, add on a timestamp
if len(arguments) < 2:
arguments.append(time.time())
ts_conv = True
# Limit the argument list length
arguments = arguments[:2]
# OK, the command is all set up. Let us now send the command...
if debug:
cmd = [command] + arguments
print >>sys.stderr, ("Issuing command: %s" %
' '.join(cmd))
database.command(db, control_channel, command, *arguments)
# Were we asked to listen on a channel?
if not channel:
return
# OK, let's subscribe to the channel...
pubsub = db.pubsub()
pubsub.subscribe(channel)
# Now we listen...
try:
count = 0
for msg in pubsub.listen():
# Make sure the message is one we're interested in
if debug:
formatted = pprint.pformat(msg)
print >>sys.stderr, "Received message: %s" % formatted
if (msg['type'] not in ('pmessage', 'message') or
msg['channel'] != channel):
continue
count += 1
# Figure out the response
response = msg['data'].split(':')
# If this is a 'pong' and ts_conv is true, add an RTT to
# the response
if ts_conv and response[0] == 'pong':
try:
rtt = (time.time() - float(response[2])) * 1000
response.append('(RTT %.2fms)' % rtt)
except Exception:
# IndexError or ValueError, probably; ignore it
pass
# Print out the response
print "Response % 5d: %s" % (count, ' '.join(response))
except KeyboardInterrupt:
# We want to break out of the loop, but not return any error
# to the caller...
pass | Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command. | Below is the the instruction that describes the task:
### Input:
Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command.
### Response:
def turnstile_command(conf_file, command, arguments=[], channel=None,
debug=False):
"""
Issue a command to all running control daemons.
:param conf_file: Name of the configuration file.
:param command: The command to execute. Note that 'ping' is
handled specially; in particular, the "channel"
parameter is implied. (A random value will be
used for the channel to listen on.)
:param arguments: A list of arguments for the command. Note that
the colon character (':') cannot be used.
:param channel: If not None, specifies the name of a message
channel to listen for responses on. Will wait
indefinitely; to terminate the listening loop, use
the keyboard interrupt sequence.
:param debug: If True, debugging messages are emitted while
sending the command.
"""
# Connect to the database...
conf = config.Config(conf_file=conf_file)
db = conf.get_database()
control_channel = conf['control'].get('channel', 'control')
# Now, set up the command
command = command.lower()
ts_conv = False
if command == 'ping':
# We handle 'ping' specially; first, figure out the channel
if arguments:
channel = arguments[0]
else:
channel = str(uuid.uuid4())
arguments = [channel]
# Next, add on a timestamp
if len(arguments) < 2:
arguments.append(time.time())
ts_conv = True
# Limit the argument list length
arguments = arguments[:2]
# OK, the command is all set up. Let us now send the command...
if debug:
cmd = [command] + arguments
print >>sys.stderr, ("Issuing command: %s" %
' '.join(cmd))
database.command(db, control_channel, command, *arguments)
# Were we asked to listen on a channel?
if not channel:
return
# OK, let's subscribe to the channel...
pubsub = db.pubsub()
pubsub.subscribe(channel)
# Now we listen...
try:
count = 0
for msg in pubsub.listen():
# Make sure the message is one we're interested in
if debug:
formatted = pprint.pformat(msg)
print >>sys.stderr, "Received message: %s" % formatted
if (msg['type'] not in ('pmessage', 'message') or
msg['channel'] != channel):
continue
count += 1
# Figure out the response
response = msg['data'].split(':')
# If this is a 'pong' and ts_conv is true, add an RTT to
# the response
if ts_conv and response[0] == 'pong':
try:
rtt = (time.time() - float(response[2])) * 1000
response.append('(RTT %.2fms)' % rtt)
except Exception:
# IndexError or ValueError, probably; ignore it
pass
# Print out the response
print "Response % 5d: %s" % (count, ' '.join(response))
except KeyboardInterrupt:
# We want to break out of the loop, but not return any error
# to the caller...
pass |
def create_email(self, name, subject, html, text=''):
""" [DECPRECATED] API call to create an email """
return self.create_template(name, subject, html, text) | [DEPRECATED] API call to create an email | Below is the the instruction that describes the task:
### Input:
[DEPRECATED] API call to create an email
### Response:
def create_email(self, name, subject, html, text=''):
""" [DECPRECATED] API call to create an email """
return self.create_template(name, subject, html, text) |
def _raw_pack(key_handle, flags, data):
"""
Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command.
"""
# #define YHSM_HMAC_RESET 0x01 // Flag to indicate reset at first packet
# #define YHSM_HMAC_FINAL 0x02 // Flag to indicate that the hash shall be calculated
# typedef struct {
# uint32_t keyHandle; // Key handle
# uint8_t flags; // Flags
# uint8_t numBytes; // Number of bytes in data packet
# uint8_t data[YHSM_MAX_PKT_SIZE - 6]; // Data to be written
# } YHSM_HMAC_SHA1_GENERATE_REQ;
return struct.pack('<IBB', key_handle, flags, len(data)) + data | Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command. | Below is the the instruction that describes the task:
### Input:
Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command.
### Response:
def _raw_pack(key_handle, flags, data):
"""
Common code for packing payload to YHSM_HMAC_SHA1_GENERATE command.
"""
# #define YHSM_HMAC_RESET 0x01 // Flag to indicate reset at first packet
# #define YHSM_HMAC_FINAL 0x02 // Flag to indicate that the hash shall be calculated
# typedef struct {
# uint32_t keyHandle; // Key handle
# uint8_t flags; // Flags
# uint8_t numBytes; // Number of bytes in data packet
# uint8_t data[YHSM_MAX_PKT_SIZE - 6]; // Data to be written
# } YHSM_HMAC_SHA1_GENERATE_REQ;
return struct.pack('<IBB', key_handle, flags, len(data)) + data |
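Because the header is a little-endian uint32 key handle followed by two single bytes (flags and payload length), the packed request is easy to check by hand; a quick sketch with arbitrary values:
import struct
key_handle, flags, data = 0x1234, 0x01, b'abc'
packed = struct.pack('<IBB', key_handle, flags, len(data)) + data
print(packed.hex())  # '341200000103616263': 34 12 00 00 (key handle, LE), 01 (flags), 03 (length), 61 62 63 ('abc')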
def _stop_ubridge_capture(self, adapter_number):
"""
Stop a packet capture in uBridge.
:param adapter_number: adapter number
"""
vnet = "ethernet{}.vnet".format(adapter_number)
if vnet not in self._vmx_pairs:
raise VMwareError("vnet {} not in VMX file".format(vnet))
if not self._ubridge_hypervisor:
raise VMwareError("Cannot stop the packet capture: uBridge is not running")
yield from self._ubridge_send("bridge stop_capture {name}".format(name=vnet)) | Stop a packet capture in uBridge.
:param adapter_number: adapter number | Below is the the instruction that describes the task:
### Input:
Stop a packet capture in uBridge.
:param adapter_number: adapter number
### Response:
def _stop_ubridge_capture(self, adapter_number):
"""
Stop a packet capture in uBridge.
:param adapter_number: adapter number
"""
vnet = "ethernet{}.vnet".format(adapter_number)
if vnet not in self._vmx_pairs:
raise VMwareError("vnet {} not in VMX file".format(vnet))
if not self._ubridge_hypervisor:
raise VMwareError("Cannot stop the packet capture: uBridge is not running")
yield from self._ubridge_send("bridge stop_capture {name}".format(name=vnet)) |
def revnet_cifar_base():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
return hparams | Tiny hparams suitable for CIFAR/etc. | Below is the the instruction that describes the task:
### Input:
Tiny hparams suitable for CIFAR/etc.
### Response:
def revnet_cifar_base():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
return hparams |
def normalize_path(path, base_path=u'/', is_dir=None):
u"""
Normalize a path to use it with a gitmatch pattern.
This ensures that the separators are forward slashes.
If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The
path root is then changed to be based off `base_path`.
:type path: text_type
:param path: A POSIX path to normalize
:type base_path: text_type
:param base_path: A POSIX path to the base directory, `path` must be inside `base_path`.
:type is_dir: bool or None
:param is_dir: If `True`, adds a trailing slash. If `False`, removes any trailing slash. If
`None`, keeps the current ending.
:return:
"""
path = posixpath.normpath(path)
base_path = posixpath.normpath(base_path)
if len(base_path) == 0:
raise ValueError(u'`project_root` cannot be an empty string after normalization')
if base_path[-1] != u'/':
base_path += u'/'
if path.startswith(base_path):
path = u'/' + posixpath.relpath(path, base_path)
elif path.startswith(u'/'):
raise ValueError(u'`path` ({}) is absolute but not inside base_path ({})'.format(path,
base_path))
if is_dir is None:
return path
elif is_dir and path[-1:] != u'/':
return path + u'/'
elif not is_dir and path[-1:] == u'/':
return path[:-1]
return path | u"""
Normalize a path to use it with a gitmatch pattern.
This ensures that the separators are forward slashes.
If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The
path root is then changed to be based off `base_path`.
:type path: text_type
:param path: A POSIX path to normalize
:type base_path: text_type
:param base_path: A POSIX path to the base directory, `path` must be inside `base_path`.
:type is_dir: bool or None
:param is_dir: If `True`, adds a trailing slash. If `False`, removes any trailing slash. If
`None`, keeps the current ending.
:return: | Below is the the instruction that describes the task:
### Input:
u"""
Normalize a path to use it with a gitmatch pattern.
This ensures that the separators are forward slashes.
If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The
path root is then changed to be based off `base_path`.
:type path: text_type
:param path: A POSIX path to normalize
:type base_path: text_type
:param base_path: A POSIX path to the base directory, `path` must be inside `base_path`.
:type is_dir: bool or None
:param is_dir: If `True`, adds a trailing slash. If `False`, removes any trailing slash. If
`None`, keeps the current ending.
:return:
### Response:
def normalize_path(path, base_path=u'/', is_dir=None):
u"""
Normalize a path to use it with a gitmatch pattern.
This ensures that the separators are forward slashes.
If a path is rooted (starts with a slash), it has to be a subdirectory of `base_path`. The
path root is then changed to be based off `base_path`.
:type path: text_type
:param path: A POSIX path to normalize
:type base_path: text_type
:param base_path: A POSIX path to the base directory, `path` must be inside `base_path`.
:type is_dir: bool or None
:param is_dir: If `True`, adds a trailing slash. If `False`, removes any trailing slash. If
`None`, keeps the current ending.
:return:
"""
path = posixpath.normpath(path)
base_path = posixpath.normpath(base_path)
if len(base_path) == 0:
raise ValueError(u'`project_root` cannot be an empty string after normalization')
if base_path[-1] != u'/':
base_path += u'/'
if path.startswith(base_path):
path = u'/' + posixpath.relpath(path, base_path)
elif path.startswith(u'/'):
raise ValueError(u'`path` ({}) is absolute but not inside base_path ({})'.format(path,
base_path))
if is_dir is None:
return path
elif is_dir and path[-1:] != u'/':
return path + u'/'
elif not is_dir and path[-1:] == u'/':
return path[:-1]
return path |
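The heart of the function is posixpath.normpath plus a relpath-based rebase of rooted paths onto base_path; a small standalone illustration of that rebasing step (the paths are made up):
import posixpath
base_path = posixpath.normpath('/project') + '/'
path = posixpath.normpath('/project/src/./main.py')
if path.startswith(base_path):
    path = '/' + posixpath.relpath(path, base_path)
print(path)  # /src/main.py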
def getSingleVisualProperty(self, visualProperty, verbose=None):
"""
Return the Visual Property specified by the `visualProperty` parameter.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param visualProperty: ID of the Visual Property
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'styles/visualproperties/'+str(visualProperty)+'', method="GET", verbose=verbose, parse_params=False)
return response | Return the Visual Property specified by the `visualProperty` parameter.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param visualProperty: ID of the Visual Property
:param verbose: print more
:returns: 200: successful operation | Below is the the instruction that describes the task:
### Input:
Return the Visual Property specified by the `visualProperty` parameter.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param visualProperty: ID of the Visual Property
:param verbose: print more
:returns: 200: successful operation
### Response:
def getSingleVisualProperty(self, visualProperty, verbose=None):
"""
Return the Visual Property specified by the `visualProperty` parameter.
Additional details on common Visual Properties can be found in the [Basic Visual Lexicon JavaDoc API](http://chianti.ucsd.edu/cytoscape-3.6.1/API/org/cytoscape/view/presentation/property/BasicVisualLexicon.html)
:param visualProperty: ID of the Visual Property
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'styles/visualproperties/'+str(visualProperty)+'', method="GET", verbose=verbose, parse_params=False)
return response |
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list} | Return a matching (possibly munged) command, if found in rule. | Below is the the instruction that describes the task:
### Input:
Return a matching (possibly munged) command, if found in rule.
### Response:
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list} |
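The three comparison modes (prefix match for allow_trailing_args, regex search for pcre_match, exact list equality otherwise) can be exercised directly; a sketch with made-up commands and a made-up rule pattern:
import re
original = 'git pull origin master'.split()
rule_cmd = 'git pull'.split()
# allow_trailing_args: the rule only has to match the start of the command list
print(original[:len(rule_cmd)] == rule_cmd)  # True
# pcre_match: the rule string is treated as a regex over the raw command string
print(bool(re.search(r'^git (pull|fetch)\b', 'git pull origin master')))  # True
# default: the whole argument list must match exactly
print(rule_cmd == original)  # False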
def nac_v(msg):
"""Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NACv = common.bin2int(msgbin[42:45])
try:
HFOMr = uncertainty.NACv[NACv]['HFOMr']
VFOMr = uncertainty.NACv[NACv]['VFOMr']
except KeyError:
HFOMr, VFOMr = uncertainty.NA, uncertainty.NA
return HFOMr, VFOMr | Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit | Below is the the instruction that describes the task:
### Input:
Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
### Response:
def nac_v(msg):
"""Calculate NACv, Navigation Accuracy Category - Velocity
Args:
msg (string): 28 bytes hexadecimal message string, TC = 19
Returns:
int or string: 95% horizontal accuracy bounds for velocity, Horizontal Figure of Merit
int or string: 95% vertical accuracy bounds for velocity, Vertical Figure of Merit
"""
tc = typecode(msg)
if tc != 19:
raise RuntimeError("%s: Not an airborne velocity message, expecting TC = 19" % msg)
msgbin = common.hex2bin(msg)
NACv = common.bin2int(msgbin[42:45])
try:
HFOMr = uncertainty.NACv[NACv]['HFOMr']
VFOMr = uncertainty.NACv[NACv]['VFOMr']
except KeyError:
HFOMr, VFOMr = uncertainty.NA, uncertainty.NA
return HFOMr, VFOMr |
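The hex2bin/bin2int helpers reduce to a hex-to-bit-string conversion and a slice of bits 42..44 for the 3-bit NACv field; a standalone sketch on an arbitrary placeholder string (not a real TC=19 frame, so the decoded value is meaningless here):
msg = '8D40621D9B0000000000005C3F1E'  # placeholder 28-character hex string, for illustration only
msgbin = bin(int(msg, 16))[2:].zfill(len(msg) * 4)
nacv = int(msgbin[42:45], 2)
print(nacv)  # an integer in 0..7 that would index the uncertainty.NACv table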
def create_server(cloud, **kwargs):
"""
Create a new instance
"""
if cloud == 'ec2':
_create_server_ec2(**kwargs)
elif cloud == 'rackspace':
_create_server_rackspace(**kwargs)
elif cloud == 'gce':
_create_server_gce(**kwargs)
else:
raise ValueError("Unknown cloud type: {}".format(cloud)) | Create a new instance | Below is the the instruction that describes the task:
### Input:
Create a new instance
### Response:
def create_server(cloud, **kwargs):
"""
Create a new instance
"""
if cloud == 'ec2':
_create_server_ec2(**kwargs)
elif cloud == 'rackspace':
_create_server_rackspace(**kwargs)
elif cloud == 'gce':
_create_server_gce(**kwargs)
else:
raise ValueError("Unknown cloud type: {}".format(cloud)) |
def permission_required_with_ajax(perm, login_url=None):
"""
Decorator for views that checks whether a user has a particular
permission enabled, redirecting to the log-in page if necessary,
but returns a special response for ajax requests. See
:meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`.
Usage is the same as
:meth:`django.contrib.auth.decorators.permission_required` ::
@permission_required_with_ajax('polls.can_vote', login_url='/loginpage/')
def my_view(request):
...
"""
return user_passes_test_with_ajax(lambda u: u.has_perm(perm), login_url=login_url) | Decorator for views that checks whether a user has a particular
permission enabled, redirecting to the log-in page if necessary,
but returns a special response for ajax requests. See
:meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`.
Usage is the same as
:meth:`django.contrib.auth.decorators.permission_required` ::
@permission_required_with_ajax('polls.can_vote', login_url='/loginpage/')
def my_view(request):
... | Below is the the instruction that describes the task:
### Input:
Decorator for views that checks whether a user has a particular
permission enabled, redirecting to the log-in page if necessary,
but returns a special response for ajax requests. See
:meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`.
Usage is the same as
:meth:`django.contrib.auth.decorators.permission_required` ::
@permission_required_with_ajax('polls.can_vote', login_url='/loginpage/')
def my_view(request):
...
### Response:
def permission_required_with_ajax(perm, login_url=None):
"""
Decorator for views that checks whether a user has a particular
permission enabled, redirecting to the log-in page if necessary,
but returns a special response for ajax requests. See
:meth:`eulcore.django.auth.decorators.user_passes_test_with_ajax`.
Usage is the same as
:meth:`django.contrib.auth.decorators.permission_required` ::
@permission_required_with_ajax('polls.can_vote', login_url='/loginpage/')
def my_view(request):
...
"""
return user_passes_test_with_ajax(lambda u: u.has_perm(perm), login_url=login_url) |
def get_website_endpoint(self):
"""
Returns the fully qualified hostname to use if you want to access this
bucket as a website. This doesn't validate whether the bucket has
been correctly configured as a website or not.
"""
l = [self.name]
l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
l.append('.'.join(self.connection.host.split('.')[-2:]))
return '.'.join(l) | Returns the fully qualified hostname to use if you want to access this
bucket as a website. This doesn't validate whether the bucket has
been correctly configured as a website or not. | Below is the the instruction that describes the task:
### Input:
Returns the fully qualified hostname to use if you want to access this
bucket as a website. This doesn't validate whether the bucket has
been correctly configured as a website or not.
### Response:
def get_website_endpoint(self):
"""
Returns the fully qualified hostname to use if you want to access this
bucket as a website. This doesn't validate whether the bucket has
been correctly configured as a website or not.
"""
l = [self.name]
l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
l.append('.'.join(self.connection.host.split('.')[-2:]))
return '.'.join(l) |
def batch_predict(training_dir, prediction_input_file, output_dir,
mode, batch_size=16, shard_files=True, output_format='csv',
cloud=False):
"""Blocking versoin of batch_predict.
See documentation of batch_predict_async.
"""
job = batch_predict_async(
training_dir=training_dir,
prediction_input_file=prediction_input_file,
output_dir=output_dir,
mode=mode,
batch_size=batch_size,
shard_files=shard_files,
output_format=output_format,
cloud=cloud)
job.wait()
print('Batch predict: ' + str(job.state)) | Blocking version of batch_predict.
See documentation of batch_predict_async. | Below is the the instruction that describes the task:
### Input:
Blocking version of batch_predict.
See documentation of batch_predict_async.
### Response:
def batch_predict(training_dir, prediction_input_file, output_dir,
mode, batch_size=16, shard_files=True, output_format='csv',
cloud=False):
"""Blocking versoin of batch_predict.
See documentation of batch_predict_async.
"""
job = batch_predict_async(
training_dir=training_dir,
prediction_input_file=prediction_input_file,
output_dir=output_dir,
mode=mode,
batch_size=batch_size,
shard_files=shard_files,
output_format=output_format,
cloud=cloud)
job.wait()
print('Batch predict: ' + str(job.state)) |
def create_snapshot(self, volume, name=None, description=None, force=False):
"""
Creates a snapshot of the volume, with an optional name and description.
Normally snapshots will not happen if the volume is attached. To
override this default behavior, pass force=True.
"""
return self._snapshot_manager.create(volume=volume, name=name,
description=description, force=force) | Creates a snapshot of the volume, with an optional name and description.
Normally snapshots will not happen if the volume is attached. To
override this default behavior, pass force=True. | Below is the the instruction that describes the task:
### Input:
Creates a snapshot of the volume, with an optional name and description.
Normally snapshots will not happen if the volume is attached. To
override this default behavior, pass force=True.
### Response:
def create_snapshot(self, volume, name=None, description=None, force=False):
"""
Creates a snapshot of the volume, with an optional name and description.
Normally snapshots will not happen if the volume is attached. To
override this default behavior, pass force=True.
"""
return self._snapshot_manager.create(volume=volume, name=name,
description=description, force=force) |
def clear_selection(self):
"""Clears text cursor selection."""
text_cursor = self._editor.textCursor()
text_cursor.clearSelection()
self._editor.setTextCursor(text_cursor) | Clears text cursor selection. | Below is the the instruction that describes the task:
### Input:
Clears text cursor selection.
### Response:
def clear_selection(self):
"""Clears text cursor selection."""
text_cursor = self._editor.textCursor()
text_cursor.clearSelection()
self._editor.setTextCursor(text_cursor) |
def import_authors(self, tree):
"""
Retrieve all the authors used in posts
and convert them to new or existing authors and
return the conversion.
"""
self.write_out(self.style.STEP('- Importing authors\n'))
post_authors = set()
for item in tree.findall('channel/item'):
post_type = item.find('{%s}post_type' % WP_NS).text
if post_type == 'post':
post_authors.add(item.find(
'{http://purl.org/dc/elements/1.1/}creator').text)
self.write_out('> %i authors found.\n' % len(post_authors))
authors = {}
for post_author in post_authors:
if self.default_author:
authors[post_author] = self.default_author
else:
authors[post_author] = self.migrate_author(
post_author.replace(' ', '-'))
return authors | Retrieve all the authors used in posts
and convert them to new or existing authors and
return the conversion. | Below is the the instruction that describes the task:
### Input:
Retrieve all the authors used in posts
and convert them to new or existing authors and
return the conversion.
### Response:
def import_authors(self, tree):
"""
Retrieve all the authors used in posts
and convert them to new or existing authors and
return the conversion.
"""
self.write_out(self.style.STEP('- Importing authors\n'))
post_authors = set()
for item in tree.findall('channel/item'):
post_type = item.find('{%s}post_type' % WP_NS).text
if post_type == 'post':
post_authors.add(item.find(
'{http://purl.org/dc/elements/1.1/}creator').text)
self.write_out('> %i authors found.\n' % len(post_authors))
authors = {}
for post_author in post_authors:
if self.default_author:
authors[post_author] = self.default_author
else:
authors[post_author] = self.migrate_author(
post_author.replace(' ', '-'))
return authors |
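The namespace-qualified lookups are plain ElementTree usage; a self-contained sketch on a tiny made-up WXR-style document (the wp namespace URI below is assumed for illustration and varies with the export version):
import xml.etree.ElementTree as ET
WP_NS = 'http://wordpress.org/export/1.2/'   # assumed for this sketch
DC_NS = 'http://purl.org/dc/elements/1.1/'
xml_doc = ('<rss xmlns:wp="{wp}" xmlns:dc="{dc}"><channel>'
           '<item><wp:post_type>post</wp:post_type><dc:creator>alice</dc:creator></item>'
           '<item><wp:post_type>page</wp:post_type><dc:creator>bob</dc:creator></item>'
           '</channel></rss>').format(wp=WP_NS, dc=DC_NS)
tree = ET.fromstring(xml_doc)
authors = {item.find('{%s}creator' % DC_NS).text
           for item in tree.findall('channel/item')
           if item.find('{%s}post_type' % WP_NS).text == 'post'}
print(authors)  # {'alice'}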
def set(self, name, value):
"""
Stores the given variable/value in the object for later retrieval.
:type name: string
:param name: The name of the variable.
:type value: object
:param value: The value of the variable.
"""
if self.vars is None:
self.vars = {}
self.vars[name] = value | Stores the given variable/value in the object for later retrieval.
:type name: string
:param name: The name of the variable.
:type value: object
:param value: The value of the variable. | Below is the the instruction that describes the task:
### Input:
Stores the given variable/value in the object for later retrieval.
:type name: string
:param name: The name of the variable.
:type value: object
:param value: The value of the variable.
### Response:
def set(self, name, value):
"""
Stores the given variable/value in the object for later retrieval.
:type name: string
:param name: The name of the variable.
:type value: object
:param value: The value of the variable.
"""
if self.vars is None:
self.vars = {}
self.vars[name] = value |
def arc(self,
radius,
initial_angle,
final_angle,
number_of_points=0.01,
max_points=199,
final_width=None,
final_distance=None,
layer=0,
datatype=0):
"""
Add a curved section to the path.
Parameters
----------
radius : number
Central radius of the section.
initial_angle : number
Initial angle of the curve (in *radians*).
final_angle : number
Final angle of the curve (in *radians*).
number_of_points : integer or float
If integer: number of vertices that form the object
(polygonal approximation). If float: approximate curvature
resolution. The actual number of points is automatically
calculated.
max_points : integer
if ``number_of_points > max_points``, the element will be
fractured in smaller polygons with at most ``max_points``
each.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : ``Path``
This object.
Notes
-----
The GDSII specification supports only a maximum of 199 vertices
per polygon.
"""
warn = True
cx = self.x - radius * numpy.cos(initial_angle)
cy = self.y - radius * numpy.sin(initial_angle)
self.x = cx + radius * numpy.cos(final_angle)
self.y = cy + radius * numpy.sin(final_angle)
if final_angle > initial_angle:
self.direction = final_angle + numpy.pi * 0.5
else:
self.direction = final_angle - numpy.pi * 0.5
old_w = self.w
old_distance = self.distance
if final_width is not None:
self.w = final_width * 0.5
if final_distance is not None:
self.distance = final_distance
if isinstance(number_of_points, float):
number_of_points = 2 * int(
abs((final_angle - initial_angle) *
(radius + max(old_distance, self.distance) *
(self.n - 1) * 0.5 + max(old_w, self.w)) /
number_of_points) + 0.5) + 2
number_of_points = max(number_of_points, 3)
pieces = int(numpy.ceil(number_of_points / float(max_points)))
number_of_points = number_of_points // pieces
widths = numpy.linspace(old_w, self.w, pieces + 1)
distances = numpy.linspace(old_distance, self.distance, pieces + 1)
angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
if (self.w != 0) or (old_w != 0):
for jj in range(pieces):
for ii in range(self.n):
self.polygons.append(numpy.zeros((number_of_points, 2)))
r0 = radius + ii * distances[jj + 1] - (
self.n - 1) * distances[jj + 1] * 0.5
old_r0 = radius + ii * distances[jj] - (
self.n - 1) * distances[jj] * 0.5
pts2 = number_of_points // 2
pts1 = number_of_points - pts2
ang = numpy.linspace(angles[jj], angles[jj + 1], pts1)
rad = numpy.linspace(old_r0 + widths[jj],
r0 + widths[jj + 1], pts1)
self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx
self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy
if widths[jj + 1] == 0:
pts1 -= 1
pts2 += 1
if widths[jj] == 0:
self.polygons[-1][:pts1 - 1, :] = numpy.array(
self.polygons[-1][1:pts1, :])
pts1 -= 1
pts2 += 1
ang = numpy.linspace(angles[jj + 1], angles[jj], pts2)
rad = numpy.linspace(r0 - widths[jj + 1],
old_r0 - widths[jj], pts2)
if (rad[0] <= 0 or rad[-1] <= 0) and warn:
warnings.warn(
"[GDSPY] Path arc with width larger than radius "
"created: possible self-intersecting polygon.",
stacklevel=2)
warn = False
self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx
self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy
self.length += abs((angles[jj + 1] - angles[jj]) * radius)
if isinstance(layer, list):
self.layers.extend(
(layer * (self.n // len(layer) + 1))[:self.n])
else:
self.layers.extend(layer for _ in range(self.n))
if isinstance(datatype, list):
self.datatypes.extend(
(datatype * (self.n // len(datatype) + 1))[:self.n])
else:
self.datatypes.extend(datatype for _ in range(self.n))
return self | Add a curved section to the path.
Parameters
----------
radius : number
Central radius of the section.
initial_angle : number
Initial angle of the curve (in *radians*).
final_angle : number
Final angle of the curve (in *radians*).
number_of_points : integer or float
If integer: number of vertices that form the object
(polygonal approximation). If float: approximate curvature
resolution. The actual number of points is automatically
calculated.
max_points : integer
if ``number_of_points > max_points``, the element will be
fractured in smaller polygons with at most ``max_points``
each.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : ``Path``
This object.
Notes
-----
The GDSII specification supports only a maximum of 199 vertices
per polygon. | Below is the instruction that describes the task:
### Input:
Add a curved section to the path.
Parameters
----------
radius : number
Central radius of the section.
initial_angle : number
Initial angle of the curve (in *radians*).
final_angle : number
Final angle of the curve (in *radians*).
number_of_points : integer or float
If integer: number of vertices that form the object
(polygonal approximation). If float: approximate curvature
resolution. The actual number of points is automatically
calculated.
max_points : integer
if ``number_of_points > max_points``, the element will be
fractured in smaller polygons with at most ``max_points``
each.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : ``Path``
This object.
Notes
-----
The GDSII specification supports only a maximum of 199 vertices
per polygon.
### Response:
def arc(self,
radius,
initial_angle,
final_angle,
number_of_points=0.01,
max_points=199,
final_width=None,
final_distance=None,
layer=0,
datatype=0):
"""
Add a curved section to the path.
Parameters
----------
radius : number
Central radius of the section.
initial_angle : number
Initial angle of the curve (in *radians*).
final_angle : number
Final angle of the curve (in *radians*).
number_of_points : integer or float
If integer: number of vertices that form the object
(polygonal approximation). If float: approximate curvature
resolution. The actual number of points is automatically
calculated.
max_points : integer
if ``number_of_points > max_points``, the element will be
fractured in smaller polygons with at most ``max_points``
each.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : ``Path``
This object.
Notes
-----
The GDSII specification supports only a maximum of 199 vertices
per polygon.
"""
warn = True
cx = self.x - radius * numpy.cos(initial_angle)
cy = self.y - radius * numpy.sin(initial_angle)
self.x = cx + radius * numpy.cos(final_angle)
self.y = cy + radius * numpy.sin(final_angle)
if final_angle > initial_angle:
self.direction = final_angle + numpy.pi * 0.5
else:
self.direction = final_angle - numpy.pi * 0.5
old_w = self.w
old_distance = self.distance
if final_width is not None:
self.w = final_width * 0.5
if final_distance is not None:
self.distance = final_distance
if isinstance(number_of_points, float):
number_of_points = 2 * int(
abs((final_angle - initial_angle) *
(radius + max(old_distance, self.distance) *
(self.n - 1) * 0.5 + max(old_w, self.w)) /
number_of_points) + 0.5) + 2
number_of_points = max(number_of_points, 3)
pieces = int(numpy.ceil(number_of_points / float(max_points)))
number_of_points = number_of_points // pieces
widths = numpy.linspace(old_w, self.w, pieces + 1)
distances = numpy.linspace(old_distance, self.distance, pieces + 1)
angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
if (self.w != 0) or (old_w != 0):
for jj in range(pieces):
for ii in range(self.n):
self.polygons.append(numpy.zeros((number_of_points, 2)))
r0 = radius + ii * distances[jj + 1] - (
self.n - 1) * distances[jj + 1] * 0.5
old_r0 = radius + ii * distances[jj] - (
self.n - 1) * distances[jj] * 0.5
pts2 = number_of_points // 2
pts1 = number_of_points - pts2
ang = numpy.linspace(angles[jj], angles[jj + 1], pts1)
rad = numpy.linspace(old_r0 + widths[jj],
r0 + widths[jj + 1], pts1)
self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx
self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy
if widths[jj + 1] == 0:
pts1 -= 1
pts2 += 1
if widths[jj] == 0:
self.polygons[-1][:pts1 - 1, :] = numpy.array(
self.polygons[-1][1:pts1, :])
pts1 -= 1
pts2 += 1
ang = numpy.linspace(angles[jj + 1], angles[jj], pts2)
rad = numpy.linspace(r0 - widths[jj + 1],
old_r0 - widths[jj], pts2)
if (rad[0] <= 0 or rad[-1] <= 0) and warn:
warnings.warn(
"[GDSPY] Path arc with width larger than radius "
"created: possible self-intersecting polygon.",
stacklevel=2)
warn = False
self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx
self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy
self.length += abs((angles[jj + 1] - angles[jj]) * radius)
if isinstance(layer, list):
self.layers.extend(
(layer * (self.n // len(layer) + 1))[:self.n])
else:
self.layers.extend(layer for _ in range(self.n))
if isinstance(datatype, list):
self.datatypes.extend(
(datatype * (self.n // len(datatype) + 1))[:self.n])
else:
self.datatypes.extend(datatype for _ in range(self.n))
return self |
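A hedged usage sketch for Path.arc through the public gdspy objects; the constructor arguments, cell handling and output call follow commonly documented gdspy usage and may need adjusting to your gdspy version.
import numpy
import gdspy

path = gdspy.Path(1, (0, 0))                # 1-unit-wide path starting at the origin
path.segment(4, '+x')                       # straight run heading in +x
path.arc(2, -numpy.pi / 2, numpy.pi / 2)    # 180-degree turn with central radius 2
cell = gdspy.Cell('PATH_ARC_SKETCH')
cell.add(path)
gdspy.write_gds('path_arc_sketch.gds')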
def remove_external_data_field(tensor, field_key): # type: (TensorProto, Text) -> None
"""
Remove a field from a Tensor's external_data key-value store.
Modifies tensor object in place.
@params
tensor: Tensor object from which value will be removed
field_key: The key of the field to be removed
"""
for (i, field) in enumerate(tensor.external_data):
if field.key == field_key:
del tensor.external_data[i] | Remove a field from a Tensor's external_data key-value store.
Modifies tensor object in place.
@params
tensor: Tensor object from which value will be removed
field_key: The key of the field to be removed | Below is the instruction that describes the task:
### Input:
Remove a field from a Tensor's external_data key-value store.
Modifies tensor object in place.
@params
tensor: Tensor object from which value will be removed
field_key: The key of the field to be removed
### Response:
def remove_external_data_field(tensor, field_key): # type: (TensorProto, Text) -> None
"""
Remove a field from a Tensor's external_data key-value store.
Modifies tensor object in place.
@params
tensor: Tensor object from which value will be removed
field_key: The key of the field to be removed
"""
for (i, field) in enumerate(tensor.external_data):
if field.key == field_key:
del tensor.external_data[i] |
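A small sketch of the call, assuming an onnx build new enough to carry the external_data and data_location fields; the tensor contents are made up.
from onnx import TensorProto

tensor = TensorProto(name='weights', data_location=TensorProto.EXTERNAL)
entry = tensor.external_data.add()
entry.key = 'location'
entry.value = 'weights.bin'

remove_external_data_field(tensor, 'location')  # drops the matching key in place
print(len(tensor.external_data))                # 0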
def err(self, output, newline=True):
"""Outputs an error string to the console (stderr)."""
click.echo(output, nl=newline, err=True) | Outputs an error string to the console (stderr). | Below is the instruction that describes the task:
### Input:
Outputs an error string to the console (stderr).
### Response:
def err(self, output, newline=True):
"""Outputs an error string to the console (stderr)."""
click.echo(output, nl=newline, err=True) |
def delete_collection_cluster_role_binding(self, **kwargs):
"""
delete collection of ClusterRoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role_binding(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_cluster_role_binding_with_http_info(**kwargs)
else:
(data) = self.delete_collection_cluster_role_binding_with_http_info(**kwargs)
return data | delete collection of ClusterRoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role_binding(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
delete collection of ClusterRoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role_binding(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_collection_cluster_role_binding(self, **kwargs):
"""
delete collection of ClusterRoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_cluster_role_binding(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_cluster_role_binding_with_http_info(**kwargs)
else:
(data) = self.delete_collection_cluster_role_binding_with_http_info(**kwargs)
return data |
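A usage sketch against the generated kubernetes client; the kubeconfig loading step and the label selector are illustrative assumptions.
from kubernetes import client, config

config.load_kube_config()            # use load_incluster_config() when running in a pod
rbac = client.RbacAuthorizationV1Api()
status = rbac.delete_collection_cluster_role_binding(label_selector='app=demo')
print(status)                        # V1Status describing the collection delete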
def ToStream(value):
"""
Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: not hexlified
"""
ms = StreamManager.GetStream()
writer = BinaryWriter(ms)
value.Serialize(writer)
retVal = ms.getvalue()
StreamManager.ReleaseStream(ms)
return retVal | Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: not hexlified | Below is the instruction that describes the task:
### Input:
Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: not hexlified
### Response:
def ToStream(value):
"""
Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: not hexlified
"""
ms = StreamManager.GetStream()
writer = BinaryWriter(ms)
value.Serialize(writer)
retVal = ms.getvalue()
StreamManager.ReleaseStream(ms)
return retVal |
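A sketch of the calling convention: any object exposing a Serialize(writer) hook can be passed; the writer method names used inside Serialize are assumptions modelled on typical neo-python BinaryWriter helpers.
from binascii import hexlify

class Demo(object):                 # hypothetical stand-in for a SerializableMixin subclass
    def Serialize(self, writer):    # ToStream only requires this hook
        writer.WriteUInt32(7)
        writer.WriteVarString('demo')

raw = ToStream(Demo())              # raw bytes, not hexlified
print(hexlify(raw))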
def describe_vpc_peering_connection(name,
region=None,
key=None,
keyid=None,
profile=None):
'''
Returns any VPC peering connection id(s) for the given VPC
peering connection name.
VPC peering connection ids are only returned for connections that
are in the ``active``, ``pending-acceptance`` or ``provisioning``
state.
.. versionadded:: 2016.11.0
:param name: The string name for this VPC peering connection
:param region: The aws region to use
:param key: Your aws key
:param keyid: The key id associated with this aws account
:param profile: The profile to use
:return: dict
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc
# Specify a region
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2
'''
conn = _get_conn3(region=region, key=key, keyid=keyid,
profile=profile)
return {
'VPC-Peerings': _get_peering_connection_ids(name, conn)
} | Returns any VPC peering connection id(s) for the given VPC
peering connection name.
VPC peering connection ids are only returned for connections that
are in the ``active``, ``pending-acceptance`` or ``provisioning``
state.
.. versionadded:: 2016.11.0
:param name: The string name for this VPC peering connection
:param region: The aws region to use
:param key: Your aws key
:param keyid: The key id associated with this aws account
:param profile: The profile to use
:return: dict
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc
# Specify a region
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2 | Below is the instruction that describes the task:
### Input:
Returns any VPC peering connection id(s) for the given VPC
peering connection name.
VPC peering connection ids are only returned for connections that
are in the ``active``, ``pending-acceptance`` or ``provisioning``
state.
.. versionadded:: 2016.11.0
:param name: The string name for this VPC peering connection
:param region: The aws region to use
:param key: Your aws key
:param keyid: The key id associated with this aws account
:param profile: The profile to use
:return: dict
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc
# Specify a region
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2
### Response:
def describe_vpc_peering_connection(name,
region=None,
key=None,
keyid=None,
profile=None):
'''
Returns any VPC peering connection id(s) for the given VPC
peering connection name.
VPC peering connection ids are only returned for connections that
are in the ``active``, ``pending-acceptance`` or ``provisioning``
state.
.. versionadded:: 2016.11.0
:param name: The string name for this VPC peering connection
:param region: The aws region to use
:param key: Your aws key
:param keyid: The key id associated with this aws account
:param profile: The profile to use
:return: dict
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc
# Specify a region
salt myminion boto_vpc.describe_vpc_peering_connection salt-vpc region=us-west-2
'''
conn = _get_conn3(region=region, key=key, keyid=keyid,
profile=profile)
return {
'VPC-Peerings': _get_peering_connection_ids(name, conn)
} |
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and checks encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color) | Wraps click.echo, handles formatting and checks encoding | Below is the instruction that describes the task:
### Input:
Wraps click.echo, handles formatting and checks encoding
### Response:
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and checks encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color) |
def send(self, messages):
"""Send a SMS message, or an array of SMS messages"""
tmpSms = SMS(to='', message='')
if str(type(messages)) == str(type(tmpSms)):
messages = [messages]
xml_root = self.__init_xml('Message')
wrapper_id = 0
for m in messages:
m.wrapper_id = wrapper_id
msg = self.__build_sms_data(m)
sms = etree.SubElement(xml_root, 'SMS')
for sms_element in msg:
element = etree.SubElement(sms, sms_element)
element.text = msg[sms_element]
# print etree.tostring(xml_root)
response = clockwork_http.request(SMS_URL, etree.tostring(xml_root, encoding='utf-8'))
response_data = response['data']
# print response_data
data_etree = etree.fromstring(response_data)
# Check for general error
err_desc = data_etree.find('ErrDesc')
if err_desc is not None:
raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)
# Return a consistent object
results = []
for sms in data_etree:
matching_sms = next((s for s in messages if str(s.wrapper_id) == sms.find('WrapperID').text), None)
new_result = SMSResponse(
sms = matching_sms,
id = '' if sms.find('MessageID') is None else sms.find('MessageID').text,
error_code = 0 if sms.find('ErrNo') is None else sms.find('ErrNo').text,
error_message = '' if sms.find('ErrDesc') is None else sms.find('ErrDesc').text,
success = True if sms.find('ErrNo') is None else (sms.find('ErrNo').text == 0)
)
results.append(new_result)
if len(results) > 1:
return results
return results[0] | Send an SMS message, or an array of SMS messages | Below is the instruction that describes the task:
### Input:
Send an SMS message, or an array of SMS messages
### Response:
def send(self, messages):
"""Send a SMS message, or an array of SMS messages"""
tmpSms = SMS(to='', message='')
if str(type(messages)) == str(type(tmpSms)):
messages = [messages]
xml_root = self.__init_xml('Message')
wrapper_id = 0
for m in messages:
m.wrapper_id = wrapper_id
msg = self.__build_sms_data(m)
sms = etree.SubElement(xml_root, 'SMS')
for sms_element in msg:
element = etree.SubElement(sms, sms_element)
element.text = msg[sms_element]
# print etree.tostring(xml_root)
response = clockwork_http.request(SMS_URL, etree.tostring(xml_root, encoding='utf-8'))
response_data = response['data']
# print response_data
data_etree = etree.fromstring(response_data)
# Check for general error
err_desc = data_etree.find('ErrDesc')
if err_desc is not None:
raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)
# Return a consistent object
results = []
for sms in data_etree:
matching_sms = next((s for s in messages if str(s.wrapper_id) == sms.find('WrapperID').text), None)
new_result = SMSResponse(
sms = matching_sms,
id = '' if sms.find('MessageID') is None else sms.find('MessageID').text,
error_code = 0 if sms.find('ErrNo') is None else sms.find('ErrNo').text,
error_message = '' if sms.find('ErrDesc') is None else sms.find('ErrDesc').text,
success = True if sms.find('ErrNo') is None else (sms.find('ErrNo').text == 0)
)
results.append(new_result)
if len(results) > 1:
return results
return results[0] |
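A hedged usage sketch; the API key and phone number are placeholders, and the attribute names follow the SMSResponse built above.
from clockwork import clockwork

api = clockwork.API('API_KEY_HERE')                       # placeholder credentials
sms = clockwork.SMS(to='441234567890', message='Hello')   # placeholder recipient
response = api.send(sms)                                  # one message in, one SMSResponse out
if response.success:
    print('sent, id:', response.id)
else:
    print('failed:', response.error_code, response.error_message)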
def itin(self):
"""Generate a random United States Individual Taxpayer Identification Number (ITIN).
A United States Individual Taxpayer Identification Number
(ITIN) is a tax processing number issued by the Internal
Revenue Service. It is a nine-digit number that always begins
with the number 9 and has a range of 70-88 in the fourth and
fifth digit. Effective April 12, 2011, the range was extended
to include 900-70-0000 through 999-88-9999, 900-90-0000
through 999-92-9999 and 900-94-0000 through 999-99-9999.
https://www.irs.gov/individuals/international-taxpayers/general-itin-information
"""
area = self.random_int(min=900, max=999)
serial = self.random_int(min=0, max=9999)
# The group number must be between 70 and 99 inclusively but not 89 or 93
group = random.choice([x for x in range(70, 100) if x not in [89, 93]])
itin = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)
return itin | Generate a random United States Individual Taxpayer Identification Number (ITIN).
A United States Individual Taxpayer Identification Number
(ITIN) is a tax processing number issued by the Internal
Revenue Service. It is a nine-digit number that always begins
with the number 9 and has a range of 70-88 in the fourth and
fifth digit. Effective April 12, 2011, the range was extended
to include 900-70-0000 through 999-88-9999, 900-90-0000
through 999-92-9999 and 900-94-0000 through 999-99-9999.
https://www.irs.gov/individuals/international-taxpayers/general-itin-information | Below is the instruction that describes the task:
### Input:
Generate a random United States Individual Taxpayer Identification Number (ITIN).
A United States Individual Taxpayer Identification Number
(ITIN) is a tax processing number issued by the Internal
Revenue Service. It is a nine-digit number that always begins
with the number 9 and has a range of 70-88 in the fourth and
fifth digit. Effective April 12, 2011, the range was extended
to include 900-70-0000 through 999-88-9999, 900-90-0000
through 999-92-9999 and 900-94-0000 through 999-99-9999.
https://www.irs.gov/individuals/international-taxpayers/general-itin-information
### Response:
def itin(self):
"""Generate a random United States Individual Taxpayer Identification Number (ITIN).
A United States Individual Taxpayer Identification Number
(ITIN) is a tax processing number issued by the Internal
Revenue Service. It is a nine-digit number that always begins
with the number 9 and has a range of 70-88 in the fourth and
fifth digit. Effective April 12, 2011, the range was extended
to include 900-70-0000 through 999-88-9999, 900-90-0000
through 999-92-9999 and 900-94-0000 through 999-99-9999.
https://www.irs.gov/individuals/international-taxpayers/general-itin-information
"""
area = self.random_int(min=900, max=999)
serial = self.random_int(min=0, max=9999)
# The group number must be between 70 and 99 inclusively but not 89 or 93
group = random.choice([x for x in range(70, 100) if x not in [89, 93]])
itin = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)
return itin |
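Assuming this provider is registered the usual way, the method is reachable straight off a Faker instance:
from faker import Faker

fake = Faker('en_US')
print(fake.itin())   # e.g. '934-71-2213': starts with 9, group digits in 70-99 excluding 89 and 93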
def ingest(topic, text, **kwargs):
""" Ingest the given text for the topic """
if not text:
raise ValueError('No text given to ingest for topic: ' + topic)
data = {'topic': topic, 'text': text.strip()}
data.update(kwargs)
db.markovify.insert(data) | Ingest the given text for the topic | Below is the instruction that describes the task:
### Input:
Ingest the given text for the topic
### Response:
def ingest(topic, text, **kwargs):
""" Ingest the given text for the topic """
if not text:
raise ValueError('No text given to ingest for topic: ' + topic)
data = {'topic': topic, 'text': text.strip()}
data.update(kwargs)
db.markovify.insert(data) |
def start(self, callback, rate=SENSOR_DELAY_NORMAL):
""" Start listening to sensor events. Sensor event data depends
on the type of sensor that was given to
Parameters
----------
callback: Callable
A callback that takes one argument that will be passed
the sensor data. Sensor data is a dict with data based on
the type of sensor.
rate: Integer
How fast to update. One of the Sensor.SENSOR_DELAY values
Returns
-------
result: Future
A future that resolves to whether the register call
completed.
"""
if not self.manager:
raise RuntimeError(
"Cannot start a sensor without a SensorManager!")
self.onSensorChanged.connect(callback)
return self.manager.registerListener(self.getId(), self, rate) | Start listening to sensor events. Sensor event data depends
on the type of sensor that was given to
Parameters
----------
callback: Callable
A callback that takes one argument that will be passed
the sensor data. Sensor data is a dict with data based on
the type of sensor.
rate: Integer
How fast to update. One of the Sensor.SENSOR_DELAY values
Returns
-------
result: Future
A future that resolves to whether the register call
completed. | Below is the instruction that describes the task:
### Input:
Start listening to sensor events. Sensor event data depends
on the type of sensor that was given to
Parameters
----------
callback: Callable
A callback that takes one argument that will be passed
the sensor data. Sensor data is a dict with data based on
the type of sensor.
rate: Integer
How fast to update. One of the Sensor.SENSOR_DELAY values
Returns
-------
result: Future
A future that resolves to whether the register call
completed.
### Response:
def start(self, callback, rate=SENSOR_DELAY_NORMAL):
""" Start listening to sensor events. Sensor event data depends
on the type of sensor that was given to
Parameters
----------
callback: Callable
A callback that takes one argument that will be passed
the sensor data. Sensor data is a dict with data based on
the type of sensor.
rate: Integer
How fast to update. One of the Sensor.SENSOR_DELAY values
Returns
-------
result: Future
A future that resolves to whether the register call
completed.
"""
if not self.manager:
raise RuntimeError(
"Cannot start a sensor without a SensorManager!")
self.onSensorChanged.connect(callback)
return self.manager.registerListener(self.getId(), self, rate) |
def _debug_log(self, msg):
"""Debug log messages if debug=True"""
if not self.debug:
return
sys.stderr.write('{}\n'.format(msg)) | Debug log messages if debug=True | Below is the instruction that describes the task:
### Input:
Debug log messages if debug=True
### Response:
def _debug_log(self, msg):
"""Debug log messages if debug=True"""
if not self.debug:
return
sys.stderr.write('{}\n'.format(msg)) |
def getLaneChangeState(self, vehID, direction):
"""getLaneChangeState(string, int) -> (int, int)
Return the lane change state for the vehicle
"""
self._connection._beginMessage(
tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID, 1 + 4)
self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, direction)
result = self._connection._checkResult(tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID)
return result.read("!iBiBi")[2::2] | getLaneChangeState(string, int) -> (int, int)
Return the lane change state for the vehicle | Below is the instruction that describes the task:
### Input:
getLaneChangeState(string, int) -> (int, int)
Return the lane change state for the vehicle
### Response:
def getLaneChangeState(self, vehID, direction):
"""getLaneChangeState(string, int) -> (int, int)
Return the lane change state for the vehicle
"""
self._connection._beginMessage(
tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID, 1 + 4)
self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, direction)
result = self._connection._checkResult(tc.CMD_GET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID)
return result.read("!iBiBi")[2::2] |
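A usage sketch through the TraCI entry points; the sumo configuration and vehicle id are illustrative, and direction 1 asks about a change to the left (-1 for the right).
import traci

traci.start(['sumo', '-c', 'scenario.sumocfg'])   # illustrative scenario
traci.simulationStep()
own, requested = traci.vehicle.getLaneChangeState('veh0', 1)
print(bin(own), bin(requested))                   # two lane-change state bitfields
traci.close()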
def start(self, phase, stage, **kwargs):
"""Start a new routine, stage or phase"""
return ProgressSection(self, self._session, phase, stage, self._logger, **kwargs) | Start a new routine, stage or phase | Below is the instruction that describes the task:
### Input:
Start a new routine, stage or phase
### Response:
def start(self, phase, stage, **kwargs):
"""Start a new routine, stage or phase"""
return ProgressSection(self, self._session, phase, stage, self._logger, **kwargs) |
def calloc(self, sim_nmemb, sim_size):
"""
A somewhat faithful implementation of libc `calloc`.
:param sim_nmemb: the number of elements to allocate
:param sim_size: the size of each element (in bytes)
:returns: the address of the allocation, or a NULL pointer if the allocation failed
"""
raise NotImplementedError("%s not implemented for %s" % (self.calloc.__func__.__name__,
self.__class__.__name__)) | A somewhat faithful implementation of libc `calloc`.
:param sim_nmemb: the number of elements to allocate
:param sim_size: the size of each element (in bytes)
:returns: the address of the allocation, or a NULL pointer if the allocation failed | Below is the instruction that describes the task:
### Input:
A somewhat faithful implementation of libc `calloc`.
:param sim_nmemb: the number of elements to allocate
:param sim_size: the size of each element (in bytes)
:returns: the address of the allocation, or a NULL pointer if the allocation failed
### Response:
def calloc(self, sim_nmemb, sim_size):
"""
A somewhat faithful implementation of libc `calloc`.
:param sim_nmemb: the number of elements to allocate
:param sim_size: the size of each element (in bytes)
:returns: the address of the allocation, or a NULL pointer if the allocation failed
"""
raise NotImplementedError("%s not implemented for %s" % (self.calloc.__func__.__name__,
self.__class__.__name__)) |
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", sys.exc_info()[1])
self.close_connection = 1
return | Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST. | Below is the instruction that describes the task:
### Input:
Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
### Response:
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", sys.exc_info()[1])
self.close_connection = 1
return |
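The docstring points at do_GET/do_POST dispatch; a minimal Python 3 handler showing that hook (port and payload are arbitrary):
from http.server import BaseHTTPRequestHandler, HTTPServer

class HelloHandler(BaseHTTPRequestHandler):
    def do_GET(self):                              # handle_one_request dispatches GET here
        body = b'hello\n'
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Content-Length', str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == '__main__':
    HTTPServer(('127.0.0.1', 8000), HelloHandler).serve_forever()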
def reverse(self, *args, **kwargs):
"""Look up a path by name and fill in the provided parameters.
Example:
>>> handler = lambda: None # just a bogus handler
>>> router = PathRouter(('post', '/posts/{slug}', handler))
>>> router.reverse('post', slug='my-post')
'/posts/my-post'
"""
(name,) = args
return self._templates[name].fill(**kwargs) | Look up a path by name and fill in the provided parameters.
Example:
>>> handler = lambda: None # just a bogus handler
>>> router = PathRouter(('post', '/posts/{slug}', handler))
>>> router.reverse('post', slug='my-post')
'/posts/my-post' | Below is the instruction that describes the task:
### Input:
Look up a path by name and fill in the provided parameters.
Example:
>>> handler = lambda: None # just a bogus handler
>>> router = PathRouter(('post', '/posts/{slug}', handler))
>>> router.reverse('post', slug='my-post')
'/posts/my-post'
### Response:
def reverse(self, *args, **kwargs):
"""Look up a path by name and fill in the provided parameters.
Example:
>>> handler = lambda: None # just a bogus handler
>>> router = PathRouter(('post', '/posts/{slug}', handler))
>>> router.reverse('post', slug='my-post')
'/posts/my-post'
"""
(name,) = args
return self._templates[name].fill(**kwargs) |
def main():
"""
Input asteroid family, filter type, and image type to query SSOIS
"""
parser = argparse.ArgumentParser(description='Run SSOIS and return the available images in a particular filter.')
parser.add_argument("--filter",
action="store",
default='r',
dest="filter",
choices=['r', 'u'],
help="Passband: default is r.")
parser.add_argument("--family", '-f',
action="store",
default=None,
help='List of objects to query.')
parser.add_argument("--member", '-m',
action="store",
default=None,
help='Member object of family to query.')
args = parser.parse_args()
if args.family != None and args.member == None:
get_family_info(str(args.family), args.filter)
elif args.family == None and args.member != None:
get_member_info(str(args.member), args.filter)
else:
print "Please input either a family or single member name" | Input asteroid family, filter type, and image type to query SSOIS | Below is the the instruction that describes the task:
### Input:
Input asteroid family, filter type, and image type to query SSOIS
### Response:
def main():
"""
Input asteroid family, filter type, and image type to query SSOIS
"""
parser = argparse.ArgumentParser(description='Run SSOIS and return the available images in a particular filter.')
parser.add_argument("--filter",
action="store",
default='r',
dest="filter",
choices=['r', 'u'],
help="Passband: default is r.")
parser.add_argument("--family", '-f',
action="store",
default=None,
help='List of objects to query.')
parser.add_argument("--member", '-m',
action="store",
default=None,
help='Member object of family to query.')
args = parser.parse_args()
if args.family != None and args.member == None:
get_family_info(str(args.family), args.filter)
elif args.family == None and args.member != None:
get_member_info(str(args.member), args.filter)
else:
print "Please input either a family or single member name" |
def to_csc(self):
"""Convert Dataset to scipy's Compressed Sparse Column matrix."""
self._X_train = csc_matrix(self._X_train)
self._X_test = csc_matrix(self._X_test) | Convert Dataset to scipy's Compressed Sparse Column matrix. | Below is the instruction that describes the task:
### Input:
Convert Dataset to scipy's Compressed Sparse Column matrix.
### Response:
def to_csc(self):
"""Convert Dataset to scipy's Compressed Sparse Column matrix."""
self._X_train = csc_matrix(self._X_train)
self._X_test = csc_matrix(self._X_test) |
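The underlying conversion is plain scipy; a minimal illustration on a small dense array:
import numpy as np
from scipy.sparse import csc_matrix

dense = np.array([[0, 1], [2, 0]])
sparse = csc_matrix(dense)          # the same call to_csc applies to X_train and X_test
print(sparse.nnz, sparse.shape)     # 2 (2, 2)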
def unlink(self):
"""unlinks the shared memory"""
if os.name == "posix":
self.__linux_unlink__()
elif os.name == "nt":
self.__windows_unlink__()
else:
raise HolodeckException("Currently unsupported os: " + os.name) | unlinks the shared memory | Below is the instruction that describes the task:
### Input:
unlinks the shared memory
### Response:
def unlink(self):
"""unlinks the shared memory"""
if os.name == "posix":
self.__linux_unlink__()
elif os.name == "nt":
self.__windows_unlink__()
else:
raise HolodeckException("Currently unsupported os: " + os.name) |
def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
'''
Update snapshots dict with current snapshots
dataset: string
name of filesystem or volume
prefix : string
prefix for the snapshots
e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
snapshots : OrderedDict
preseeded OrderedDict with configuration
'''
## NOTE: retrieve all snapshots for the dataset
for snap in sorted(__salt__['zfs.list'](dataset, **{'recursive': True, 'depth': 1, 'type': 'snapshot'}).keys()):
## NOTE: we only want the actual name
## myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248
snap_name = snap[snap.index('@')+1:]
## NOTE: we only want snapshots matching our prefix
if not snap_name.startswith('{0}-'.format(prefix)):
continue
## NOTE: retrieve the holds for this snapshot
snap_holds = __salt__['zfs.holds'](snap)
## NOTE: this snapshot has no holds, eligible for pruning
if not snap_holds:
snapshots['_prunable'].append(snap)
## NOTE: update snapshots based on holds (if any)
## we are only interested in the ones from our schedule
## if we find any others we skip them
for hold in snap_holds:
if hold in snapshots['_schedule'].keys():
snapshots[hold].append(snap)
return snapshots | Update snapshots dict with current snapshots
dataset: string
name of filesystem or volume
prefix : string
prefix for the snapshots
e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
snapshots : OrderedDict
preseeded OrderedDict with configuration | Below is the instruction that describes the task:
### Input:
Update snapshots dict with current snapshots
dataset: string
name of filesystem or volume
prefix : string
prefix for the snapshots
e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
snapshots : OrderedDict
preseeded OrderedDict with configuration
### Response:
def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
'''
Update snapshots dict with current snapshots
dataset: string
name of filesystem or volume
prefix : string
prefix for the snapshots
e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
snapshots : OrderedDict
preseeded OrderedDict with configuration
'''
## NOTE: retrieve all snapshots for the dataset
for snap in sorted(__salt__['zfs.list'](dataset, **{'recursive': True, 'depth': 1, 'type': 'snapshot'}).keys()):
## NOTE: we only want the actual name
## myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248
snap_name = snap[snap.index('@')+1:]
## NOTE: we only want snapshots matching our prefix
if not snap_name.startswith('{0}-'.format(prefix)):
continue
## NOTE: retrieve the holds for this snapshot
snap_holds = __salt__['zfs.holds'](snap)
## NOTE: this snapshot has no holds, eligable for pruning
if not snap_holds:
snapshots['_prunable'].append(snap)
## NOTE: update snapshots based on holds (if any)
## we are only interested in the ones from our schedule
## if we find any others we skip them
for hold in snap_holds:
if hold in snapshots['_schedule'].keys():
snapshots[hold].append(snap)
return snapshots |
def queue_actions(self, source, actions, event_args=None):
"""
Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards.
"""
source.event_args = event_args
ret = self.trigger_actions(source, actions)
source.event_args = None
return ret | Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards. | Below is the instruction that describes the task:
### Input:
Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards.
### Response:
def queue_actions(self, source, actions, event_args=None):
"""
Queue a list of \a actions for processing from \a source.
Triggers an aura refresh afterwards.
"""
source.event_args = event_args
ret = self.trigger_actions(source, actions)
source.event_args = None
return ret |
def execute(self, correlation_id, args):
"""
Executes the command given specific arguments as an input.
Args:
correlation_id: a unique correlation/transaction id
args: command arguments
Returns: an execution result.
Raises:
MicroserviceError: when execution fails for whatever reason.
"""
return self._intercepter.execute(_next, correlation_id, args) | Executes the command given specific arguments as an input.
Args:
correlation_id: a unique correlation/transaction id
args: command arguments
Returns: an execution result.
Raises:
        MicroserviceError: when execution fails for whatever reason. | Below is the instruction that describes the task:
### Input:
Executes the command given specific arguments as an input.
Args:
correlation_id: a unique correlation/transaction id
args: command arguments
Returns: an execution result.
Raises:
MicroserviceError: when execution fails for whatever reason.
### Response:
def execute(self, correlation_id, args):
"""
Executes the command given specific arguments as an input.
Args:
correlation_id: a unique correlation/transaction id
args: command arguments
Returns: an execution result.
Raises:
MicroserviceError: when execution fails for whatever reason.
"""
return self._intercepter.execute(_next, correlation_id, args) |
def to_joint_gaussian(self):
"""
The linear Gaussian Bayesian Networks are an alternative
representation for the class of multivariate Gaussian distributions.
This method returns an equivalent joint Gaussian distribution.
Returns
-------
GaussianDistribution: An equivalent joint Gaussian
distribution for the network.
Reference
---------
Section 7.2, Example 7.3,
Probabilistic Graphical Models, Principles and Techniques
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> jgd = model.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2', 'x3']
>>> jgd.mean
array([[ 1. ],
[-4.5],
[ 8.5]])
>>> jgd.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
"""
variables = nx.topological_sort(self)
mean = np.zeros(len(variables))
covariance = np.zeros((len(variables), len(variables)))
for node_idx in range(len(variables)):
cpd = self.get_cpds(variables[node_idx])
mean[node_idx] = sum([coeff * mean[variables.index(parent)] for
coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.beta_0
covariance[node_idx, node_idx] = sum(
[coeff * coeff * covariance[variables.index(parent), variables.index(parent)]
for coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.variance
for node_i_idx in range(len(variables)):
for node_j_idx in range(len(variables)):
if covariance[node_j_idx, node_i_idx] != 0:
covariance[node_i_idx, node_j_idx] = covariance[node_j_idx, node_i_idx]
else:
cpd_j = self.get_cpds(variables[node_j_idx])
covariance[node_i_idx, node_j_idx] = sum(
[coeff * covariance[node_i_idx, variables.index(parent)]
for coeff, parent in zip(cpd_j.beta_vector, cpd_j.evidence)])
return GaussianDistribution(variables, mean, covariance) | The linear Gaussian Bayesian Networks are an alternative
representation for the class of multivariate Gaussian distributions.
This method returns an equivalent joint Gaussian distribution.
Returns
-------
GaussianDistribution: An equivalent joint Gaussian
distribution for the network.
Reference
---------
Section 7.2, Example 7.3,
Probabilistic Graphical Models, Principles and Techniques
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> jgd = model.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2', 'x3']
>>> jgd.mean
array([[ 1. ],
[-4.5],
[ 8.5]])
>>> jgd.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
           [-2., -5.,  8.]]) | Below is the instruction that describes the task:
### Input:
The linear Gaussian Bayesian Networks are an alternative
representation for the class of multivariate Gaussian distributions.
This method returns an equivalent joint Gaussian distribution.
Returns
-------
GaussianDistribution: An equivalent joint Gaussian
distribution for the network.
Reference
---------
Section 7.2, Example 7.3,
Probabilistic Graphical Models, Principles and Techniques
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> jgd = model.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2', 'x3']
>>> jgd.mean
array([[ 1. ],
[-4.5],
[ 8.5]])
>>> jgd.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
### Response:
def to_joint_gaussian(self):
"""
The linear Gaussian Bayesian Networks are an alternative
representation for the class of multivariate Gaussian distributions.
This method returns an equivalent joint Gaussian distribution.
Returns
-------
GaussianDistribution: An equivalent joint Gaussian
distribution for the network.
Reference
---------
Section 7.2, Example 7.3,
Probabilistic Graphical Models, Principles and Techniques
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> jgd = model.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2', 'x3']
>>> jgd.mean
array([[ 1. ],
[-4.5],
[ 8.5]])
>>> jgd.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
"""
variables = nx.topological_sort(self)
mean = np.zeros(len(variables))
covariance = np.zeros((len(variables), len(variables)))
for node_idx in range(len(variables)):
cpd = self.get_cpds(variables[node_idx])
mean[node_idx] = sum([coeff * mean[variables.index(parent)] for
coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.beta_0
covariance[node_idx, node_idx] = sum(
[coeff * coeff * covariance[variables.index(parent), variables.index(parent)]
for coeff, parent in zip(cpd.beta_vector, cpd.evidence)]) + cpd.variance
for node_i_idx in range(len(variables)):
for node_j_idx in range(len(variables)):
if covariance[node_j_idx, node_i_idx] != 0:
covariance[node_i_idx, node_j_idx] = covariance[node_j_idx, node_i_idx]
else:
cpd_j = self.get_cpds(variables[node_j_idx])
covariance[node_i_idx, node_j_idx] = sum(
[coeff * covariance[node_i_idx, variables.index(parent)]
for coeff, parent in zip(cpd_j.beta_vector, cpd_j.evidence)])
return GaussianDistribution(variables, mean, covariance) |
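As a quick sanity check of the mean recurrence used above, the docstring example can be reproduced by hand; the numbers below come from that example only.

# mean[i] = beta_0 + sum(beta_j * mean[parent_j]), mirroring the loop above
mu_x1 = 1.0                    # x1 has no parents
mu_x2 = -5.0 + 0.5 * mu_x1     # -> -4.5
mu_x3 = 4.0 + (-1.0) * mu_x2   # -> 8.5
print(mu_x1, mu_x2, mu_x3)     # 1.0 -4.5 8.5, matching jgd.mean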
def put(self, message, indent=0):
'''
Print message with an indent.
:param message:
:param indent:
:return:
'''
color = self._colors_conf.get(indent + indent % 2, self._colors_conf.get(0, self._default_color))
for chunk in [' ' * indent, self._colors[color], message, self._colors['ENDC']]:
self._device.write(str(chunk))
self._device.write(os.linesep)
self._device.flush() | Print message with an indent.
:param message:
:param indent:
    :return: | Below is the instruction that describes the task:
### Input:
Print message with an indent.
:param message:
:param indent:
:return:
### Response:
def put(self, message, indent=0):
'''
Print message with an indent.
:param message:
:param indent:
:return:
'''
color = self._colors_conf.get(indent + indent % 2, self._colors_conf.get(0, self._default_color))
for chunk in [' ' * indent, self._colors[color], message, self._colors['ENDC']]:
self._device.write(str(chunk))
self._device.write(os.linesep)
self._device.flush() |
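A standalone sketch of the same indent-and-colorize logic; the color tables and output device here are hypothetical stand-ins for the instance attributes (`_colors`, `_colors_conf`, `_device`) that are not shown above.

import os
import sys

_colors = {'BLUE': '\033[94m', 'GREEN': '\033[92m', 'ENDC': '\033[0m'}
_colors_conf = {0: 'BLUE', 2: 'GREEN'}
_default_color = 'BLUE'

def put(message, indent=0, device=sys.stdout):
    # Pick a color for the (even-rounded) indent level, then wrap the message in it.
    color = _colors_conf.get(indent + indent % 2, _colors_conf.get(0, _default_color))
    for chunk in [' ' * indent, _colors[color], message, _colors['ENDC']]:
        device.write(str(chunk))
    device.write(os.linesep)
    device.flush()

put('top level')            # level 0 -> BLUE
put('nested', indent=2)     # 2 + 2 % 2 == 2 -> GREEN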
def mapreads(data, sample, nthreads, force):
"""
Attempt to map reads to reference sequence. This reads in the fasta files
(samples.files.edits), and maps each read to the reference. Unmapped reads
are dropped right back in the de novo pipeline. Reads that map successfully
are processed and pushed downstream and joined with the rest of the data
post muscle_align.
Mapped reads end up in a sam file.
"""
LOGGER.info("Entering mapreads(): %s %s", sample.name, nthreads)
## This is the input derep file, for paired data we need to split the data,
## and so we will make sample.files.dereps == [derep1, derep2], but for
## SE data we can simply use sample.files.derep == [derepfile].
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
sample.files.dereps = [derepfile]
## This is the final output files containing merged/concat derep'd refmap'd
## reads that did not match to the reference. They will be back in
## merge/concat (--nnnnn--) format ready to be input to vsearch, if needed.
mumapfile = sample.files.unmapped_reads
umap1file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap1.fastq")
umap2file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap2.fastq")
## split the derepfile into the two handles we designate
if "pair" in data.paramsdict["datatype"]:
sample.files.split1 = os.path.join(data.dirs.edits, sample.name+"-split1.fastq")
sample.files.split2 = os.path.join(data.dirs.edits, sample.name+"-split2.fastq")
sample.files.dereps = [sample.files.split1, sample.files.split2]
split_merged_reads(sample.files.dereps, derepfile)
## (cmd1) smalt <task> [TASK_OPTIONS] [<index_name> <file_name_A> [<file_name_B>]]
## -f sam : Output as sam format, tried :clip: to hard mask output
## but it shreds the unmapped reads (outputs empty fq)
## -l [pe,mp,pp]: If paired end select the orientation of each read
## -n # : Number of threads to use
## -x : Perform a more exhaustive search
## -y # : proportion matched to reference (sequence similarity)
## -o : output file
## : Reference sequence
## : Input file(s), in a list. One for R1 and one for R2
## -c # : proportion of the query read length that must be covered
## (cmd1) bwa mem [OPTIONS] <index_name> <file_name_A> [<file_name_B>] > <output_file>
## -t # : Number of threads
## -M : Mark split alignments as secondary.
## (cmd2) samtools view [options] <in.bam>|<in.sam>|<in.cram> [region ...]
## -b = write to .bam
## -q = Only keep reads with mapq score >= 30 (seems to be pretty standard)
## -F = Select all reads that DON'T have these flags.
## 0x4 (segment unmapped)
## 0x100 (Secondary alignment)
## 0x800 (supplementary alignment)
## -U = Write out all reads that don't pass the -F filter
## (all unmapped reads go to this file).
## TODO: Should eventually add `-q 13` to filter low confidence mapping.
## If you do this it will throw away some fraction of reads. Ideally you'd
## catch these and throw them in with the rest of the unmapped reads, but
## I can't think of a straightforward way of doing that. There should be
## a `-Q` flag to only keep reads below the threshold, but i realize that
## would be of limited use besides for me.
## (cmd3) samtools sort [options...] [in.bam]
## -T = Temporary file name, this is required by samtools, ignore it
## Here we hack it to be samhandle.tmp cuz samtools cleans it up
## -O = Output file format, in this case bam
## -o = Output file name
if "smalt" in data._hackersonly["aligner"]:
## The output SAM data is written to file (-o)
## input is either (derep) or (derep-split1, derep-split2)
cmd1 = [ipyrad.bins.smalt, "map",
"-f", "sam",
"-n", str(max(1, nthreads)),
"-y", str(data.paramsdict['clust_threshold']),
"-o", os.path.join(data.dirs.refmapping, sample.name+".sam"),
"-x",
data.paramsdict['reference_sequence']
] + sample.files.dereps
cmd1_stdout = sps.PIPE
cmd1_stderr = sps.STDOUT
else:
cmd1 = [ipyrad.bins.bwa, "mem",
"-t", str(max(1, nthreads)),
"-M",
data.paramsdict['reference_sequence']
] + sample.files.dereps
## Insert optional flags for bwa
try:
bwa_args = data._hackersonly["bwa_args"].split()
bwa_args.reverse()
for arg in bwa_args:
cmd1.insert(2, arg)
except KeyError:
## Do nothing
pass
cmd1_stdout = open(os.path.join(data.dirs.refmapping, sample.name+".sam"), 'w')
cmd1_stderr = None
## Reads in the SAM file from cmd1. It writes the unmapped data to file
## and it pipes the mapped data to be used in cmd3
cmd2 = [ipyrad.bins.samtools, "view",
"-b",
## TODO: This introduces a bug with PE right now. Think about the case where
## R1 has low qual mapping and R2 has high. You get different numbers
## of reads in the unmapped tmp files. FML.
#"-q", "30",
"-F", "0x904",
"-U", os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam"),
os.path.join(data.dirs.refmapping, sample.name+".sam")]
## this is gonna catch mapped bam output from cmd2 and write to file
cmd3 = [ipyrad.bins.samtools, "sort",
"-T", os.path.join(data.dirs.refmapping, sample.name+".sam.tmp"),
"-O", "bam",
"-o", sample.files.mapped_reads]
## TODO: Unnecessary?
## this is gonna read the sorted BAM file and index it. only for pileup?
cmd4 = [ipyrad.bins.samtools, "index", sample.files.mapped_reads]
## this is gonna read in the unmapped files, args are added below,
## and it will output fastq formatted unmapped reads for merging.
## -v 45 sets the default qscore arbitrarily high
cmd5 = [ipyrad.bins.samtools, "bam2fq", "-v 45",
os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")]
## Insert additional arguments for paired data to the commands.
## We assume Illumina paired end reads for the orientation
## of mate pairs (orientation: ---> <----).
if 'pair' in data.paramsdict["datatype"]:
if "smalt" in data._hackersonly["aligner"]:
## add paired flag (-l pe) to cmd1 right after (smalt map ...)
cmd1.insert(2, "pe")
cmd1.insert(2, "-l")
else:
## No special PE flags for bwa
pass
## add samtools filter for only keep if both pairs hit
## 0x1 - Read is paired
## 0x2 - Each read properly aligned
cmd2.insert(2, "0x3")
cmd2.insert(2, "-f")
## tell bam2fq that there are output files for each read pair
cmd5.insert(2, umap1file)
cmd5.insert(2, "-1")
cmd5.insert(2, umap2file)
cmd5.insert(2, "-2")
else:
cmd5.insert(2, mumapfile)
cmd5.insert(2, "-0")
## Running cmd1 creates ref_mapping/sname.sam,
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, stderr=cmd1_stderr, stdout=cmd1_stdout)
    ## This is a really long-running job so we wrap it to ensure it dies.
try:
error1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
## raise error if one occurred in smalt
if proc1.returncode:
raise IPyradWarningExit(error1)
## Running cmd2 writes to ref_mapping/sname.unmapped.bam, and
## fills the pipe with mapped BAM data
LOGGER.debug(" ".join(cmd2))
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
## Running cmd3 pulls mapped BAM from pipe and writes to
## ref_mapping/sname.mapped-sorted.bam.
## Because proc2 pipes to proc3 we just communicate this to run both.
LOGGER.debug(" ".join(cmd3))
proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)
error3 = proc3.communicate()[0]
if proc3.returncode:
raise IPyradWarningExit(error3)
proc2.stdout.close()
## Later we're gonna use samtools to grab out regions using 'view', and to
## do that we need it to be indexed. Let's index it now.
LOGGER.debug(" ".join(cmd4))
proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE)
error4 = proc4.communicate()[0]
if proc4.returncode:
raise IPyradWarningExit(error4)
## Running cmd5 writes to either edits/sname-refmap_derep.fastq for SE
## or it makes edits/sname-tmp-umap{12}.fastq for paired data, which
## will then need to be merged.
LOGGER.debug(" ".join(cmd5))
proc5 = sps.Popen(cmd5, stderr=sps.STDOUT, stdout=sps.PIPE)
error5 = proc5.communicate()[0]
if proc5.returncode:
raise IPyradWarningExit(error5)
## Finally, merge the unmapped reads, which is what cluster()
## expects. If SE, just rename the outfile. In the end
## <sample>-refmap_derep.fq will be the final output
if 'pair' in data.paramsdict["datatype"]:
LOGGER.info("Merging unmapped reads {} {}".format(umap1file, umap2file))
merge_pairs_after_refmapping(data, [(umap1file, umap2file)], mumapfile) | Attempt to map reads to reference sequence. This reads in the fasta files
(samples.files.edits), and maps each read to the reference. Unmapped reads
are dropped right back in the de novo pipeline. Reads that map successfully
are processed and pushed downstream and joined with the rest of the data
post muscle_align.
    Mapped reads end up in a sam file. | Below is the instruction that describes the task:
### Input:
Attempt to map reads to reference sequence. This reads in the fasta files
(samples.files.edits), and maps each read to the reference. Unmapped reads
are dropped right back in the de novo pipeline. Reads that map successfully
are processed and pushed downstream and joined with the rest of the data
post muscle_align.
Mapped reads end up in a sam file.
### Response:
def mapreads(data, sample, nthreads, force):
"""
Attempt to map reads to reference sequence. This reads in the fasta files
(samples.files.edits), and maps each read to the reference. Unmapped reads
are dropped right back in the de novo pipeline. Reads that map successfully
are processed and pushed downstream and joined with the rest of the data
post muscle_align.
Mapped reads end up in a sam file.
"""
LOGGER.info("Entering mapreads(): %s %s", sample.name, nthreads)
## This is the input derep file, for paired data we need to split the data,
## and so we will make sample.files.dereps == [derep1, derep2], but for
## SE data we can simply use sample.files.derep == [derepfile].
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
sample.files.dereps = [derepfile]
## This is the final output files containing merged/concat derep'd refmap'd
## reads that did not match to the reference. They will be back in
## merge/concat (--nnnnn--) format ready to be input to vsearch, if needed.
mumapfile = sample.files.unmapped_reads
umap1file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap1.fastq")
umap2file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap2.fastq")
## split the derepfile into the two handles we designate
if "pair" in data.paramsdict["datatype"]:
sample.files.split1 = os.path.join(data.dirs.edits, sample.name+"-split1.fastq")
sample.files.split2 = os.path.join(data.dirs.edits, sample.name+"-split2.fastq")
sample.files.dereps = [sample.files.split1, sample.files.split2]
split_merged_reads(sample.files.dereps, derepfile)
## (cmd1) smalt <task> [TASK_OPTIONS] [<index_name> <file_name_A> [<file_name_B>]]
## -f sam : Output as sam format, tried :clip: to hard mask output
## but it shreds the unmapped reads (outputs empty fq)
## -l [pe,mp,pp]: If paired end select the orientation of each read
## -n # : Number of threads to use
## -x : Perform a more exhaustive search
## -y # : proportion matched to reference (sequence similarity)
## -o : output file
## : Reference sequence
## : Input file(s), in a list. One for R1 and one for R2
## -c # : proportion of the query read length that must be covered
## (cmd1) bwa mem [OPTIONS] <index_name> <file_name_A> [<file_name_B>] > <output_file>
## -t # : Number of threads
## -M : Mark split alignments as secondary.
## (cmd2) samtools view [options] <in.bam>|<in.sam>|<in.cram> [region ...]
## -b = write to .bam
## -q = Only keep reads with mapq score >= 30 (seems to be pretty standard)
## -F = Select all reads that DON'T have these flags.
## 0x4 (segment unmapped)
## 0x100 (Secondary alignment)
## 0x800 (supplementary alignment)
## -U = Write out all reads that don't pass the -F filter
## (all unmapped reads go to this file).
## TODO: Should eventually add `-q 13` to filter low confidence mapping.
## If you do this it will throw away some fraction of reads. Ideally you'd
## catch these and throw them in with the rest of the unmapped reads, but
## I can't think of a straightforward way of doing that. There should be
## a `-Q` flag to only keep reads below the threshold, but i realize that
## would be of limited use besides for me.
## (cmd3) samtools sort [options...] [in.bam]
## -T = Temporary file name, this is required by samtools, ignore it
## Here we hack it to be samhandle.tmp cuz samtools cleans it up
## -O = Output file format, in this case bam
## -o = Output file name
if "smalt" in data._hackersonly["aligner"]:
## The output SAM data is written to file (-o)
## input is either (derep) or (derep-split1, derep-split2)
cmd1 = [ipyrad.bins.smalt, "map",
"-f", "sam",
"-n", str(max(1, nthreads)),
"-y", str(data.paramsdict['clust_threshold']),
"-o", os.path.join(data.dirs.refmapping, sample.name+".sam"),
"-x",
data.paramsdict['reference_sequence']
] + sample.files.dereps
cmd1_stdout = sps.PIPE
cmd1_stderr = sps.STDOUT
else:
cmd1 = [ipyrad.bins.bwa, "mem",
"-t", str(max(1, nthreads)),
"-M",
data.paramsdict['reference_sequence']
] + sample.files.dereps
## Insert optional flags for bwa
try:
bwa_args = data._hackersonly["bwa_args"].split()
bwa_args.reverse()
for arg in bwa_args:
cmd1.insert(2, arg)
except KeyError:
## Do nothing
pass
cmd1_stdout = open(os.path.join(data.dirs.refmapping, sample.name+".sam"), 'w')
cmd1_stderr = None
## Reads in the SAM file from cmd1. It writes the unmapped data to file
## and it pipes the mapped data to be used in cmd3
cmd2 = [ipyrad.bins.samtools, "view",
"-b",
## TODO: This introduces a bug with PE right now. Think about the case where
## R1 has low qual mapping and R2 has high. You get different numbers
## of reads in the unmapped tmp files. FML.
#"-q", "30",
"-F", "0x904",
"-U", os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam"),
os.path.join(data.dirs.refmapping, sample.name+".sam")]
## this is gonna catch mapped bam output from cmd2 and write to file
cmd3 = [ipyrad.bins.samtools, "sort",
"-T", os.path.join(data.dirs.refmapping, sample.name+".sam.tmp"),
"-O", "bam",
"-o", sample.files.mapped_reads]
## TODO: Unnecessary?
## this is gonna read the sorted BAM file and index it. only for pileup?
cmd4 = [ipyrad.bins.samtools, "index", sample.files.mapped_reads]
## this is gonna read in the unmapped files, args are added below,
## and it will output fastq formatted unmapped reads for merging.
## -v 45 sets the default qscore arbitrarily high
cmd5 = [ipyrad.bins.samtools, "bam2fq", "-v 45",
os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")]
## Insert additional arguments for paired data to the commands.
## We assume Illumina paired end reads for the orientation
## of mate pairs (orientation: ---> <----).
if 'pair' in data.paramsdict["datatype"]:
if "smalt" in data._hackersonly["aligner"]:
## add paired flag (-l pe) to cmd1 right after (smalt map ...)
cmd1.insert(2, "pe")
cmd1.insert(2, "-l")
else:
## No special PE flags for bwa
pass
## add samtools filter for only keep if both pairs hit
## 0x1 - Read is paired
## 0x2 - Each read properly aligned
cmd2.insert(2, "0x3")
cmd2.insert(2, "-f")
## tell bam2fq that there are output files for each read pair
cmd5.insert(2, umap1file)
cmd5.insert(2, "-1")
cmd5.insert(2, umap2file)
cmd5.insert(2, "-2")
else:
cmd5.insert(2, mumapfile)
cmd5.insert(2, "-0")
## Running cmd1 creates ref_mapping/sname.sam,
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, stderr=cmd1_stderr, stdout=cmd1_stdout)
    ## This is a really long-running job so we wrap it to ensure it dies.
try:
error1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
## raise error if one occurred in smalt
if proc1.returncode:
raise IPyradWarningExit(error1)
## Running cmd2 writes to ref_mapping/sname.unmapped.bam, and
## fills the pipe with mapped BAM data
LOGGER.debug(" ".join(cmd2))
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
## Running cmd3 pulls mapped BAM from pipe and writes to
## ref_mapping/sname.mapped-sorted.bam.
## Because proc2 pipes to proc3 we just communicate this to run both.
LOGGER.debug(" ".join(cmd3))
proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)
error3 = proc3.communicate()[0]
if proc3.returncode:
raise IPyradWarningExit(error3)
proc2.stdout.close()
## Later we're gonna use samtools to grab out regions using 'view', and to
## do that we need it to be indexed. Let's index it now.
LOGGER.debug(" ".join(cmd4))
proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE)
error4 = proc4.communicate()[0]
if proc4.returncode:
raise IPyradWarningExit(error4)
## Running cmd5 writes to either edits/sname-refmap_derep.fastq for SE
## or it makes edits/sname-tmp-umap{12}.fastq for paired data, which
## will then need to be merged.
LOGGER.debug(" ".join(cmd5))
proc5 = sps.Popen(cmd5, stderr=sps.STDOUT, stdout=sps.PIPE)
error5 = proc5.communicate()[0]
if proc5.returncode:
raise IPyradWarningExit(error5)
## Finally, merge the unmapped reads, which is what cluster()
## expects. If SE, just rename the outfile. In the end
## <sample>-refmap_derep.fq will be the final output
if 'pair' in data.paramsdict["datatype"]:
LOGGER.info("Merging unmapped reads {} {}".format(umap1file, umap2file))
merge_pairs_after_refmapping(data, [(umap1file, umap2file)], mumapfile) |
def symbolic(self, A):
"""
Return the symbolic factorization of sparse matrix ``A``
Parameters
----------
sparselib
Library name in ``umfpack`` and ``klu``
A
Sparse matrix
Returns
symbolic factorization
-------
"""
if self.sparselib == 'umfpack':
return umfpack.symbolic(A)
elif self.sparselib == 'klu':
return klu.symbolic(A) | Return the symbolic factorization of sparse matrix ``A``
Parameters
----------
sparselib
Library name in ``umfpack`` and ``klu``
A
Sparse matrix
Returns
symbolic factorization
    ------- | Below is the instruction that describes the task:
### Input:
Return the symbolic factorization of sparse matrix ``A``
Parameters
----------
sparselib
Library name in ``umfpack`` and ``klu``
A
Sparse matrix
Returns
symbolic factorization
-------
### Response:
def symbolic(self, A):
"""
Return the symbolic factorization of sparse matrix ``A``
Parameters
----------
sparselib
Library name in ``umfpack`` and ``klu``
A
Sparse matrix
Returns
symbolic factorization
-------
"""
if self.sparselib == 'umfpack':
return umfpack.symbolic(A)
elif self.sparselib == 'klu':
return klu.symbolic(A) |
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i-1], e))
cycles_edges.append(edges)
return cycles_edges | Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
        value will be an empty list. | Below is the instruction that describes the task:
### Input:
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
### Response:
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i-1], e))
cycles_edges.append(edges)
return cycles_edges |
def empirical(X):
"""Compute empirical covariance as baseline estimator.
"""
print("Empirical")
cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov) | Compute empirical covariance as baseline estimator. | Below is the instruction that describes the task:
### Input:
Compute empirical covariance as baseline estimator.
### Response:
def empirical(X):
"""Compute empirical covariance as baseline estimator.
"""
print("Empirical")
cov = np.dot(X.T, X) / n_samples
return cov, np.linalg.inv(cov) |
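The snippet above depends on a module-level `n_samples`; a self-contained variant (assuming rows of `X` are samples and the data is roughly zero-mean) could look like this:

import numpy as np

def empirical_cov(X):
    # Empirical covariance of the data and its inverse (the precision matrix).
    n_samples = X.shape[0]
    cov = np.dot(X.T, X) / n_samples
    return cov, np.linalg.inv(cov)

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 3))
cov, precision = empirical_cov(X)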
def _generate(self):
"""
Generates set of consecutive patterns.
"""
n = self._n
w = self._w
assert type(w) is int, "List for w not supported"
for i in xrange(n / w):
pattern = set(xrange(i * w, (i+1) * w))
      self._patterns[i] = pattern | Generates set of consecutive patterns. | Below is the instruction that describes the task:
### Input:
Generates set of consecutive patterns.
### Response:
def _generate(self):
"""
Generates set of consecutive patterns.
"""
n = self._n
w = self._w
assert type(w) is int, "List for w not supported"
for i in xrange(n / w):
pattern = set(xrange(i * w, (i+1) * w))
self._patterns[i] = pattern |
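`xrange` and the `/` division make the generator above Python 2 only; a standalone Python 3 sketch of the same consecutive-pattern layout (with `n`, `w`, and the result dict standing in for the instance attributes) might look like this:

def generate_consecutive_patterns(n, w):
    # Pattern i is the set of w consecutive indices {i*w, ..., (i+1)*w - 1}.
    assert isinstance(w, int), "List for w not supported"
    return {i: set(range(i * w, (i + 1) * w)) for i in range(n // w)}

patterns = generate_consecutive_patterns(n=20, w=5)
# patterns[0] == {0, 1, 2, 3, 4}, patterns[3] == {15, 16, 17, 18, 19}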
def help(project, task, step, variables):
"""Run a help step."""
task_name = step.args or variables['task']
try:
task = project.find_task(task_name)
except NoSuchTaskError as e:
yield events.task_not_found(task_name, e.similarities)
raise StopTask
text = f'# {task.name}\n'
text += '\n'
text += task.description
text += '\n\n'
text += 'Variables: {}'.format(', '.join(task.variables))
    yield events.help_output(text) | Run a help step. | Below is the instruction that describes the task:
### Input:
Run a help step.
### Response:
def help(project, task, step, variables):
"""Run a help step."""
task_name = step.args or variables['task']
try:
task = project.find_task(task_name)
except NoSuchTaskError as e:
yield events.task_not_found(task_name, e.similarities)
raise StopTask
text = f'# {task.name}\n'
text += '\n'
text += task.description
text += '\n\n'
text += 'Variables: {}'.format(', '.join(task.variables))
yield events.help_output(text) |
def _lookup_proxmox_task(upid):
'''
Retrieve the (latest) logs and retrieve the status for a UPID.
This can be used to verify whether a task has completed.
'''
log.debug('Getting creation status for upid: %s', upid)
tasks = query('get', 'cluster/tasks')
if tasks:
for task in tasks:
if task['upid'] == upid:
log.debug('Found upid task: %s', task)
return task
return False | Retrieve the (latest) logs and retrieve the status for a UPID.
    This can be used to verify whether a task has completed. | Below is the instruction that describes the task:
### Input:
Retrieve the (latest) logs and retrieve the status for a UPID.
This can be used to verify whether a task has completed.
### Response:
def _lookup_proxmox_task(upid):
'''
Retrieve the (latest) logs and retrieve the status for a UPID.
This can be used to verify whether a task has completed.
'''
log.debug('Getting creation status for upid: %s', upid)
tasks = query('get', 'cluster/tasks')
if tasks:
for task in tasks:
if task['upid'] == upid:
log.debug('Found upid task: %s', task)
return task
return False |
def set_default_headers(self, *args, **kwargs):
"""Set the default headers for all requests."""
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers',
'Origin, X-Requested-With, Content-Type, Accept')
self.set_header('Access-Control-Allow-Methods',
                        'GET, HEAD, PUT, POST, DELETE') | Set the default headers for all requests. | Below is the instruction that describes the task:
### Input:
Set the default headers for all requests.
### Response:
def set_default_headers(self, *args, **kwargs):
"""Set the default headers for all requests."""
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers',
'Origin, X-Requested-With, Content-Type, Accept')
self.set_header('Access-Control-Allow-Methods',
'GET, HEAD, PUT, POST, DELETE') |
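The `set_header` calls above match a Tornado-style `RequestHandler`; assuming Tornado is the framework in use, a minimal sketch of wiring such a handler into an application (the handler and route names are made up for illustration) could look like this:

import tornado.web

class CorsPingHandler(tornado.web.RequestHandler):
    def set_default_headers(self, *args, **kwargs):
        # Applied to every response from this handler, including error responses.
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Headers',
                        'Origin, X-Requested-With, Content-Type, Accept')
        self.set_header('Access-Control-Allow-Methods',
                        'GET, HEAD, PUT, POST, DELETE')

    def get(self):
        self.write({'ok': True})

app = tornado.web.Application([(r'/ping', CorsPingHandler)])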
def get_subordinate_clauses(tiger_docgraph):
"""
given a document graph of a TIGER syntax tree, return all
node IDs of nodes representing subordinate clause constituents.
Parameters
----------
tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph
document graph from which subordinate clauses will be extracted
Returns
-------
subord_clause_nodes : list(str)
list of node IDs of nodes directly dominating subordinate clauses
"""
subord_clause_rels = \
dg.select_edges_by_attribute(
tiger_docgraph, attribute='tiger:label',
value=['MO', 'RC', 'SB'])
subord_clause_nodes = []
for src_id, target_id in subord_clause_rels:
src_cat = tiger_docgraph.node[src_id].get('tiger:cat')
if src_cat == 'S' and not dg.istoken(tiger_docgraph, target_id):
subord_clause_nodes.append(target_id)
return subord_clause_nodes | given a document graph of a TIGER syntax tree, return all
node IDs of nodes representing subordinate clause constituents.
Parameters
----------
tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph
document graph from which subordinate clauses will be extracted
Returns
-------
subord_clause_nodes : list(str)
        list of node IDs of nodes directly dominating subordinate clauses | Below is the instruction that describes the task:
### Input:
given a document graph of a TIGER syntax tree, return all
node IDs of nodes representing subordinate clause constituents.
Parameters
----------
tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph
document graph from which subordinate clauses will be extracted
Returns
-------
subord_clause_nodes : list(str)
list of node IDs of nodes directly dominating subordinate clauses
### Response:
def get_subordinate_clauses(tiger_docgraph):
"""
given a document graph of a TIGER syntax tree, return all
node IDs of nodes representing subordinate clause constituents.
Parameters
----------
tiger_docgraph : DiscourseDocumentGraph or TigerDocumentGraph
document graph from which subordinate clauses will be extracted
Returns
-------
subord_clause_nodes : list(str)
list of node IDs of nodes directly dominating subordinate clauses
"""
subord_clause_rels = \
dg.select_edges_by_attribute(
tiger_docgraph, attribute='tiger:label',
value=['MO', 'RC', 'SB'])
subord_clause_nodes = []
for src_id, target_id in subord_clause_rels:
src_cat = tiger_docgraph.node[src_id].get('tiger:cat')
if src_cat == 'S' and not dg.istoken(tiger_docgraph, target_id):
subord_clause_nodes.append(target_id)
return subord_clause_nodes |
def extractfile(self, member):
'''
Extract a member from the archive as a file object. `member' may be
a filename or an RPMInfo object.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
'''
if not isinstance(member, RPMInfo):
member = self.getmember(member)
return _SubFile(self.data_file, member.file_start, member.size) | Extract a member from the archive as a file object. `member' may be
a filename or an RPMInfo object.
The file-like object is read-only and provides the following
        methods: read(), readline(), readlines(), seek() and tell() | Below is the instruction that describes the task:
### Input:
Extract a member from the archive as a file object. `member' may be
a filename or an RPMInfo object.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
### Response:
def extractfile(self, member):
'''
Extract a member from the archive as a file object. `member' may be
a filename or an RPMInfo object.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
'''
if not isinstance(member, RPMInfo):
member = self.getmember(member)
return _SubFile(self.data_file, member.file_start, member.size) |
def with_updated_configuration(self, options=None,
attribute_options=None):
"""
Returns a context in which this representer is updated with the
given options and attribute options.
"""
return self._mapping.with_updated_configuration(options=options,
attribute_options=
attribute_options) | Returns a context in which this representer is updated with the
        given options and attribute options. | Below is the instruction that describes the task:
### Input:
Returns a context in which this representer is updated with the
given options and attribute options.
### Response:
def with_updated_configuration(self, options=None,
attribute_options=None):
"""
Returns a context in which this representer is updated with the
given options and attribute options.
"""
return self._mapping.with_updated_configuration(options=options,
attribute_options=
attribute_options) |
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is always ignored.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError("Data shape {} must match shape of object {}"
.format(data.shape, self.shape))
return type(self)(self.dims, data, self._attrs,
self._encoding, fastpath=True) | Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is always ignored.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
            data copied from original. | Below is the instruction that describes the task:
### Input:
Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is always ignored.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
### Response:
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is always ignored.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError("Data shape {} must match shape of object {}"
.format(data.shape, self.shape))
return type(self)(self.dims, data, self._attrs,
self._encoding, fastpath=True) |
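A short usage sketch, assuming the xarray-style `Variable` this method belongs to; the values are arbitrary.

import numpy as np
import xarray as xr

v = xr.Variable(('x',), np.array([1, 2, 3]), attrs={'units': 'm'})
v2 = v.copy(data=np.array([10, 20, 30]))  # same dims and attrs, new values
assert v2.dims == v.dims and v2.attrs == v.attrs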
def show_page(self, course):
""" Prepares and shows the course page """
username = self.user_manager.session_username()
if not self.user_manager.course_is_open_to_user(course, lti=False):
return self.template_helper.get_renderer().course_unavailable()
else:
tasks = course.get_tasks()
last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
for submission in last_submissions:
submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language())
tasks_data = {}
user_tasks = self.database.user_tasks.find({"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
is_admin = self.user_manager.has_staff_rights_on_course(course, username)
tasks_score = [0.0, 0.0]
for taskid, task in tasks.items():
tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False,
"grade": 0.0}
tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0
for user_task in user_tasks:
tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"]
tasks_data[user_task["taskid"]]["grade"] = user_task["grade"]
weighted_score = user_task["grade"]*tasks[user_task["taskid"]].get_grading_weight()
tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0
course_grade = round(tasks_score[0]/tasks_score[1]) if tasks_score[1] > 0 else 0
tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language())
user_info = self.database.users.find_one({"username": username})
            return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks, tasks_data, course_grade, tag_list) | Prepares and shows the course page | Below is the instruction that describes the task:
### Input:
Prepares and shows the course page
### Response:
def show_page(self, course):
""" Prepares and shows the course page """
username = self.user_manager.session_username()
if not self.user_manager.course_is_open_to_user(course, lti=False):
return self.template_helper.get_renderer().course_unavailable()
else:
tasks = course.get_tasks()
last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
for submission in last_submissions:
submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language())
tasks_data = {}
user_tasks = self.database.user_tasks.find({"username": username, "courseid": course.get_id(), "taskid": {"$in": list(tasks.keys())}})
is_admin = self.user_manager.has_staff_rights_on_course(course, username)
tasks_score = [0.0, 0.0]
for taskid, task in tasks.items():
tasks_data[taskid] = {"visible": task.get_accessible_time().after_start() or is_admin, "succeeded": False,
"grade": 0.0}
tasks_score[1] += task.get_grading_weight() if tasks_data[taskid]["visible"] else 0
for user_task in user_tasks:
tasks_data[user_task["taskid"]]["succeeded"] = user_task["succeeded"]
tasks_data[user_task["taskid"]]["grade"] = user_task["grade"]
weighted_score = user_task["grade"]*tasks[user_task["taskid"]].get_grading_weight()
tasks_score[0] += weighted_score if tasks_data[user_task["taskid"]]["visible"] else 0
course_grade = round(tasks_score[0]/tasks_score[1]) if tasks_score[1] > 0 else 0
tag_list = course.get_all_tags_names_as_list(is_admin, self.user_manager.session_language())
user_info = self.database.users.find_one({"username": username})
return self.template_helper.get_renderer().course(user_info, course, last_submissions, tasks, tasks_data, course_grade, tag_list) |
def fit(self, X, y=None, **kwargs):
"""
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs: keyword arguments passed to Scikit-Learn API.
Returns
-------
self : instance
Returns the instance of the classification score visualizer
"""
# Fit the inner estimator
self.estimator.fit(X, y)
# Extract the classes from the estimator
if self.classes_ is None:
self.classes_ = self.estimator.classes_
# Always return self from fit
return self | Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs: keyword arguments passed to Scikit-Learn API.
Returns
-------
self : instance
            Returns the instance of the classification score visualizer | Below is the instruction that describes the task:
### Input:
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs: keyword arguments passed to Scikit-Learn API.
Returns
-------
self : instance
Returns the instance of the classification score visualizer
### Response:
def fit(self, X, y=None, **kwargs):
"""
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs: keyword arguments passed to Scikit-Learn API.
Returns
-------
self : instance
Returns the instance of the classification score visualizer
"""
# Fit the inner estimator
self.estimator.fit(X, y)
# Extract the classes from the estimator
if self.classes_ is None:
self.classes_ = self.estimator.classes_
# Always return self from fit
return self |
def _DrawHours(self):
"""Generates svg to show a vertical hour and sub-hour grid
Returns:
# A string containing a polyline tag for each grid line
" <polyline class="FullHour" points="20,0 ..."
"""
tmpstrs = []
for i in range(0, self._gwidth, self._min_grid):
if i % self._hour_grid == 0:
tmpstrs.append('<polyline class="FullHour" points="%d,%d, %d,%d" />' \
% (i + .5 + 20, 20, i + .5 + 20, self._gheight))
tmpstrs.append('<text class="Label" x="%d" y="%d">%d</text>'
% (i + 20, 20,
(i / self._hour_grid + self._offset) % 24))
else:
tmpstrs.append('<polyline class="SubHour" points="%d,%d,%d,%d" />' \
% (i + .5 + 20, 20, i + .5 + 20, self._gheight))
return "".join(tmpstrs) | Generates svg to show a vertical hour and sub-hour grid
Returns:
# A string containing a polyline tag for each grid line
" <polyline class="FullHour" points="20,0 ..." | Below is the the instruction that describes the task:
### Input:
Generates svg to show a vertical hour and sub-hour grid
Returns:
# A string containing a polyline tag for each grid line
" <polyline class="FullHour" points="20,0 ..."
### Response:
def _DrawHours(self):
"""Generates svg to show a vertical hour and sub-hour grid
Returns:
# A string containing a polyline tag for each grid line
" <polyline class="FullHour" points="20,0 ..."
"""
tmpstrs = []
for i in range(0, self._gwidth, self._min_grid):
if i % self._hour_grid == 0:
tmpstrs.append('<polyline class="FullHour" points="%d,%d, %d,%d" />' \
% (i + .5 + 20, 20, i + .5 + 20, self._gheight))
tmpstrs.append('<text class="Label" x="%d" y="%d">%d</text>'
% (i + 20, 20,
(i / self._hour_grid + self._offset) % 24))
else:
tmpstrs.append('<polyline class="SubHour" points="%d,%d,%d,%d" />' \
% (i + .5 + 20, 20, i + .5 + 20, self._gheight))
return "".join(tmpstrs) |
def merge_dicts(dict1, dict2, append_lists=False):
"""
Merge the second dict into the first
Not intended to merge list of dicts.
:param append_lists: If true, instead of clobbering a list with the
new value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key], append_lists)
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it. Don't add duplicates.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(
[k for k in dict2[key] if k not in dict1[key]])
else:
dict1[key] = dict2[key]
else:
dict1[key] = dict2[key] | Merge the second dict into the first
Not intended to merge list of dicts.
:param append_lists: If true, instead of clobbering a list with the
        new value, append all of the new values onto the original list. | Below is the instruction that describes the task:
### Input:
Merge the second dict into the first
Not intended to merge list of dicts.
:param append_lists: If true, instead of clobbering a list with the
new value, append all of the new values onto the original list.
### Response:
def merge_dicts(dict1, dict2, append_lists=False):
"""
Merge the second dict into the first
Not intended to merge list of dicts.
:param append_lists: If true, instead of clobbering a list with the
new value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key], append_lists)
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it. Don't add duplicates.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(
[k for k in dict2[key] if k not in dict1[key]])
else:
dict1[key] = dict2[key]
else:
dict1[key] = dict2[key] |
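A small worked example of the `append_lists` behaviour described above; the dictionaries are arbitrary.

a = {'pkgs': ['curl'], 'opts': {'retries': 2}}
b = {'pkgs': ['git', 'curl'], 'opts': {'timeout': 30}}

merge_dicts(a, b, append_lists=True)
# a == {'pkgs': ['curl', 'git'], 'opts': {'retries': 2, 'timeout': 30}}
# With append_lists=False, b's 'pkgs' list would simply replace a's.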
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args) | Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
           moved to the new :meth:`full_dispatch_request`. | Below is the instruction that describes the task:
### Input:
Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
### Response:
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args) |
def parametrize(params):
"""Return list of params as params.
>>> parametrize(['a'])
'a'
>>> parametrize(['a', 'b'])
'a[b]'
>>> parametrize(['a', 'b', 'c'])
'a[b][c]'
"""
returned = str(params[0])
returned += "".join("[" + str(p) + "]" for p in params[1:])
return returned | Return list of params as params.
>>> parametrize(['a'])
'a'
>>> parametrize(['a', 'b'])
'a[b]'
>>> parametrize(['a', 'b', 'c'])
    'a[b][c]' | Below is the instruction that describes the task:
### Input:
Return list of params as params.
>>> parametrize(['a'])
'a'
>>> parametrize(['a', 'b'])
'a[b]'
>>> parametrize(['a', 'b', 'c'])
'a[b][c]'
### Response:
def parametrize(params):
"""Return list of params as params.
>>> parametrize(['a'])
'a'
>>> parametrize(['a', 'b'])
'a[b]'
>>> parametrize(['a', 'b', 'c'])
'a[b][c]'
"""
returned = str(params[0])
returned += "".join("[" + str(p) + "]" for p in params[1:])
return returned |
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended | Return a list of packages which need to be installed, to resolve all
    dependencies | Below is the instruction that describes the task:
### Input:
Return a list of packages which need to be installed, to resolve all
dependencies
### Response:
def _resolve_deps(self, formula_def):
'''
Return a list of packages which need to be installed, to resolve all
dependencies
'''
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {}
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = ''
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep]
else:
cant_has.append(dep)
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep))
return can_has, cant_has, optional, recommended |
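(Illustrative aside, not part of the dataset row above.) The method leans on Salt's SPM internals (self.pkgdb, self.avail_pkgs, self.repo_metadata), so it is hard to call in isolation. Below is a standalone, simplified sketch of the same idea -- walking dependency metadata, splitting dependencies into resolvable and missing sets, and skipping packages already inspected -- with a plain dict standing in for the repository; every name and value here is an illustrative assumption, not Salt's API.

def resolve_deps(name, available, installed):
    """Return (resolvable, missing) dependency names for ``name``."""
    resolvable, missing, seen = set(), set(), set()
    to_inspect = [name]
    while to_inspect:
        pkg = to_inspect.pop()
        if pkg in seen:
            continue                      # don't resolve the same package twice
        seen.add(pkg)
        for dep in available.get(pkg, {}).get("dependencies", []):
            if dep in installed:
                continue                  # already satisfied
            if dep in available:
                resolvable.add(dep)
                to_inspect.append(dep)    # inspect its dependencies too
            else:
                missing.add(dep)
    return sorted(resolvable), sorted(missing)

available = {
    "web": {"dependencies": ["db", "cache"]},
    "db": {"dependencies": ["libssl"]},
    "cache": {"dependencies": []},
}
print(resolve_deps("web", available, installed={"libssl"}))
# (['cache', 'db'], [])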
def _write(self, dap_index, transfer_count,
transfer_request, transfer_data):
"""
Write one or more commands
"""
assert dap_index == 0 # dap index currently unsupported
assert isinstance(transfer_count, six.integer_types)
assert isinstance(transfer_request, six.integer_types)
assert transfer_data is None or len(transfer_data) > 0
# Create transfer and add to transfer list
transfer = None
if transfer_request & READ:
transfer = _Transfer(self, dap_index, transfer_count,
transfer_request, transfer_data)
self._transfer_list.append(transfer)
# Build physical packet by adding it to command
cmd = self._crnt_cmd
is_read = transfer_request & READ
size_to_transfer = transfer_count
trans_data_pos = 0
while size_to_transfer > 0:
# Get the size remaining in the current packet for the given request.
size = cmd.get_request_space(size_to_transfer, transfer_request, dap_index)
# This request doesn't fit in the packet so send it.
if size == 0:
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [size==0]")
self._send_packet()
cmd = self._crnt_cmd
continue
# Add request to packet.
if transfer_data is None:
data = None
else:
data = transfer_data[trans_data_pos:trans_data_pos + size]
cmd.add(size, transfer_request, data, dap_index)
size_to_transfer -= size
trans_data_pos += size
# Packet has been filled so send it
if cmd.get_full():
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [full]")
self._send_packet()
cmd = self._crnt_cmd
if not self._deferred_transfer:
self.flush()
        return transfer | Write one or more commands | Below is the instruction that describes the task:
### Input:
Write one or more commands
### Response:
def _write(self, dap_index, transfer_count,
transfer_request, transfer_data):
"""
Write one or more commands
"""
assert dap_index == 0 # dap index currently unsupported
assert isinstance(transfer_count, six.integer_types)
assert isinstance(transfer_request, six.integer_types)
assert transfer_data is None or len(transfer_data) > 0
# Create transfer and add to transfer list
transfer = None
if transfer_request & READ:
transfer = _Transfer(self, dap_index, transfer_count,
transfer_request, transfer_data)
self._transfer_list.append(transfer)
# Build physical packet by adding it to command
cmd = self._crnt_cmd
is_read = transfer_request & READ
size_to_transfer = transfer_count
trans_data_pos = 0
while size_to_transfer > 0:
# Get the size remaining in the current packet for the given request.
size = cmd.get_request_space(size_to_transfer, transfer_request, dap_index)
# This request doesn't fit in the packet so send it.
if size == 0:
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [size==0]")
self._send_packet()
cmd = self._crnt_cmd
continue
# Add request to packet.
if transfer_data is None:
data = None
else:
data = transfer_data[trans_data_pos:trans_data_pos + size]
cmd.add(size, transfer_request, data, dap_index)
size_to_transfer -= size
trans_data_pos += size
# Packet has been filled so send it
if cmd.get_full():
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [full]")
self._send_packet()
cmd = self._crnt_cmd
if not self._deferred_transfer:
self.flush()
return transfer |
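(Illustrative aside, not part of the dataset row above.) A standalone sketch of the packet-filling loop used above, stripped of the CMSIS-DAP specifics: a request is cut into chunks that fit the space left in the current packet, and a fresh packet is started whenever the current one cannot take any more. The Packet class and its capacity are invented for illustration; the real code asks the command object how much of a given request type still fits.

class Packet(object):
    CAPACITY = 14                       # words per packet (illustrative)

    def __init__(self):
        self.words = []

    def space(self):
        return self.CAPACITY - len(self.words)

    def add(self, chunk):
        self.words.extend(chunk)

def queue_write(data, packets):
    """Split ``data`` across packets, opening new ones as they fill."""
    pos = 0
    while pos < len(data):
        size = min(packets[-1].space(), len(data) - pos)
        if size == 0:                   # current packet is full: start another
            packets.append(Packet())
            continue
        packets[-1].add(data[pos:pos + size])
        pos += size
    return packets

packets = queue_write(list(range(30)), [Packet()])
print([len(p.words) for p in packets])  # [14, 14, 2]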
def Split(cls, extended_path_mask):
'''
Splits the given path into their components: recursive, dirname, in_filters and out_filters
:param str: extended_path_mask:
The "extended path mask" to split
:rtype: tuple(bool,bool,str,list(str),list(str))
:returns:
Returns the extended path 5 components:
- The tree-recurse flag
- The flat-recurse flag
- The actual path
- A list of masks to include
- A list of masks to exclude
'''
import os.path
r_tree_recurse = extended_path_mask[0] in '+-'
r_flat_recurse = extended_path_mask[0] in '-'
r_dirname, r_filters = os.path.split(extended_path_mask)
if r_tree_recurse:
r_dirname = r_dirname[1:]
filters = r_filters.split(';')
r_in_filters = [i for i in filters if not i.startswith('!')]
r_out_filters = [i[1:] for i in filters if i.startswith('!')]
return r_tree_recurse, r_flat_recurse, r_dirname, r_in_filters, r_out_filters | Splits the given path into their components: recursive, dirname, in_filters and out_filters
:param str: extended_path_mask:
The "extended path mask" to split
:rtype: tuple(bool,bool,str,list(str),list(str))
:returns:
Returns the extended path 5 components:
- The tree-recurse flag
- The flat-recurse flag
- The actual path
- A list of masks to include
            - A list of masks to exclude | Below is the instruction that describes the task:
### Input:
Splits the given path into their components: recursive, dirname, in_filters and out_filters
:param str: extended_path_mask:
The "extended path mask" to split
:rtype: tuple(bool,bool,str,list(str),list(str))
:returns:
Returns the extended path 5 components:
- The tree-recurse flag
- The flat-recurse flag
- The actual path
- A list of masks to include
- A list of masks to exclude
### Response:
def Split(cls, extended_path_mask):
'''
Splits the given path into their components: recursive, dirname, in_filters and out_filters
:param str: extended_path_mask:
The "extended path mask" to split
:rtype: tuple(bool,bool,str,list(str),list(str))
:returns:
Returns the extended path 5 components:
- The tree-recurse flag
- The flat-recurse flag
- The actual path
- A list of masks to include
- A list of masks to exclude
'''
import os.path
r_tree_recurse = extended_path_mask[0] in '+-'
r_flat_recurse = extended_path_mask[0] in '-'
r_dirname, r_filters = os.path.split(extended_path_mask)
if r_tree_recurse:
r_dirname = r_dirname[1:]
filters = r_filters.split(';')
r_in_filters = [i for i in filters if not i.startswith('!')]
r_out_filters = [i[1:] for i in filters if i.startswith('!')]
return r_tree_recurse, r_flat_recurse, r_dirname, r_in_filters, r_out_filters |
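(Illustrative aside, not part of the dataset row above.) A usage sketch: the same splitting logic restated as a free function (the original is a classmethod on a class not shown here), so the expected output can be checked directly; the example mask is made up.

import os.path

def split_extended_path_mask(mask):
    # Same steps as Split above: recursion flags, dirname, include/exclude filters.
    tree_recurse = mask[0] in '+-'
    flat_recurse = mask[0] in '-'
    dirname, filters = os.path.split(mask)
    if tree_recurse:
        dirname = dirname[1:]
    parts = filters.split(';')
    include = [f for f in parts if not f.startswith('!')]
    exclude = [f[1:] for f in parts if f.startswith('!')]
    return tree_recurse, flat_recurse, dirname, include, exclude

print(split_extended_path_mask('+/tmp/src/*.py;!*_test.py'))
# (True, False, '/tmp/src', ['*.py'], ['*_test.py'])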
def quaternion(self):
""":obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
"""
q_xyzw = transformations.quaternion_from_matrix(self.matrix)
q_wxyz = np.roll(q_xyzw, 1)
        return q_wxyz | :obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout. | Below is the instruction that describes the task:
### Input:
:obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
### Response:
def quaternion(self):
""":obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
"""
q_xyzw = transformations.quaternion_from_matrix(self.matrix)
q_wxyz = np.roll(q_xyzw, 1)
return q_wxyz |
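(Illustrative aside, not part of the dataset row above.) A small sketch of the convention change performed by the property: the code assumes the underlying transformations helper returns quaternions in xyzw order, and np.roll moves the scalar part to the front to give wxyz. The identity rotation makes the reordering easy to see.

import numpy as np

q_xyzw = np.array([0.0, 0.0, 0.0, 1.0])  # identity rotation, (x, y, z, w)
q_wxyz = np.roll(q_xyzw, 1)              # -> (w, x, y, z)
print(q_wxyz)                            # [1. 0. 0. 0.]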
def get_sub_node(self, node):
"""Extract node from document if desired."""
subnode = node.find('office:document')
if subnode:
mimetype = subnode.attrs['office:mimetype']
self.type = MIMEMAP[mimetype]
node = node.find('office:body')
        return node | Extract node from document if desired. | Below is the instruction that describes the task:
### Input:
Extract node from document if desired.
### Response:
def get_sub_node(self, node):
"""Extract node from document if desired."""
subnode = node.find('office:document')
if subnode:
mimetype = subnode.attrs['office:mimetype']
self.type = MIMEMAP[mimetype]
node = node.find('office:body')
return node |
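(Illustrative aside, not part of the dataset row above.) A hedged sketch of the pieces this method assumes but does not show: a MIMEMAP from ODF MIME types to short document types, and a BeautifulSoup-style node exposing find() and attrs. The mapping values and the XML snippet below are illustrative guesses, not taken from the source.

from bs4 import BeautifulSoup

# Hypothetical mapping from ODF MIME types to short document types.
MIMEMAP = {
    "application/vnd.oasis.opendocument.text": "text",
    "application/vnd.oasis.opendocument.spreadsheet": "spreadsheet",
}

xml = ('<office:document office:mimetype='
       '"application/vnd.oasis.opendocument.text">'
       '<office:body><office:text>Hello</office:text></office:body>'
       '</office:document>')

node = BeautifulSoup(xml, "html.parser")
subnode = node.find("office:document")
print(MIMEMAP[subnode.attrs["office:mimetype"]])  # text
print(node.find("office:body") is not None)       # True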