code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def json_serial(obj):
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable") | JSON serializer for objects not serializable by default json code | ### Input:
JSON serializer for objects not serializable by default json code
### Response:
def json_serial(obj):
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable") |
def scroll(self, vector):
self.center((vector[0] + self.view_rect.centerx,
vector[1] + self.view_rect.centery)) | scroll the background in pixels
:param vector: (int, int) | ### Input:
scroll the background in pixels
:param vector: (int, int)
### Response:
def scroll(self, vector):
self.center((vector[0] + self.view_rect.centerx,
vector[1] + self.view_rect.centery)) |
def _delete(self, pos, idx):
_maxes, _lists, _index = self._maxes, self._lists, self._index
lists_pos = _lists[pos]
del lists_pos[idx]
self._len -= 1
len_lists_pos = len(lists_pos)
if len_lists_pos > self._half:
_maxes[pos] = lists_pos[-1]
if _index:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_lists) > 1:
if not pos:
pos += 1
prev = pos - 1
_lists[prev].extend(_lists[pos])
_maxes[prev] = _lists[prev][-1]
del _maxes[pos]
del _lists[pos]
del _index[:]
self._expand(prev)
elif len_lists_pos:
_maxes[pos] = lists_pos[-1]
else:
del _maxes[pos]
del _lists[pos]
del _index[:] | Delete the item at the given (pos, idx).
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc. | ### Input:
Delete the item at the given (pos, idx).
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc.
### Response:
def _delete(self, pos, idx):
_maxes, _lists, _index = self._maxes, self._lists, self._index
lists_pos = _lists[pos]
del lists_pos[idx]
self._len -= 1
len_lists_pos = len(lists_pos)
if len_lists_pos > self._half:
_maxes[pos] = lists_pos[-1]
if _index:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_lists) > 1:
if not pos:
pos += 1
prev = pos - 1
_lists[prev].extend(_lists[pos])
_maxes[prev] = _lists[prev][-1]
del _maxes[pos]
del _lists[pos]
del _index[:]
self._expand(prev)
elif len_lists_pos:
_maxes[pos] = lists_pos[-1]
else:
del _maxes[pos]
del _lists[pos]
del _index[:] |
def split(self, length, vertical=True):
[left, bottom, right, top] = self.bounds
if vertical:
box = [[left, bottom, left + length, top],
[left + length, bottom, right, top]]
else:
box = [[left, bottom, right, bottom + length],
[left, bottom + length, right, top]]
return box | Returns two bounding boxes representing the current
bounds split into two smaller boxes.
Parameters
-------------
length: float, length to split
vertical: bool, if True will split box vertically
Returns
-------------
box: (2,4) float, two bounding boxes consisting of:
[minx, miny, maxx, maxy] | ### Input:
Returns two bounding boxes representing the current
bounds split into two smaller boxes.
Parameters
-------------
length: float, length to split
vertical: bool, if True will split box vertically
Returns
-------------
box: (2,4) float, two bounding boxes consisting of:
[minx, miny, maxx, maxy]
### Response:
def split(self, length, vertical=True):
[left, bottom, right, top] = self.bounds
if vertical:
box = [[left, bottom, left + length, top],
[left + length, bottom, right, top]]
else:
box = [[left, bottom, right, bottom + length],
[left, bottom + length, right, top]]
return box |
def to_oncotator(self):
if self.type == ".":
ref = self.ref
alt = self.change
start = self.pos
end = self.pos
elif self.type == "-":
ref = self.change
alt = "-"
start = self.pos + 1
end = start + len(self.change)
elif self.type == "+":
ref = "-"
alt = self.change
start = self.pos
end = start + len(self.change)
else:
raise(Exception("Unexpected mutation type: {}".format(self.type)))
return "{chrom}\t{start}\t{end}\t{ref}\t{alt}".format(chrom=self.chrom, start=start,
end=end, ref=ref, alt=alt) | Returns mutation in oncotator input format. Assumes mutations have
vcf/mpileup style positions. | ### Input:
Returns mutation in oncotator input format. Assumes mutations have
vcf/mpileup style positions.
### Response:
def to_oncotator(self):
if self.type == ".":
ref = self.ref
alt = self.change
start = self.pos
end = self.pos
elif self.type == "-":
ref = self.change
alt = "-"
start = self.pos + 1
end = start + len(self.change)
elif self.type == "+":
ref = "-"
alt = self.change
start = self.pos
end = start + len(self.change)
else:
raise(Exception("Unexpected mutation type: {}".format(self.type)))
return "{chrom}\t{start}\t{end}\t{ref}\t{alt}".format(chrom=self.chrom, start=start,
end=end, ref=ref, alt=alt) |
def process_added_port(self, device_details):
device = device_details[]
port_id = device_details[]
reprocess = True
try:
self._process_added_port(device_details)
LOG.debug("Updating cached port %s status as UP.", port_id)
self._update_port_status_cache(device, device_bound=True)
LOG.info("Port %s processed.", port_id)
except os_win_exc.HyperVvNicNotFound:
LOG.debug(
, port_id)
reprocess = False
except os_win_exc.HyperVPortNotFoundException:
LOG.debug(
, port_id)
except Exception as ex:
LOG.exception("Exception encountered while processing "
"port %(port_id)s. Exception: %(ex)s",
dict(port_id=port_id, ex=ex))
else:
reprocess = False
if reprocess:
self._added_ports.add(device)
self._refresh_cache = True
return False
return True | Process the new ports.
Wraps _process_added_port, and treats the sucessful and exception
cases. | ### Input:
Process the new ports.
Wraps _process_added_port, and treats the sucessful and exception
cases.
### Response:
def process_added_port(self, device_details):
device = device_details[]
port_id = device_details[]
reprocess = True
try:
self._process_added_port(device_details)
LOG.debug("Updating cached port %s status as UP.", port_id)
self._update_port_status_cache(device, device_bound=True)
LOG.info("Port %s processed.", port_id)
except os_win_exc.HyperVvNicNotFound:
LOG.debug(
, port_id)
reprocess = False
except os_win_exc.HyperVPortNotFoundException:
LOG.debug(
, port_id)
except Exception as ex:
LOG.exception("Exception encountered while processing "
"port %(port_id)s. Exception: %(ex)s",
dict(port_id=port_id, ex=ex))
else:
reprocess = False
if reprocess:
self._added_ports.add(device)
self._refresh_cache = True
return False
return True |
def format_arrow(value, format_string):
assert isinstance(value, arrow.Arrow)
return value.format(format_string) | Format an arrow datetime object.
:param value: The arrow datetime object.
:param format_string: The date format string
:returns: Returns a string representation of the given arrow datetime
object, formatted according to the given format string.
.. note::
Do not use this filter to format date/times presented to an end
user. Instead use ``datetimeformat`` or ``dateformat`` from
Invenio-I18N. | ### Input:
Format an arrow datetime object.
:param value: The arrow datetime object.
:param format_string: The date format string
:returns: Returns a string representation of the given arrow datetime
object, formatted according to the given format string.
.. note::
Do not use this filter to format date/times presented to an end
user. Instead use ``datetimeformat`` or ``dateformat`` from
Invenio-I18N.
### Response:
def format_arrow(value, format_string):
assert isinstance(value, arrow.Arrow)
return value.format(format_string) |
def add_transition_to_state(from_port, to_port):
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
responsible_parent_m = None
if isinstance(from_port, IncomeView):
from_state_id = None
from_outcome_id = None
responsible_parent_m = from_state_m
if isinstance(to_port, IncomeView):
to_outcome_id = None
elif isinstance(to_port, OutcomeView):
to_outcome_id = to_port.outcome_id
elif isinstance(from_port, OutcomeView):
from_outcome_id = from_port.outcome_id
if isinstance(to_port, IncomeView):
responsible_parent_m = from_state_m.parent
to_outcome_id = None
elif isinstance(to_port, OutcomeView):
responsible_parent_m = to_state_m
to_outcome_id = to_port.outcome_id
else:
raise ValueError("Invalid port type")
from rafcon.gui.models.container_state import ContainerStateModel
if not responsible_parent_m:
logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type(
from_port), type(to_port)))
return False
elif not isinstance(responsible_parent_m, ContainerStateModel):
logger.error("Transitions only exist in container states (e.g. hierarchy states)")
return False
try:
t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
if from_state_id == to_state_id:
gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id,
combined_action=True)
return True
except (ValueError, AttributeError, TypeError) as e:
logger.error("Transition couldn't be added: {0}".format(e))
return False | Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred | ### Input:
Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
### Response:
def add_transition_to_state(from_port, to_port):
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
responsible_parent_m = None
if isinstance(from_port, IncomeView):
from_state_id = None
from_outcome_id = None
responsible_parent_m = from_state_m
if isinstance(to_port, IncomeView):
to_outcome_id = None
elif isinstance(to_port, OutcomeView):
to_outcome_id = to_port.outcome_id
elif isinstance(from_port, OutcomeView):
from_outcome_id = from_port.outcome_id
if isinstance(to_port, IncomeView):
responsible_parent_m = from_state_m.parent
to_outcome_id = None
elif isinstance(to_port, OutcomeView):
responsible_parent_m = to_state_m
to_outcome_id = to_port.outcome_id
else:
raise ValueError("Invalid port type")
from rafcon.gui.models.container_state import ContainerStateModel
if not responsible_parent_m:
logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type(
from_port), type(to_port)))
return False
elif not isinstance(responsible_parent_m, ContainerStateModel):
logger.error("Transitions only exist in container states (e.g. hierarchy states)")
return False
try:
t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
if from_state_id == to_state_id:
gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id,
combined_action=True)
return True
except (ValueError, AttributeError, TypeError) as e:
logger.error("Transition couldn't be added: {0}".format(e))
return False |
def QA_indicator_MACD(DataFrame, short=12, long=26, mid=9):
CLOSE = DataFrame[]
DIF = EMA(CLOSE, short)-EMA(CLOSE, long)
DEA = EMA(DIF, mid)
MACD = (DIF-DEA)*2
return pd.DataFrame({: DIF, : DEA, : MACD}) | MACD CALC | ### Input:
MACD CALC
### Response:
def QA_indicator_MACD(DataFrame, short=12, long=26, mid=9):
CLOSE = DataFrame[]
DIF = EMA(CLOSE, short)-EMA(CLOSE, long)
DEA = EMA(DIF, mid)
MACD = (DIF-DEA)*2
return pd.DataFrame({: DIF, : DEA, : MACD}) |
def _quantityToReal(self, quantity):
if not quantity:
return 1.0
try:
return float(quantity.replace(, ))
except ValueError:
pass
try:
return float(self.ptc.numbers[quantity])
except KeyError:
pass
return 0.0 | Convert a quantity, either spelled-out or numeric, to a float
@type quantity: string
@param quantity: quantity to parse to float
@rtype: int
@return: the quantity as an float, defaulting to 0.0 | ### Input:
Convert a quantity, either spelled-out or numeric, to a float
@type quantity: string
@param quantity: quantity to parse to float
@rtype: int
@return: the quantity as an float, defaulting to 0.0
### Response:
def _quantityToReal(self, quantity):
if not quantity:
return 1.0
try:
return float(quantity.replace(, ))
except ValueError:
pass
try:
return float(self.ptc.numbers[quantity])
except KeyError:
pass
return 0.0 |
def pause_writing(self):
if not self.is_closing():
self._can_send.clear()
self.transport.pause_reading() | Transport calls when the send buffer is full. | ### Input:
Transport calls when the send buffer is full.
### Response:
def pause_writing(self):
if not self.is_closing():
self._can_send.clear()
self.transport.pause_reading() |
def on_okButton(self, event):
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options[] = HUJI_file
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options[] = outfile
user = self.bSizer1.return_value()
options[] = user
if user:
user = + user
experiment_type = self.bSizer2.return_value()
options[] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options[] = lab_field_list[0]
options[] = lab_field_list[1]
options[] = lab_field_list[2]
lab_field = + lab_field
spc = self.bSizer4.return_value()
options[] = spc or 0
if not spc:
spc =
else:
spc = + spc
ncn = self.bSizer5.return_value()
options[] = ncn
loc_name = self.bSizer6.return_value()
options[] = loc_name
if loc_name:
loc_name = + loc_name
peak_AF = self.bSizer7.return_value()
options[] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options[] = 0
replicate =
else:
options[] = 1
replicate =
old_format= self.bSizer0a.return_value()
if old_format:
COMMAND = "huji_magic.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF, replicate)
program_ran, error_message = huji_magic.main(False, **options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
else:
COMMAND = "huji_magic_new.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF)
program_ran, error_message = huji_magic_new.main(False, **options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message) | grab user input values, format them, and run huji_magic.py with the appropriate flags | ### Input:
grab user input values, format them, and run huji_magic.py with the appropriate flags
### Response:
def on_okButton(self, event):
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options[] = HUJI_file
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options[] = outfile
user = self.bSizer1.return_value()
options[] = user
if user:
user = + user
experiment_type = self.bSizer2.return_value()
options[] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options[] = lab_field_list[0]
options[] = lab_field_list[1]
options[] = lab_field_list[2]
lab_field = + lab_field
spc = self.bSizer4.return_value()
options[] = spc or 0
if not spc:
spc =
else:
spc = + spc
ncn = self.bSizer5.return_value()
options[] = ncn
loc_name = self.bSizer6.return_value()
options[] = loc_name
if loc_name:
loc_name = + loc_name
peak_AF = self.bSizer7.return_value()
options[] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options[] = 0
replicate =
else:
options[] = 1
replicate =
old_format= self.bSizer0a.return_value()
if old_format:
COMMAND = "huji_magic.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF, replicate)
program_ran, error_message = huji_magic.main(False, **options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
else:
COMMAND = "huji_magic_new.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF)
program_ran, error_message = huji_magic_new.main(False, **options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message) |
def buttons(self):
all_buttons = []
for buttons in self._buttons.values():
all_buttons += buttons
return all_buttons | Returns all the buttons linked to this edit.
:return [<QToolButton>, ..] | ### Input:
Returns all the buttons linked to this edit.
:return [<QToolButton>, ..]
### Response:
def buttons(self):
all_buttons = []
for buttons in self._buttons.values():
all_buttons += buttons
return all_buttons |
def on_play_speed(self, *args):
Clock.unschedule(self.play)
Clock.schedule_interval(self.play, 1.0 / self.play_speed) | Change the interval at which ``self.play`` is called to match my
current ``play_speed``. | ### Input:
Change the interval at which ``self.play`` is called to match my
current ``play_speed``.
### Response:
def on_play_speed(self, *args):
Clock.unschedule(self.play)
Clock.schedule_interval(self.play, 1.0 / self.play_speed) |
def get_inventory_descriptions(self):
yield "System"
self.init_sdr()
for fruid in sorted(self._sdr.fru):
yield self._sdr.fru[fruid].fru_name
self.oem_init()
for compname in self._oem.get_oem_inventory_descriptions():
yield compname | Retrieve list of things that could be inventoried
This permits a caller to examine the available items
without actually causing the inventory data to be gathered. It
returns an iterable of string descriptions | ### Input:
Retrieve list of things that could be inventoried
This permits a caller to examine the available items
without actually causing the inventory data to be gathered. It
returns an iterable of string descriptions
### Response:
def get_inventory_descriptions(self):
yield "System"
self.init_sdr()
for fruid in sorted(self._sdr.fru):
yield self._sdr.fru[fruid].fru_name
self.oem_init()
for compname in self._oem.get_oem_inventory_descriptions():
yield compname |
def Wait(self):
time.sleep(self.sleep_time - int(self.sleep_time))
for _ in range(int(self.sleep_time)):
time.sleep(1)
self.sleep_time = min(self.poll_max,
max(self.poll_min, self.sleep_time) * self.poll_slew) | Wait until the next action is needed. | ### Input:
Wait until the next action is needed.
### Response:
def Wait(self):
time.sleep(self.sleep_time - int(self.sleep_time))
for _ in range(int(self.sleep_time)):
time.sleep(1)
self.sleep_time = min(self.poll_max,
max(self.poll_min, self.sleep_time) * self.poll_slew) |
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self | Apply `processor` or `self.processor` to `self`. | ### Input:
Apply `processor` or `self.processor` to `self`.
### Response:
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self |
def is_first_instance_aws():
try:
instance_details = requests.get(,
timeout=5).json()
instance_id = instance_details[]
instance_region = instance_details[]
except (requests.RequestException, ValueError, KeyError) as e:
raise StackInterrogationException(e)
try:
)
except (ClientError, AssertionError) as e:
raise StackInterrogationException(e)
return bool(autoscaling_group_instance_ids and autoscaling_group_instance_ids[0] == instance_id) | Returns True if the current instance is the first instance in the ASG group,
sorted by instance_id. | ### Input:
Returns True if the current instance is the first instance in the ASG group,
sorted by instance_id.
### Response:
def is_first_instance_aws():
try:
instance_details = requests.get(,
timeout=5).json()
instance_id = instance_details[]
instance_region = instance_details[]
except (requests.RequestException, ValueError, KeyError) as e:
raise StackInterrogationException(e)
try:
)
except (ClientError, AssertionError) as e:
raise StackInterrogationException(e)
return bool(autoscaling_group_instance_ids and autoscaling_group_instance_ids[0] == instance_id) |
def taskinfo(self):
task_input = {: ,
: {"Task_Name": self._name}}
info = taskengine.execute(task_input, self._engine, cwd=self._cwd)
task_def = info[][]
task_def[] = str(task_def.pop())
task_def[] = str(task_def.pop())
task_def[] = str(task_def.pop())
if in task_def:
task_def[] = task_def.pop()
if in task_def:
task_def[] = task_def.pop()
task_def[] = \
[v for v in task_def[].values()]
task_def.pop()
parameters = task_def[]
for parameter in parameters:
parameter[] = str(parameter.pop())
parameter[] = str(parameter.pop())
parameter[] = str(parameter.pop())
parameter[] = bool(parameter.pop())
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if parameter[].count():
parameter[], parameter[] = parameter.pop().split()
parameter[] = + parameter[]
parameter[] = str(parameter[])
else:
parameter[] = str(parameter.pop().split()[0])
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop().lower()
if in parameter:
if parameter[] is not None:
parameter[] = parameter.pop()
else:
parameter.pop()
if in parameter:
if parameter[] is not None:
parameter[] = parameter.pop()
else:
parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
return task_def | Retrieve the Task Information | ### Input:
Retrieve the Task Information
### Response:
def taskinfo(self):
task_input = {: ,
: {"Task_Name": self._name}}
info = taskengine.execute(task_input, self._engine, cwd=self._cwd)
task_def = info[][]
task_def[] = str(task_def.pop())
task_def[] = str(task_def.pop())
task_def[] = str(task_def.pop())
if in task_def:
task_def[] = task_def.pop()
if in task_def:
task_def[] = task_def.pop()
task_def[] = \
[v for v in task_def[].values()]
task_def.pop()
parameters = task_def[]
for parameter in parameters:
parameter[] = str(parameter.pop())
parameter[] = str(parameter.pop())
parameter[] = str(parameter.pop())
parameter[] = bool(parameter.pop())
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if parameter[].count():
parameter[], parameter[] = parameter.pop().split()
parameter[] = + parameter[]
parameter[] = str(parameter[])
else:
parameter[] = str(parameter.pop().split()[0])
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop().lower()
if in parameter:
if parameter[] is not None:
parameter[] = parameter.pop()
else:
parameter.pop()
if in parameter:
if parameter[] is not None:
parameter[] = parameter.pop()
else:
parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
if in parameter:
parameter[] = parameter.pop()
return task_def |
def expand_filenames(self, filenames):
results = []
for filename in filenames:
result = filename
if "$" in filename:
template = Template(filename)
result = template.substitute(**self.environment)
logging.debug(
"Expanding {} to {}.".format(filename, result))
if any([pattern in result for pattern in "*[]?"]):
expanded = glob.glob(result)
if len(expanded) > 0:
result = expanded
else:
result = "NONEXISTENT"
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
return sorted(list(set(results))) | Expand a list of filenames using environment variables,
followed by expansion of shell-style wildcards. | ### Input:
Expand a list of filenames using environment variables,
followed by expansion of shell-style wildcards.
### Response:
def expand_filenames(self, filenames):
results = []
for filename in filenames:
result = filename
if "$" in filename:
template = Template(filename)
result = template.substitute(**self.environment)
logging.debug(
"Expanding {} to {}.".format(filename, result))
if any([pattern in result for pattern in "*[]?"]):
expanded = glob.glob(result)
if len(expanded) > 0:
result = expanded
else:
result = "NONEXISTENT"
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
return sorted(list(set(results))) |
def add_dynamic_element(self, name, description):
self._pb.add(Name=name, Description=description, Value="*")
return self | Adds a dynamic namespace element to the end of the Namespace.
A dynamic namespace element is defined by an element that contains a
non-static data relative to the metric being collected. For instance,
when collecting metrics for a given virtual machine the namespace
element that contains the virtual-machine-id would be dynamic. This is
modeled by the a NamespaceElement when its `name` attribute contains the
value 'virtual-machine-id'. In this example the `value` attribute would
be set to the ID of the virtual machine when the metric is collected.
Args:
value (:py:class:`snap_plugin.v1.namespace_element.NamespaceElement`):
namespace element
Returns:
:py:class:`snap_plugin.v1.namespace.Namespace` | ### Input:
Adds a dynamic namespace element to the end of the Namespace.
A dynamic namespace element is defined by an element that contains a
non-static data relative to the metric being collected. For instance,
when collecting metrics for a given virtual machine the namespace
element that contains the virtual-machine-id would be dynamic. This is
modeled by the a NamespaceElement when its `name` attribute contains the
value 'virtual-machine-id'. In this example the `value` attribute would
be set to the ID of the virtual machine when the metric is collected.
Args:
value (:py:class:`snap_plugin.v1.namespace_element.NamespaceElement`):
namespace element
Returns:
:py:class:`snap_plugin.v1.namespace.Namespace`
### Response:
def add_dynamic_element(self, name, description):
self._pb.add(Name=name, Description=description, Value="*")
return self |
def files(self, payload):
url = "{url_base}/resource/{pid}/files/".format(url_base=self.hs.url_base,
pid=self.pid)
encoder = MultipartEncoder({
"file": (payload[], open(payload[], )),
"folder": payload[]
})
monitor = MultipartEncoderMonitor(encoder, default_progress_callback)
r = self.hs._request(, url, None, data=monitor, headers={: monitor.content_type})
return r.text | Upload a file to a hydroshare resource.
:param payload:
file: File object to upload to server
folder: folder path to upload the file to
:return: json object
resource_id: string resource id,
file_name: string name of file | ### Input:
Upload a file to a hydroshare resource.
:param payload:
file: File object to upload to server
folder: folder path to upload the file to
:return: json object
resource_id: string resource id,
file_name: string name of file
### Response:
def files(self, payload):
url = "{url_base}/resource/{pid}/files/".format(url_base=self.hs.url_base,
pid=self.pid)
encoder = MultipartEncoder({
"file": (payload[], open(payload[], )),
"folder": payload[]
})
monitor = MultipartEncoderMonitor(encoder, default_progress_callback)
r = self.hs._request(, url, None, data=monitor, headers={: monitor.content_type})
return r.text |
def escape_identifier(text, reg=KWD_RE):
if not text:
return "_"
if text[0].isdigit():
text = "_" + text
return reg.sub(r"\1_", text) | Escape partial C identifiers so they can be used as
attributes/arguments | ### Input:
Escape partial C identifiers so they can be used as
attributes/arguments
### Response:
def escape_identifier(text, reg=KWD_RE):
if not text:
return "_"
if text[0].isdigit():
text = "_" + text
return reg.sub(r"\1_", text) |
def unpack(self, buff=None, offset=0):
band_type = UBInt16(enum_ref=MeterBandType)
band_type.unpack(buff, offset)
self.__class__ = MeterBandType(band_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset) | Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails. | ### Input:
Unpack *buff* into this object.
This method will convert a binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
### Response:
def unpack(self, buff=None, offset=0):
band_type = UBInt16(enum_ref=MeterBandType)
band_type.unpack(buff, offset)
self.__class__ = MeterBandType(band_type.value).find_class()
length = UBInt16()
length.unpack(buff, offset=offset+2)
super().unpack(buff[:offset+length.value], offset) |
async def add_alternative(self, alt, timeout=OTGW_DEFAULT_TIMEOUT):
cmd = OTGW_CMD_ADD_ALT
alt = int(alt)
if alt < 1 or alt > 255:
return None
ret = await self._wait_for_cmd(cmd, alt, timeout)
if ret is not None:
return int(ret) | Add the specified Data-ID to the list of alternative commands
to send to the boiler instead of a Data-ID that is known to be
unsupported by the boiler. Alternative Data-IDs will always be
sent to the boiler in a Read-Data request message with the
data-value set to zero. The table of alternative Data-IDs is
stored in non-volatile memory so it will persist even if the
gateway has been powered off. Data-ID values from 1 to 255 are
allowed.
Return the ID that was added to the list, or None on failure.
This method is a coroutine | ### Input:
Add the specified Data-ID to the list of alternative commands
to send to the boiler instead of a Data-ID that is known to be
unsupported by the boiler. Alternative Data-IDs will always be
sent to the boiler in a Read-Data request message with the
data-value set to zero. The table of alternative Data-IDs is
stored in non-volatile memory so it will persist even if the
gateway has been powered off. Data-ID values from 1 to 255 are
allowed.
Return the ID that was added to the list, or None on failure.
This method is a coroutine
### Response:
async def add_alternative(self, alt, timeout=OTGW_DEFAULT_TIMEOUT):
cmd = OTGW_CMD_ADD_ALT
alt = int(alt)
if alt < 1 or alt > 255:
return None
ret = await self._wait_for_cmd(cmd, alt, timeout)
if ret is not None:
return int(ret) |
def is_unit_acceptable(self, unit, raise_exception=True):
_is_acceptable = unit in self.units
if _is_acceptable or raise_exception is False:
return _is_acceptable
else:
raise ValueError(
.format(
unit, self.__class__.__name__, self.units
)
) | Check if a certain unit is acceptable for the data type.
Args:
unit: A text string representing the abbreviated unit.
raise_exception: Set to True to raise an exception if not acceptable. | ### Input:
Check if a certain unit is acceptable for the data type.
Args:
unit: A text string representing the abbreviated unit.
raise_exception: Set to True to raise an exception if not acceptable.
### Response:
def is_unit_acceptable(self, unit, raise_exception=True):
_is_acceptable = unit in self.units
if _is_acceptable or raise_exception is False:
return _is_acceptable
else:
raise ValueError(
.format(
unit, self.__class__.__name__, self.units
)
) |
def Compile(self, filter_implementation):
operator = self.operator.lower()
if operator in (, ):
method =
elif operator in (, ):
method =
else:
raise errors.ParseError(
.format(operator))
args = [x.Compile(filter_implementation) for x in self.args]
return getattr(filter_implementation, method)(*args) | Compile the binary expression into a filter object. | ### Input:
Compile the binary expression into a filter object.
### Response:
def Compile(self, filter_implementation):
operator = self.operator.lower()
if operator in (, ):
method =
elif operator in (, ):
method =
else:
raise errors.ParseError(
.format(operator))
args = [x.Compile(filter_implementation) for x in self.args]
return getattr(filter_implementation, method)(*args) |
def interconnect_link_topologies(self):
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies | Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies: | ### Input:
Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies:
### Response:
def interconnect_link_topologies(self):
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies |
def _domain_event_tunable_cb(conn, domain, params, opaque):
_salt_send_domain_event(opaque, conn, domain, opaque[], {
: params
}) | Domain tunable events handler | ### Input:
Domain tunable events handler
### Response:
def _domain_event_tunable_cb(conn, domain, params, opaque):
_salt_send_domain_event(opaque, conn, domain, opaque[], {
: params
}) |
def put(self, request, bot_id, id, format=None):
return super(StateDetail, self).put(request, bot_id, id, format) | Update existing state
---
serializer: StateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request | ### Input:
Update existing state
---
serializer: StateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
### Response:
def put(self, request, bot_id, id, format=None):
return super(StateDetail, self).put(request, bot_id, id, format) |
def _CaptureExpression(self, frame, expression):
rc, value = _EvaluateExpression(frame, expression)
if not rc:
return {: expression, : value}
return self.CaptureNamedVariable(expression, value, 0,
self.expression_capture_limits) | Evalutes the expression and captures it into a Variable object.
Args:
frame: evaluation context.
expression: watched expression to compile and evaluate.
Returns:
Variable object (which will have error status if the expression fails
to evaluate). | ### Input:
Evalutes the expression and captures it into a Variable object.
Args:
frame: evaluation context.
expression: watched expression to compile and evaluate.
Returns:
Variable object (which will have error status if the expression fails
to evaluate).
### Response:
def _CaptureExpression(self, frame, expression):
rc, value = _EvaluateExpression(frame, expression)
if not rc:
return {: expression, : value}
return self.CaptureNamedVariable(expression, value, 0,
self.expression_capture_limits) |
def _needs_base64_encoding(self, attr_type, attr_value):
return attr_type.lower() in self._base64_attrs or \
isinstance(attr_value, bytes) or \
UNSAFE_STRING_RE.search(attr_value) is not None | Return True if attr_value has to be base-64 encoded.
This is the case because of special chars or because attr_type is in
self._base64_attrs | ### Input:
Return True if attr_value has to be base-64 encoded.
This is the case because of special chars or because attr_type is in
self._base64_attrs
### Response:
def _needs_base64_encoding(self, attr_type, attr_value):
return attr_type.lower() in self._base64_attrs or \
isinstance(attr_value, bytes) or \
UNSAFE_STRING_RE.search(attr_value) is not None |
def _from_matrix(cls, matrix):
try:
shape = matrix.shape
except AttributeError:
raise TypeError("Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix")
if shape == (3, 3):
R = matrix
elif shape == (4,4):
R = matrix[:-1][:,:-1]
else:
raise ValueError("Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix")
if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3)):
raise ValueError("Matrix must be orthogonal, i.e. its transpose should be its inverse")
if not np.isclose(np.linalg.det(R), 1.0):
raise ValueError("Matrix must be special orthogonal i.e. its determinant must be +1.0")
def decomposition_method(matrix):
x, y, z = 0, 1, 2
K = np.array([
[R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],
[R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],
[R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],
[R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]
])
K = K / 3.0
e_vals, e_vecs = np.linalg.eig(K)
print(, e_vals)
print(, e_vecs)
max_index = np.argmax(e_vals)
principal_component = e_vecs[max_index]
return principal_component
def trace_method(matrix):
m = matrix.conj().transpose()
if m[2, 2] < 0:
if m[0, 0] > m[1, 1]:
t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]
else:
t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]
else:
if m[0, 0] < -m[1, 1]:
t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]
else:
t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]
q = np.array(q)
q *= 0.5 / sqrt(t);
return q
return cls(array=trace_method(R)) | Initialise from matrix representation
Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix
(as a numpy array) from which the quaternion's rotation should be created. | ### Input:
Initialise from matrix representation
Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix
(as a numpy array) from which the quaternion's rotation should be created.
### Response:
def _from_matrix(cls, matrix):
try:
shape = matrix.shape
except AttributeError:
raise TypeError("Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix")
if shape == (3, 3):
R = matrix
elif shape == (4,4):
R = matrix[:-1][:,:-1]
else:
raise ValueError("Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix")
if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3)):
raise ValueError("Matrix must be orthogonal, i.e. its transpose should be its inverse")
if not np.isclose(np.linalg.det(R), 1.0):
raise ValueError("Matrix must be special orthogonal i.e. its determinant must be +1.0")
def decomposition_method(matrix):
x, y, z = 0, 1, 2
K = np.array([
[R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],
[R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],
[R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],
[R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]
])
K = K / 3.0
e_vals, e_vecs = np.linalg.eig(K)
print(, e_vals)
print(, e_vecs)
max_index = np.argmax(e_vals)
principal_component = e_vecs[max_index]
return principal_component
def trace_method(matrix):
m = matrix.conj().transpose()
if m[2, 2] < 0:
if m[0, 0] > m[1, 1]:
t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]
else:
t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]
else:
if m[0, 0] < -m[1, 1]:
t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]
else:
t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]
q = np.array(q)
q *= 0.5 / sqrt(t);
return q
return cls(array=trace_method(R)) |
def _generateInsertStatement(self, dataset_name, cols):
col_names = [col["varname"] for col in cols]
qms = .join([ for x in col_names])
return % (dataset_name, .join(col_names), qms) | Generates a sql INSERT template | ### Input:
Generates a sql INSERT template
### Response:
def _generateInsertStatement(self, dataset_name, cols):
col_names = [col["varname"] for col in cols]
qms = .join([ for x in col_names])
return % (dataset_name, .join(col_names), qms) |
def generate(self, x, **kwargs):
assert self.parse_params(**kwargs)
asserts = []
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
momentum = tf.zeros_like(x)
adv_x = x
y, _nb_classes = self.get_or_guess_labels(x, kwargs)
y = y / reduce_sum(y, 1, keepdims=True)
targeted = (self.y_target is not None)
def cond(i, _, __):
return tf.less(i, self.nb_iter)
def body(i, ax, m):
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
loss = -loss
grad, = tf.gradients(loss, ax)
red_ind = list(range(1, len(grad.get_shape())))
avoid_zero_div = tf.cast(1e-12, grad.dtype)
grad = grad / tf.maximum(
avoid_zero_div,
reduce_mean(tf.abs(grad), red_ind, keepdims=True))
m = self.decay_factor * m + grad
optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
if self.ord == 1:
raise NotImplementedError("This attack hasns not clear that FGM makes a good inner "
"loop step for iterative optimization since "
"it updates just one coordinate at a time.")
ax = ax + optimal_perturbation
ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)
if self.clip_min is not None and self.clip_max is not None:
ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)
ax = tf.stop_gradient(ax)
return i + 1, ax, m
_, adv_x, _ = tf.while_loop(
cond, body, (tf.zeros([]), adv_x, momentum), back_prop=True,
maximum_iterations=self.nb_iter)
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x | Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation. | ### Input:
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation.
### Response:
def generate(self, x, **kwargs):
assert self.parse_params(**kwargs)
asserts = []
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
momentum = tf.zeros_like(x)
adv_x = x
y, _nb_classes = self.get_or_guess_labels(x, kwargs)
y = y / reduce_sum(y, 1, keepdims=True)
targeted = (self.y_target is not None)
def cond(i, _, __):
return tf.less(i, self.nb_iter)
def body(i, ax, m):
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
loss = -loss
grad, = tf.gradients(loss, ax)
red_ind = list(range(1, len(grad.get_shape())))
avoid_zero_div = tf.cast(1e-12, grad.dtype)
grad = grad / tf.maximum(
avoid_zero_div,
reduce_mean(tf.abs(grad), red_ind, keepdims=True))
m = self.decay_factor * m + grad
optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
if self.ord == 1:
raise NotImplementedError("This attack hasns not clear that FGM makes a good inner "
"loop step for iterative optimization since "
"it updates just one coordinate at a time.")
ax = ax + optimal_perturbation
ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)
if self.clip_min is not None and self.clip_max is not None:
ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)
ax = tf.stop_gradient(ax)
return i + 1, ax, m
_, adv_x, _ = tf.while_loop(
cond, body, (tf.zeros([]), adv_x, momentum), back_prop=True,
maximum_iterations=self.nb_iter)
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x |
def ExportEvents(
self, knowledge_base_object, storage_reader, output_module,
processing_configuration, deduplicate_events=True, event_filter=None,
status_update_callback=None, time_slice=None, use_time_slicer=False):
self._events_status = processing_status.EventsStatus()
self._processing_configuration = processing_configuration
self._status_update_callback = status_update_callback
storage_reader.ReadPreprocessingInformation(knowledge_base_object)
total_number_of_events = 0
for session in storage_reader.GetSessions():
total_number_of_events += session.parsers_counter[]
self._events_status.total_number_of_events = total_number_of_events
output_module.Open()
output_module.WriteHeader()
self._StartStatusUpdateThread()
self._StartProfiling(self._processing_configuration.profiling)
try:
self._ExportEvents(
storage_reader, output_module, deduplicate_events=deduplicate_events,
event_filter=event_filter, time_slice=time_slice,
use_time_slicer=use_time_slicer)
finally:
self._StopStatusUpdateThread()
output_module.WriteFooter()
output_module.Close()
self._StopProfiling()
self._UpdateForemanProcessStatus()
if self._status_update_callback:
self._status_update_callback(self._processing_status)
self._status_update_callback = None
self._processing_configuration = None
self._events_status = None | Exports events using an output module.
Args:
knowledge_base_object (KnowledgeBase): contains information from
the source data needed for processing.
storage_reader (StorageReader): storage reader.
output_module (OutputModule): output module.
processing_configuration (ProcessingConfiguration): processing
configuration.
deduplicate_events (Optional[bool]): True if events should be
deduplicated.
event_filter (Optional[FilterObject]): event filter.
status_update_callback (Optional[function]): callback function for status
updates.
time_slice (Optional[TimeSlice]): slice of time to output.
use_time_slicer (Optional[bool]): True if the 'time slicer' should be
used. The 'time slicer' will provide a context of events around
an event of interest. | ### Input:
Exports events using an output module.
Args:
knowledge_base_object (KnowledgeBase): contains information from
the source data needed for processing.
storage_reader (StorageReader): storage reader.
output_module (OutputModule): output module.
processing_configuration (ProcessingConfiguration): processing
configuration.
deduplicate_events (Optional[bool]): True if events should be
deduplicated.
event_filter (Optional[FilterObject]): event filter.
status_update_callback (Optional[function]): callback function for status
updates.
time_slice (Optional[TimeSlice]): slice of time to output.
use_time_slicer (Optional[bool]): True if the 'time slicer' should be
used. The 'time slicer' will provide a context of events around
an event of interest.
### Response:
def ExportEvents(
self, knowledge_base_object, storage_reader, output_module,
processing_configuration, deduplicate_events=True, event_filter=None,
status_update_callback=None, time_slice=None, use_time_slicer=False):
self._events_status = processing_status.EventsStatus()
self._processing_configuration = processing_configuration
self._status_update_callback = status_update_callback
storage_reader.ReadPreprocessingInformation(knowledge_base_object)
total_number_of_events = 0
for session in storage_reader.GetSessions():
total_number_of_events += session.parsers_counter[]
self._events_status.total_number_of_events = total_number_of_events
output_module.Open()
output_module.WriteHeader()
self._StartStatusUpdateThread()
self._StartProfiling(self._processing_configuration.profiling)
try:
self._ExportEvents(
storage_reader, output_module, deduplicate_events=deduplicate_events,
event_filter=event_filter, time_slice=time_slice,
use_time_slicer=use_time_slicer)
finally:
self._StopStatusUpdateThread()
output_module.WriteFooter()
output_module.Close()
self._StopProfiling()
self._UpdateForemanProcessStatus()
if self._status_update_callback:
self._status_update_callback(self._processing_status)
self._status_update_callback = None
self._processing_configuration = None
self._events_status = None |
def addEntry(self, key=, value=):
img = resources.find()
new_item = XTreeWidgetItem()
new_item.setText(1, nativestring(key))
new_item.setText(2, nativestring(value))
new_item.setIcon(0, QtGui.QIcon(img))
new_item.setFixedHeight(22)
self.insertTopLevelItem(self.topLevelItemCount() - 1, new_item)
return new_item | Creates a new entry item for this widget.
:param key | <str>
value | <variant> | ### Input:
Creates a new entry item for this widget.
:param key | <str>
value | <variant>
### Response:
def addEntry(self, key=, value=):
img = resources.find()
new_item = XTreeWidgetItem()
new_item.setText(1, nativestring(key))
new_item.setText(2, nativestring(value))
new_item.setIcon(0, QtGui.QIcon(img))
new_item.setFixedHeight(22)
self.insertTopLevelItem(self.topLevelItemCount() - 1, new_item)
return new_item |
def filter_human_only(stmts_in, **kwargs):
from indra.databases import uniprot_client
if in kwargs and kwargs[]:
remove_bound = True
else:
remove_bound = False
dump_pkl = kwargs.get()
logger.info( %
len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get()
if upid and not uniprot_client.is_human(upid):
return False
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break
if human_genes:
stmts_out.append(st)
logger.info( % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements. | ### Input:
Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
### Response:
def filter_human_only(stmts_in, **kwargs):
from indra.databases import uniprot_client
if in kwargs and kwargs[]:
remove_bound = True
else:
remove_bound = False
dump_pkl = kwargs.get()
logger.info( %
len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get()
if upid and not uniprot_client.is_human(upid):
return False
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break
if human_genes:
stmts_out.append(st)
logger.info( % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out |
def _get_associated_instancenames(self, inst_name, namespace, assoc_class,
result_class, result_role, role):
instance_repo = self._get_instance_repo(namespace)
result_classes = self._classnamedict(result_class, namespace)
assoc_classes = self._classnamedict(assoc_class, namespace)
inst_name.namespace = namespace
rtn_instpaths = []
role = role.lower() if role else role
result_role = result_role.lower() if result_role else result_role
ref_paths = self._get_reference_instnames(inst_name, namespace,
assoc_class, role)
for ref_path in ref_paths:
inst = self._find_instance(ref_path, instance_repo)[1]
for prop in six.itervalues(inst.properties):
if prop.type == :
if prop.value == inst_name:
if assoc_class and inst.classname not in assoc_classes:
continue
if role and prop.name.lower() != role:
continue
else:
if result_class and (prop.value.classname
not in result_classes):
continue
if result_role and prop.name.lower() != result_role:
continue
self._appendpath_unique(rtn_instpaths, prop.value)
return rtn_instpaths | Get the reference instances from the repository for the target
instname and filtered by the result_class and role parameters.
Returns a list of the reference instance names. The returned list is
the original, not a copy so the user must copy them | ### Input:
Get the reference instances from the repository for the target
instname and filtered by the result_class and role parameters.
Returns a list of the reference instance names. The returned list is
the original, not a copy so the user must copy them
### Response:
def _get_associated_instancenames(self, inst_name, namespace, assoc_class,
result_class, result_role, role):
instance_repo = self._get_instance_repo(namespace)
result_classes = self._classnamedict(result_class, namespace)
assoc_classes = self._classnamedict(assoc_class, namespace)
inst_name.namespace = namespace
rtn_instpaths = []
role = role.lower() if role else role
result_role = result_role.lower() if result_role else result_role
ref_paths = self._get_reference_instnames(inst_name, namespace,
assoc_class, role)
for ref_path in ref_paths:
inst = self._find_instance(ref_path, instance_repo)[1]
for prop in six.itervalues(inst.properties):
if prop.type == :
if prop.value == inst_name:
if assoc_class and inst.classname not in assoc_classes:
continue
if role and prop.name.lower() != role:
continue
else:
if result_class and (prop.value.classname
not in result_classes):
continue
if result_role and prop.name.lower() != result_role:
continue
self._appendpath_unique(rtn_instpaths, prop.value)
return rtn_instpaths |
def detect_log_config(arguments):
config = arguments[]
if config is None:
config = detect_config_path()
if not os.path.exists(config):
error_exit( % config)
with open(config) as f:
config_str = f.read()
access_logs = dict(get_access_logs(config_str))
if not access_logs:
error_exit( % config)
log_formats = dict(get_log_formats(config_str))
if len(access_logs) == 1:
log_path, format_name = list(access_logs.items())[0]
if format_name == :
return log_path, LOG_FORMAT_COMBINED
if format_name not in log_formats:
error_exit( % log_path)
return log_path, log_formats[format_name]
print()
log_path = choose_one(list(access_logs.keys()), )
format_name = access_logs[log_path]
if format_name not in log_formats:
error_exit( % log_path)
return log_path, log_formats[format_name] | Detect access log config (path and format) of nginx. Offer user to select if multiple access logs are detected.
:return: path and format of detected / selected access log | ### Input:
Detect access log config (path and format) of nginx. Offer user to select if multiple access logs are detected.
:return: path and format of detected / selected access log
### Response:
def detect_log_config(arguments):
config = arguments[]
if config is None:
config = detect_config_path()
if not os.path.exists(config):
error_exit( % config)
with open(config) as f:
config_str = f.read()
access_logs = dict(get_access_logs(config_str))
if not access_logs:
error_exit( % config)
log_formats = dict(get_log_formats(config_str))
if len(access_logs) == 1:
log_path, format_name = list(access_logs.items())[0]
if format_name == :
return log_path, LOG_FORMAT_COMBINED
if format_name not in log_formats:
error_exit( % log_path)
return log_path, log_formats[format_name]
print()
log_path = choose_one(list(access_logs.keys()), )
format_name = access_logs[log_path]
if format_name not in log_formats:
error_exit( % log_path)
return log_path, log_formats[format_name] |
def get_annotation_date(self, r_term):
annotation_date_list = list(self.graph.triples((r_term, self.spdx_namespace[], None)))
if len(annotation_date_list) != 1:
self.error = True
msg =
self.logger.log(msg)
return
return six.text_type(annotation_date_list[0][2]) | Returns annotation date or None if not found.
Reports error on failure.
Note does not check value format. | ### Input:
Returns annotation date or None if not found.
Reports error on failure.
Note does not check value format.
### Response:
def get_annotation_date(self, r_term):
annotation_date_list = list(self.graph.triples((r_term, self.spdx_namespace[], None)))
if len(annotation_date_list) != 1:
self.error = True
msg =
self.logger.log(msg)
return
return six.text_type(annotation_date_list[0][2]) |
def street_address(self):
pattern = self.random_element(self.street_address_formats)
return self.generator.parse(pattern) | :example '791 Crist Parks' | ### Input:
:example '791 Crist Parks'
### Response:
def street_address(self):
pattern = self.random_element(self.street_address_formats)
return self.generator.parse(pattern) |
def add(self, response, label=None):
if not isinstance(response, sip_response.sip_response):
raise Exception(
)
self.objects.append(response)
if label is None:
self.labels.append()
else:
self.labels.append(label) | add one response object to the list | ### Input:
add one response object to the list
### Response:
def add(self, response, label=None):
if not isinstance(response, sip_response.sip_response):
raise Exception(
)
self.objects.append(response)
if label is None:
self.labels.append()
else:
self.labels.append(label) |
def configure_logging(level):
global logging_level
logging_level = logging.ERROR
if "info" == level.lower():
logging_level = logging.INFO
elif "warn" == level.lower():
logging_level = logging.WARNING
elif "debug" == level.lower():
logging_level = logging.DEBUG | Configure global log level to given one
:param level: Level (INFO | DEBUG | WARN | ERROR)
:return: | ### Input:
Configure global log level to given one
:param level: Level (INFO | DEBUG | WARN | ERROR)
:return:
### Response:
def configure_logging(level):
global logging_level
logging_level = logging.ERROR
if "info" == level.lower():
logging_level = logging.INFO
elif "warn" == level.lower():
logging_level = logging.WARNING
elif "debug" == level.lower():
logging_level = logging.DEBUG |
def bake_content(request):
ident_hash = request.matchdict[]
try:
id, version = split_ident_hash(ident_hash)
except IdentHashError:
raise httpexceptions.HTTPNotFound()
if not version:
raise httpexceptions.HTTPBadRequest()
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(, (ident_hash,))
try:
is_binder, stateid, module_ident = cursor.fetchone()
except TypeError:
raise httpexceptions.HTTPNotFound()
if not is_binder:
raise httpexceptions.HTTPBadRequest(
.format(ident_hash))
if stateid == 5:
cursor.execute(, (module_ident, ident_hash))
else:
cursor.execute(, (ident_hash,)) | Invoke the baking process - trigger post-publication | ### Input:
Invoke the baking process - trigger post-publication
### Response:
def bake_content(request):
ident_hash = request.matchdict[]
try:
id, version = split_ident_hash(ident_hash)
except IdentHashError:
raise httpexceptions.HTTPNotFound()
if not version:
raise httpexceptions.HTTPBadRequest()
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(, (ident_hash,))
try:
is_binder, stateid, module_ident = cursor.fetchone()
except TypeError:
raise httpexceptions.HTTPNotFound()
if not is_binder:
raise httpexceptions.HTTPBadRequest(
.format(ident_hash))
if stateid == 5:
cursor.execute(, (module_ident, ident_hash))
else:
cursor.execute(, (ident_hash,)) |
def time_to_sec(time_str: str) -> int:
total_sec = 0
if in time_str:
days, time_str = time_str.split()
total_sec += (int(days) * 24 * 60 * 60)
hours_min_raw = time_str.split()[:-1]
time_parts = [int(round(float(val))) for val in hours_min_raw]
total_sec += time_parts[-1] * 60
if len(time_parts) > 1:
total_sec += time_parts[-2] * 60 * 60
return total_sec | Convert time in string format to seconds.
Skipping seconds since sometimes the last column is truncated
for entries where >10 days. | ### Input:
Convert time in string format to seconds.
Skipping seconds since sometimes the last column is truncated
for entries where >10 days.
### Response:
def time_to_sec(time_str: str) -> int:
total_sec = 0
if in time_str:
days, time_str = time_str.split()
total_sec += (int(days) * 24 * 60 * 60)
hours_min_raw = time_str.split()[:-1]
time_parts = [int(round(float(val))) for val in hours_min_raw]
total_sec += time_parts[-1] * 60
if len(time_parts) > 1:
total_sec += time_parts[-2] * 60 * 60
return total_sec |
def stop(self, reason=):
key = % (self.session[0].id, quote_plus(reason))
return self._server.query(key) | Stop playback for a media item. | ### Input:
Stop playback for a media item.
### Response:
def stop(self, reason=):
key = % (self.session[0].id, quote_plus(reason))
return self._server.query(key) |
def hard_reset(self):
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None | Resets the iterator and ignore roll over data | ### Input:
Resets the iterator and ignore roll over data
### Response:
def hard_reset(self):
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None |
def summary_plot(data):
cats = OrderedDict()
cats = {
: {
: ,
:
},
: {
: ,
:
},
: {
: ,
:
},
: {
: ,
:
}
}
splotconfig = {: ,
: ,
: ,
: False }
return bargraph.plot(data, cats, splotconfig) | Barplot of combined pairs | ### Input:
Barplot of combined pairs
### Response:
def summary_plot(data):
cats = OrderedDict()
cats = {
: {
: ,
:
},
: {
: ,
:
},
: {
: ,
:
},
: {
: ,
:
}
}
splotconfig = {: ,
: ,
: ,
: False }
return bargraph.plot(data, cats, splotconfig) |
def write(self, data, timeout_s=None):
self.connected.wait(timeout_s)
self.protocol.transport.write(data) | Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready. | ### Input:
Write to serial port.
Waits for serial connection to be established before writing.
Parameters
----------
data : str or bytes
Data to write to serial port.
timeout_s : float, optional
Maximum number of seconds to wait for serial connection to be
established.
By default, block until serial connection is ready.
### Response:
def write(self, data, timeout_s=None):
self.connected.wait(timeout_s)
self.protocol.transport.write(data) |
def twos_complement(rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat):
s complement conversion of a binary number
Input handshake & data
rx_rdy - (o) Ready
rx_vld - (i) Valid
rx_dat - (i) Data
Output handshake & data
tx_rdy - (i) Ready
tx_vld - (o) Valid
tx_dat - (o) Data
Implementation: 3-stage pipeline
stage 0: registers input data
stage 1: inverts data coming from stage 0 and registers the inverted data
stage 2: increments data coming from stage 1 and registers the incremented data
Each stage is implemented as a separate process controlled by a central pipeline control unit via an enable signal
The pipeline control unit manages the handshake and synchronizes the operation of the stages
Register input data Invert data Add one to data'
if (stage_en[2]):
s2_dat.next = s1_dat + 1
@always_comb
def comb():
tx_dat.next = s2_dat.signed()
return instances() | Two's complement conversion of a binary number
Input handshake & data
rx_rdy - (o) Ready
rx_vld - (i) Valid
rx_dat - (i) Data
Output handshake & data
tx_rdy - (i) Ready
tx_vld - (o) Valid
tx_dat - (o) Data
Implementation: 3-stage pipeline
stage 0: registers input data
stage 1: inverts data coming from stage 0 and registers the inverted data
stage 2: increments data coming from stage 1 and registers the incremented data
Each stage is implemented as a separate process controlled by a central pipeline control unit via an enable signal
The pipeline control unit manages the handshake and synchronizes the operation of the stages | ### Input:
Two's complement conversion of a binary number
Input handshake & data
rx_rdy - (o) Ready
rx_vld - (i) Valid
rx_dat - (i) Data
Output handshake & data
tx_rdy - (i) Ready
tx_vld - (o) Valid
tx_dat - (o) Data
Implementation: 3-stage pipeline
stage 0: registers input data
stage 1: inverts data coming from stage 0 and registers the inverted data
stage 2: increments data coming from stage 1 and registers the incremented data
Each stage is implemented as a separate process controlled by a central pipeline control unit via an enable signal
The pipeline control unit manages the handshake and synchronizes the operation of the stages
### Response:
def twos_complement(rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat):
s complement conversion of a binary number
Input handshake & data
rx_rdy - (o) Ready
rx_vld - (i) Valid
rx_dat - (i) Data
Output handshake & data
tx_rdy - (i) Ready
tx_vld - (o) Valid
tx_dat - (o) Data
Implementation: 3-stage pipeline
stage 0: registers input data
stage 1: inverts data coming from stage 0 and registers the inverted data
stage 2: increments data coming from stage 1 and registers the incremented data
Each stage is implemented as a separate process controlled by a central pipeline control unit via an enable signal
The pipeline control unit manages the handshake and synchronizes the operation of the stages
Register input data Invert data Add one to data'
if (stage_en[2]):
s2_dat.next = s1_dat + 1
@always_comb
def comb():
tx_dat.next = s2_dat.signed()
return instances() |
def gradient_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
output_filter_mask=None, out_annotation=None):
seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
for k in seqs:
if not isinstance(seqs[k], (list, tuple, np.ndarray)):
raise Exception("At the moment only models with list, tuple or np.ndarray inputs are supported.")
assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
assert get_seq_len(ref)[0] == mutation_positions.shape[0]
assert len(mutation_positions.shape) == 1
if output_filter_mask is None:
if out_annotation is None:
output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
else:
output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
out_annotation = out_annotation_all_outputs[output_filter_mask]
sal_funcs = __generate_direct_saliency_functions__(model, out_annotation_all_outputs, out_annotation)
preds = {}
for k in seqs:
preds[k] = {}
if "_rc" in k:
mutated_positions_here = get_seq_len(ref)[1] - 1 - mutation_positions
else:
mutated_positions_here = mutation_positions
for l in out_annotation:
preds[k][l] = predict_vals(input_data=seqs[k], apply_function=__get_direct_saliencies__,
score_func=sal_funcs[l], mutated_positions=mutated_positions_here, model = model)
diff_ret_dGrad = {}
pred_out = {"ref": {}, "alt": {}}
for k in preds["ref"]:
diff_fwd = general_diff(preds["alt"][k]["dGrad"], preds["ref"][k]["dGrad"])
diff_rc = general_diff(preds["alt_rc"][k]["dGrad"], preds["ref_rc"][k]["dGrad"])
sel = general_sel(diff_fwd, diff_rc)
replace_by_sel(diff_fwd, diff_rc, sel)
diff_ret_dGrad[k] = diff_fwd
replace_by_sel(preds["ref"][k]["dGrad"], preds["ref_rc"][k]["dGrad"], sel)
replace_by_sel(preds["alt"][k]["dGrad"], preds["alt_rc"][k]["dGrad"], sel)
pred_out["ref"][k] = preds["ref"][k]["dGrad"]
pred_out["alt"][k] = preds["alt"][k]["dGrad"]
return {"diff": pd.DataFrame(diff_ret_dGrad),
"ref": pd.DataFrame(pred_out["ref"]),
"alt": pd.DataFrame(pred_out["alt"])} | Gradient-based (saliency) variant effect prediction
Based on the idea of [saliency maps](https://arxiv.org/pdf/1312.6034.pdf) the gradient-based prediction of
variant effects uses the `gradient` function of the Keras backend to estimate the importance of a variant
for a given output. This value is then multiplied by the input, as recommended by
[Shrikumar et al., 2017](https://arxiv.org/pdf/1605.01713.pdf).
# Arguments
model: Keras model
ref: Input sequence with the reference genotype in the mutation position
ref_rc: Reverse complement of the 'ref' argument
alt: Input sequence with the alternative genotype in the mutation position
alt_rc: Reverse complement of the 'alt' argument
mutation_positions: Position on which the mutation was placed in the forward sequences
out_annotation_all_outputs: Output labels of the model.
output_filter_mask: Mask of boolean values indicating which model outputs should be used.
Use this or 'out_annotation'
out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the
predictions should be calculated.
# Returns
Dictionary with three different entries:
- ref: Gradient * input at the mutation position using the reference sequence.
Forward or reverse-complement sequence is chose based on sequence direction caused
the bigger absolute difference ('diff')
- alt: Gradient * input at the mutation position using the alternative sequence. Forward or
reverse-complement sequence is chose based on sequence direction caused the bigger
absolute difference ('diff')
- diff: 'alt' - 'ref'. Forward or reverse-complement sequence is chose based on sequence
direction caused the bigger absolute difference. | ### Input:
Gradient-based (saliency) variant effect prediction
Based on the idea of [saliency maps](https://arxiv.org/pdf/1312.6034.pdf) the gradient-based prediction of
variant effects uses the `gradient` function of the Keras backend to estimate the importance of a variant
for a given output. This value is then multiplied by the input, as recommended by
[Shrikumar et al., 2017](https://arxiv.org/pdf/1605.01713.pdf).
# Arguments
model: Keras model
ref: Input sequence with the reference genotype in the mutation position
ref_rc: Reverse complement of the 'ref' argument
alt: Input sequence with the alternative genotype in the mutation position
alt_rc: Reverse complement of the 'alt' argument
mutation_positions: Position on which the mutation was placed in the forward sequences
out_annotation_all_outputs: Output labels of the model.
output_filter_mask: Mask of boolean values indicating which model outputs should be used.
Use this or 'out_annotation'
out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the
predictions should be calculated.
# Returns
Dictionary with three different entries:
- ref: Gradient * input at the mutation position using the reference sequence.
Forward or reverse-complement sequence is chose based on sequence direction caused
the bigger absolute difference ('diff')
- alt: Gradient * input at the mutation position using the alternative sequence. Forward or
reverse-complement sequence is chose based on sequence direction caused the bigger
absolute difference ('diff')
- diff: 'alt' - 'ref'. Forward or reverse-complement sequence is chose based on sequence
direction caused the bigger absolute difference.
### Response:
def gradient_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
output_filter_mask=None, out_annotation=None):
seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
for k in seqs:
if not isinstance(seqs[k], (list, tuple, np.ndarray)):
raise Exception("At the moment only models with list, tuple or np.ndarray inputs are supported.")
assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
assert get_seq_len(ref)[0] == mutation_positions.shape[0]
assert len(mutation_positions.shape) == 1
if output_filter_mask is None:
if out_annotation is None:
output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
else:
output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
out_annotation = out_annotation_all_outputs[output_filter_mask]
sal_funcs = __generate_direct_saliency_functions__(model, out_annotation_all_outputs, out_annotation)
preds = {}
for k in seqs:
preds[k] = {}
if "_rc" in k:
mutated_positions_here = get_seq_len(ref)[1] - 1 - mutation_positions
else:
mutated_positions_here = mutation_positions
for l in out_annotation:
preds[k][l] = predict_vals(input_data=seqs[k], apply_function=__get_direct_saliencies__,
score_func=sal_funcs[l], mutated_positions=mutated_positions_here, model = model)
diff_ret_dGrad = {}
pred_out = {"ref": {}, "alt": {}}
for k in preds["ref"]:
diff_fwd = general_diff(preds["alt"][k]["dGrad"], preds["ref"][k]["dGrad"])
diff_rc = general_diff(preds["alt_rc"][k]["dGrad"], preds["ref_rc"][k]["dGrad"])
sel = general_sel(diff_fwd, diff_rc)
replace_by_sel(diff_fwd, diff_rc, sel)
diff_ret_dGrad[k] = diff_fwd
replace_by_sel(preds["ref"][k]["dGrad"], preds["ref_rc"][k]["dGrad"], sel)
replace_by_sel(preds["alt"][k]["dGrad"], preds["alt_rc"][k]["dGrad"], sel)
pred_out["ref"][k] = preds["ref"][k]["dGrad"]
pred_out["alt"][k] = preds["alt"][k]["dGrad"]
return {"diff": pd.DataFrame(diff_ret_dGrad),
"ref": pd.DataFrame(pred_out["ref"]),
"alt": pd.DataFrame(pred_out["alt"])} |
def __cancel(self, preapproval_id, **kwargs):
params = {
: preapproval_id
}
return self.make_call(self.__cancel, params, kwargs) | Call documentation: `/preapproval/cancel
<https://www.wepay.com/developer/reference/preapproval#cancel>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` | ### Input:
Call documentation: `/preapproval/cancel
<https://www.wepay.com/developer/reference/preapproval#cancel>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
### Response:
def __cancel(self, preapproval_id, **kwargs):
params = {
: preapproval_id
}
return self.make_call(self.__cancel, params, kwargs) |
def run(self):
self.state = True
if self.capture:
self.display2.screen().root.grab_keyboard(X.KeyPressMask | X.KeyReleaseMask, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
self.display2.record_enable_context(self.ctx, self.handler)
self.display2.record_free_context(self.ctx) | Begin listening for keyboard input events. | ### Input:
Begin listening for keyboard input events.
### Response:
def run(self):
self.state = True
if self.capture:
self.display2.screen().root.grab_keyboard(X.KeyPressMask | X.KeyReleaseMask, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
self.display2.record_enable_context(self.ctx, self.handler)
self.display2.record_free_context(self.ctx) |
def _fill(self):
right_now = time.time()
time_diff = right_now - self._last_fill
if time_diff < 0:
return
self._count = min(
self._count + self._fill_rate * time_diff,
self._capacity,
)
self._last_fill = right_now | Fills bucket with accrued tokens since last fill. | ### Input:
Fills bucket with accrued tokens since last fill.
### Response:
def _fill(self):
right_now = time.time()
time_diff = right_now - self._last_fill
if time_diff < 0:
return
self._count = min(
self._count + self._fill_rate * time_diff,
self._capacity,
)
self._last_fill = right_now |
def serialize(obj, no_dump=False):
if hasattr(obj, "serialize") and isinstance(obj.serialize, collections.Callable):
o_dict = {
: "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__),
: obj.serialize()
}
elif isinstance(obj, dict):
o_dict = {}
for key, value in list(obj.items()):
o_dict[key] = serialize(value, True)
elif isinstance(obj, (list, set)):
o_dict = [serialize(item, True) for item in obj]
else:
o_dict = obj
if no_dump:
return o_dict
result = None
try:
result = json.dumps(o_dict, ensure_ascii=False)
except MemoryError:
return {:
}
return result | Serialize an object.
Returns a dict containing an `_error` property if a MemoryError happens during the
object serialization. See #369.
:param obj: the object to serialize
:type obj: alignak.objects.item.Item | dict | list | str
:param no_dump: if True return dict, otherwise return a json
:type no_dump: bool
:return: dict or json dumps dict with the following structure ::
{'__sys_python_module__': "%s.%s" % (o_cls.__module__, o_cls.__name__)
'content' : obj.serialize()}
:rtype: dict | str | ### Input:
Serialize an object.
Returns a dict containing an `_error` property if a MemoryError happens during the
object serialization. See #369.
:param obj: the object to serialize
:type obj: alignak.objects.item.Item | dict | list | str
:param no_dump: if True return dict, otherwise return a json
:type no_dump: bool
:return: dict or json dumps dict with the following structure ::
{'__sys_python_module__': "%s.%s" % (o_cls.__module__, o_cls.__name__)
'content' : obj.serialize()}
:rtype: dict | str
### Response:
def serialize(obj, no_dump=False):
if hasattr(obj, "serialize") and isinstance(obj.serialize, collections.Callable):
o_dict = {
: "%s.%s" % (obj.__class__.__module__, obj.__class__.__name__),
: obj.serialize()
}
elif isinstance(obj, dict):
o_dict = {}
for key, value in list(obj.items()):
o_dict[key] = serialize(value, True)
elif isinstance(obj, (list, set)):
o_dict = [serialize(item, True) for item in obj]
else:
o_dict = obj
if no_dump:
return o_dict
result = None
try:
result = json.dumps(o_dict, ensure_ascii=False)
except MemoryError:
return {:
}
return result |
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,
tmpltbank_file, insp_segs, insp_data_name,
insp_anal_name, dax_output, out_dir, tags=None):
logging.info()
if not workflow.cp.has_section():
logging.info()
logging.info()
return
tags = [] if tags is None else tags
makedir(dax_output)
config_path = os.path.abspath(dax_output + + .join(tags) + )
workflow.cp.write(open(config_path, ))
config_file = wdax.File(os.path.basename(config_path))
config_file.PFN(urlparse.urljoin(, urllib.pathname2url(config_path)),
site=)
exe = Executable(workflow.cp, , ifos=workflow.ifos, out_dir=dax_output)
node = exe.create_node()
node.add_input_opt(, config_file)
node.add_input_opt(, tmpltbank_file)
node.add_input_opt(, coinc_file)
node.add_multiifo_input_list_opt(, single_triggers)
node.add_input_opt(, insp_segs)
node.add_opt(, insp_data_name)
node.add_opt(, insp_anal_name)
node.new_output_file_opt(workflow.analysis_time, , , tags=tags)
node.new_output_file_opt(workflow.analysis_time, , , tags=tags)
node.new_output_file_opt(workflow.analysis_time, , , tags=tags)
name = node.output_files[0].name
map_file = node.output_files[1]
tc_file = node.output_files[2]
node.add_opt(, name)
node.add_opt(, out_dir)
workflow += node
fil = node.output_files[0]
try:
staging_site = workflow.cp.get(,
)
except:
staging_site = None
job = dax.DAX(fil)
job.addArguments( % os.path.splitext(os.path.basename(name))[0])
Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)
workflow._adag.addJob(job)
dep = dax.Dependency(parent=node._dax_node, child=job)
workflow._adag.addDependency(dep)
logging.info() | Create plots that followup the Nth loudest coincident injection
from a statmap produced HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
coinc_file:
single_triggers: list of pycbc.workflow.File
A list cointaining the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read and analyzed by each inspiral
job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollops plots. | ### Input:
Create plots that followup the Nth loudest coincident injection
from a statmap produced HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
coinc_file:
single_triggers: list of pycbc.workflow.File
A list cointaining the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read and analyzed by each inspiral
job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollops plots.
### Response:
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,
tmpltbank_file, insp_segs, insp_data_name,
insp_anal_name, dax_output, out_dir, tags=None):
logging.info()
if not workflow.cp.has_section():
logging.info()
logging.info()
return
tags = [] if tags is None else tags
makedir(dax_output)
config_path = os.path.abspath(dax_output + + .join(tags) + )
workflow.cp.write(open(config_path, ))
config_file = wdax.File(os.path.basename(config_path))
config_file.PFN(urlparse.urljoin(, urllib.pathname2url(config_path)),
site=)
exe = Executable(workflow.cp, , ifos=workflow.ifos, out_dir=dax_output)
node = exe.create_node()
node.add_input_opt(, config_file)
node.add_input_opt(, tmpltbank_file)
node.add_input_opt(, coinc_file)
node.add_multiifo_input_list_opt(, single_triggers)
node.add_input_opt(, insp_segs)
node.add_opt(, insp_data_name)
node.add_opt(, insp_anal_name)
node.new_output_file_opt(workflow.analysis_time, , , tags=tags)
node.new_output_file_opt(workflow.analysis_time, , , tags=tags)
node.new_output_file_opt(workflow.analysis_time, , , tags=tags)
name = node.output_files[0].name
map_file = node.output_files[1]
tc_file = node.output_files[2]
node.add_opt(, name)
node.add_opt(, out_dir)
workflow += node
fil = node.output_files[0]
try:
staging_site = workflow.cp.get(,
)
except:
staging_site = None
job = dax.DAX(fil)
job.addArguments( % os.path.splitext(os.path.basename(name))[0])
Workflow.set_job_properties(job, map_file, tc_file, staging_site=staging_site)
workflow._adag.addJob(job)
dep = dax.Dependency(parent=node._dax_node, child=job)
workflow._adag.addDependency(dep)
logging.info() |
def sfilter(self, source):
sources = []
if source.text[:4].encode(source.encoding) != b:
sources.extend(self._filter(source.text, source.context, source.encoding))
else:
for content, filename, enc in self.get_content(io.BytesIO(source.text.encode(source.encoding))):
sources.extend(self._filter(content, source.context, enc))
return sources | Filter. | ### Input:
Filter.
### Response:
def sfilter(self, source):
sources = []
if source.text[:4].encode(source.encoding) != b:
sources.extend(self._filter(source.text, source.context, source.encoding))
else:
for content, filename, enc in self.get_content(io.BytesIO(source.text.encode(source.encoding))):
sources.extend(self._filter(content, source.context, enc))
return sources |
def to_python(self, value):
if value is None:
return None
if not isinstance(value, six.string_types):
raise forms.ValidationError(
"Invalid value type (should be a string).",
code=,
)
)
return final | Convert the constant to the real choice value. | ### Input:
Convert the constant to the real choice value.
### Response:
def to_python(self, value):
if value is None:
return None
if not isinstance(value, six.string_types):
raise forms.ValidationError(
"Invalid value type (should be a string).",
code=,
)
)
return final |
def format_citations(zid, url=, hits=10, tag_prefix=):
url = (
.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata[][]):
version = hit[][][len(tag_prefix):]
lines.append( * len(version))
lines.append(version)
lines.append( * len(version))
lines.append()
lines.append(
.format(**hit[]))
if i < hits - 1:
lines.append()
return .join(lines) | Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hist : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs | ### Input:
Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hist : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs
### Response:
def format_citations(zid, url=, hits=10, tag_prefix=):
url = (
.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata[][]):
version = hit[][][len(tag_prefix):]
lines.append( * len(version))
lines.append(version)
lines.append( * len(version))
lines.append()
lines.append(
.format(**hit[]))
if i < hits - 1:
lines.append()
return .join(lines) |
def get_identities(self, item):
item = item[]
if in item:
user = self.get_sh_identity(item[][0])
yield user
rsvps = item.get(, [])
for rsvp in rsvps:
user = self.get_sh_identity(rsvp[])
yield user
for comment in item[]:
user = self.get_sh_identity(comment[])
yield user | Return the identities from an item | ### Input:
Return the identities from an item
### Response:
def get_identities(self, item):
item = item[]
if in item:
user = self.get_sh_identity(item[][0])
yield user
rsvps = item.get(, [])
for rsvp in rsvps:
user = self.get_sh_identity(rsvp[])
yield user
for comment in item[]:
user = self.get_sh_identity(comment[])
yield user |
def _request(self, path, method, body=None):
url = .join([_SERVER, path])
(resp, content) = _HTTP.request(url, method,
headers=self._headers, body=body)
content_type = resp.get()
if content_type and content_type.startswith():
content = json.loads(content.decode())
return (resp, content) | Make a request from the API. | ### Input:
Make a request from the API.
### Response:
def _request(self, path, method, body=None):
url = .join([_SERVER, path])
(resp, content) = _HTTP.request(url, method,
headers=self._headers, body=body)
content_type = resp.get()
if content_type and content_type.startswith():
content = json.loads(content.decode())
return (resp, content) |
def get_codec(bytes_):
prefix = extract_prefix(bytes_)
try:
return CODE_TABLE[prefix]
except KeyError:
raise ValueError(.format(prefix)) | Gets the codec used for prefix the multicodec prefixed data
:param bytes bytes_: multicodec prefixed data bytes
:return: name of the multicodec used to prefix
:rtype: str | ### Input:
Gets the codec used for prefix the multicodec prefixed data
:param bytes bytes_: multicodec prefixed data bytes
:return: name of the multicodec used to prefix
:rtype: str
### Response:
def get_codec(bytes_):
prefix = extract_prefix(bytes_)
try:
return CODE_TABLE[prefix]
except KeyError:
raise ValueError(.format(prefix)) |
def dynamodb_autoscaling_policy(tables):
return Policy(
Statement=[
Statement(
Effect=Allow,
Resource=dynamodb_arns(tables),
Action=[
dynamodb.DescribeTable,
dynamodb.UpdateTable,
]
),
Statement(
Effect=Allow,
Resource=[],
Action=[
cloudwatch.PutMetricAlarm,
cloudwatch.DescribeAlarms,
cloudwatch.GetMetricStatistics,
cloudwatch.SetAlarmState,
cloudwatch.DeleteAlarms,
]
),
]
) | Policy to allow AutoScaling a list of DynamoDB tables. | ### Input:
Policy to allow AutoScaling a list of DynamoDB tables.
### Response:
def dynamodb_autoscaling_policy(tables):
return Policy(
Statement=[
Statement(
Effect=Allow,
Resource=dynamodb_arns(tables),
Action=[
dynamodb.DescribeTable,
dynamodb.UpdateTable,
]
),
Statement(
Effect=Allow,
Resource=[],
Action=[
cloudwatch.PutMetricAlarm,
cloudwatch.DescribeAlarms,
cloudwatch.GetMetricStatistics,
cloudwatch.SetAlarmState,
cloudwatch.DeleteAlarms,
]
),
]
) |
def b64encoded(self):
if self._b64encoded:
return text_type(self._b64encoded).strip("\r\n")
else:
return base64encode(self.raw) | Return a base64 encoding of the key.
returns:
str: base64 encoding of the public key | ### Input:
Return a base64 encoding of the key.
returns:
str: base64 encoding of the public key
### Response:
def b64encoded(self):
if self._b64encoded:
return text_type(self._b64encoded).strip("\r\n")
else:
return base64encode(self.raw) |
def parse_field_path(api_repr):
field_names = []
for field_name in split_field_path(api_repr):
if field_name[0] == "`" and field_name[-1] == "`":
field_name = field_name[1:-1]
field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK)
field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH)
field_names.append(field_name)
return field_names | Parse a **field path** from into a list of nested field names.
See :func:`field_path` for more on **field paths**.
Args:
api_repr (str):
The unique Firestore api representation which consists of
either simple or UTF-8 field names. It cannot exceed
1500 bytes, and cannot be empty. Simple field names match
``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
escaped by surrounding them with backticks.
Returns:
List[str, ...]: The list of field names in the field path. | ### Input:
Parse a **field path** from into a list of nested field names.
See :func:`field_path` for more on **field paths**.
Args:
api_repr (str):
The unique Firestore api representation which consists of
either simple or UTF-8 field names. It cannot exceed
1500 bytes, and cannot be empty. Simple field names match
``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
escaped by surrounding them with backticks.
Returns:
List[str, ...]: The list of field names in the field path.
### Response:
def parse_field_path(api_repr):
field_names = []
for field_name in split_field_path(api_repr):
if field_name[0] == "`" and field_name[-1] == "`":
field_name = field_name[1:-1]
field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK)
field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH)
field_names.append(field_name)
return field_names |
def resolve_variables(self, provided_variables):
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in variable_dict.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value
defined_variables = self.get_parameter_definitions()
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in defined_variables.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value | Resolve the values of the blueprint variables.
This will resolve the values of the template parameters with values
from the env file, the config, and any lookups resolved. The
resolution is run twice, in case the blueprint is jinja2 templated
and requires provided variables to render.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables | ### Input:
Resolve the values of the blueprint variables.
This will resolve the values of the template parameters with values
from the env file, the config, and any lookups resolved. The
resolution is run twice, in case the blueprint is jinja2 templated
and requires provided variables to render.
Args:
provided_variables (list of :class:`stacker.variables.Variable`):
list of provided variables
### Response:
def resolve_variables(self, provided_variables):
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in variable_dict.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value
defined_variables = self.get_parameter_definitions()
self.resolved_variables = {}
variable_dict = dict((var.name, var) for var in provided_variables)
for var_name, _var_def in defined_variables.items():
value = resolve_variable(
variable_dict.get(var_name),
self.name
)
if value is not None:
self.resolved_variables[var_name] = value |
def record_run(record_type, print_session_id, **kwds):
if print_session_id and record_type != :
raise RuntimeError(
)
cfstore = ConfigStore()
json.dump(data, fp) | Record shell history. | ### Input:
Record shell history.
### Response:
def record_run(record_type, print_session_id, **kwds):
if print_session_id and record_type != :
raise RuntimeError(
)
cfstore = ConfigStore()
json.dump(data, fp) |
def echo(self, message, *, encoding=_NOTSET):
return self.execute(, message, encoding=encoding) | Echo the given string. | ### Input:
Echo the given string.
### Response:
def echo(self, message, *, encoding=_NOTSET):
return self.execute(, message, encoding=encoding) |
def run_profilers(run_object, prof_config, verbose=False):
if len(prof_config) > len(set(prof_config)):
raise AmbiguousConfigurationError(
% prof_config)
available_profilers = {opt for opt, _ in _PROFILERS}
for option in prof_config:
if option not in available_profilers:
raise BadOptionError( % option)
run_stats = OrderedDict()
present_profilers = ((o, p) for o, p in _PROFILERS if o in prof_config)
for option, prof in present_profilers:
curr_profiler = prof(run_object)
if verbose:
print( % curr_profiler.__class__.__name__)
run_stats[option] = curr_profiler.run()
return run_stats | Runs profilers on run_object.
Args:
run_object: An object (string or tuple) for profiling.
prof_config: A string with profilers configuration.
verbose: True if info about running profilers should be shown.
Returns:
An ordered dictionary with collected stats.
Raises:
AmbiguousConfigurationError: when prof_config is ambiguous.
BadOptionError: when unknown options are present in configuration. | ### Input:
Runs profilers on run_object.
Args:
run_object: An object (string or tuple) for profiling.
prof_config: A string with profilers configuration.
verbose: True if info about running profilers should be shown.
Returns:
An ordered dictionary with collected stats.
Raises:
AmbiguousConfigurationError: when prof_config is ambiguous.
BadOptionError: when unknown options are present in configuration.
### Response:
def run_profilers(run_object, prof_config, verbose=False):
if len(prof_config) > len(set(prof_config)):
raise AmbiguousConfigurationError(
% prof_config)
available_profilers = {opt for opt, _ in _PROFILERS}
for option in prof_config:
if option not in available_profilers:
raise BadOptionError( % option)
run_stats = OrderedDict()
present_profilers = ((o, p) for o, p in _PROFILERS if o in prof_config)
for option, prof in present_profilers:
curr_profiler = prof(run_object)
if verbose:
print( % curr_profiler.__class__.__name__)
run_stats[option] = curr_profiler.run()
return run_stats |
def area2lonlat(dataarray):
area = dataarray.attrs[]
lons, lats = area.get_lonlats_dask()
lons = xr.DataArray(lons, dims=[, ],
attrs={: "longitude",
: "longitude",
: },
name=)
lats = xr.DataArray(lats, dims=[, ],
attrs={: "latitude",
: "latitude",
: },
name=)
dataarray.attrs[] =
return [dataarray, lons, lats] | Convert an area to longitudes and latitudes. | ### Input:
Convert an area to longitudes and latitudes.
### Response:
def area2lonlat(dataarray):
area = dataarray.attrs[]
lons, lats = area.get_lonlats_dask()
lons = xr.DataArray(lons, dims=[, ],
attrs={: "longitude",
: "longitude",
: },
name=)
lats = xr.DataArray(lats, dims=[, ],
attrs={: "latitude",
: "latitude",
: },
name=)
dataarray.attrs[] =
return [dataarray, lons, lats] |
def concat(self, arrs:Collection[Tensor])->Tensor:
"Concatenate the `arrs` along the batch dimension."
return [torch.cat([l[si] for l in arrs], dim=1) for si in range_of(arrs[0])] | Concatenate the `arrs` along the batch dimension. | ### Input:
Concatenate the `arrs` along the batch dimension.
### Response:
def concat(self, arrs:Collection[Tensor])->Tensor:
"Concatenate the `arrs` along the batch dimension."
return [torch.cat([l[si] for l in arrs], dim=1) for si in range_of(arrs[0])] |
def refresh(self):
self.log.info()
astorbgz = self._download_astorb()
astorbDictList = self._parse_astorb_database_file(astorbgz)
self._import_astorb_to_database(astorbDictList)
self.log.info()
return None | *refresh the orbital elements in the astorb.dat database table*
**Return:**
- ``astorb``
**Usage:**
See class docstring | ### Input:
*refresh the orbital elements in the astorb.dat database table*
**Return:**
- ``astorb``
**Usage:**
See class docstring
### Response:
def refresh(self):
self.log.info()
astorbgz = self._download_astorb()
astorbDictList = self._parse_astorb_database_file(astorbgz)
self._import_astorb_to_database(astorbDictList)
self.log.info()
return None |
def new_text_cell(text=None):
cell = NotebookNode()
if text is not None:
cell.text = unicode(text)
cell.cell_type = u
return cell | Create a new text cell. | ### Input:
Create a new text cell.
### Response:
def new_text_cell(text=None):
cell = NotebookNode()
if text is not None:
cell.text = unicode(text)
cell.cell_type = u
return cell |
def get_default_classes(self):
default_classes = super(Tab, self).get_default_classes()
if self.is_active():
default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
if not self._enabled:
default_classes.extend(CSS_DISABLED_TAB_CLASSES)
return default_classes | Returns a list of the default classes for the tab.
Defaults to and empty list (``[]``), however additional classes may
be added depending on the state of the tab as follows:
If the tab is the active tab for the tab group, in which
the class ``"active"`` will be added.
If the tab is not enabled, the classes the class ``"disabled"``
will be added. | ### Input:
Returns a list of the default classes for the tab.
Defaults to and empty list (``[]``), however additional classes may
be added depending on the state of the tab as follows:
If the tab is the active tab for the tab group, in which
the class ``"active"`` will be added.
If the tab is not enabled, the classes the class ``"disabled"``
will be added.
### Response:
def get_default_classes(self):
default_classes = super(Tab, self).get_default_classes()
if self.is_active():
default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
if not self._enabled:
default_classes.extend(CSS_DISABLED_TAB_CLASSES)
return default_classes |
async def _do_ping(self):
for agent_addr, friendly_name in list(self._registered_agents.items()):
try:
ping_count = self._ping_count.get(agent_addr, 0)
if ping_count > 5:
self._logger.warning("Agent %s (%s) does not respond: removing from list.", agent_addr, friendly_name)
delete_agent = True
else:
self._ping_count[agent_addr] = ping_count + 1
await ZMQUtils.send_with_addr(self._agent_socket, agent_addr, Ping())
delete_agent = False
except:
self._logger.exception("Failed to send ping to agent %s (%s). Removing it from list.", agent_addr, friendly_name)
delete_agent = True
if delete_agent:
try:
await self._delete_agent(agent_addr)
except:
self._logger.exception("Failed to delete agent %s (%s)!", agent_addr, friendly_name)
self._loop.call_later(1, self._create_safe_task, self._do_ping()) | Ping the agents | ### Input:
Ping the agents
### Response:
async def _do_ping(self):
for agent_addr, friendly_name in list(self._registered_agents.items()):
try:
ping_count = self._ping_count.get(agent_addr, 0)
if ping_count > 5:
self._logger.warning("Agent %s (%s) does not respond: removing from list.", agent_addr, friendly_name)
delete_agent = True
else:
self._ping_count[agent_addr] = ping_count + 1
await ZMQUtils.send_with_addr(self._agent_socket, agent_addr, Ping())
delete_agent = False
except:
self._logger.exception("Failed to send ping to agent %s (%s). Removing it from list.", agent_addr, friendly_name)
delete_agent = True
if delete_agent:
try:
await self._delete_agent(agent_addr)
except:
self._logger.exception("Failed to delete agent %s (%s)!", agent_addr, friendly_name)
self._loop.call_later(1, self._create_safe_task, self._do_ping()) |
def load_steps(working_dir=None, steps_dir=None, step_file=None,
step_list=None):
if steps_dir is not None:
step_files = glob.glob(os.path.join(steps_dir, ))
elif step_file is not None:
step_files = [step_file]
elif step_list is not None:
step_files = []
for path in step_list:
if os.path.isdir(path):
step_files += glob.glob(os.path.join(path, ))
else:
step_files.append(path)
else:
step_files = []
if working_dir is not None:
step_files = sort_loading_order(step_files)
steps = {}
for f in step_files:
if working_dir is not None:
if not working_dir == os.path.dirname(f) and not is_url(f):
copied_file = os.path.join(working_dir, os.path.basename(f))
shutil.copy2(f, copied_file)
f = copied_file
try:
s = Step(f)
steps[s.name] = s
except (NotImplementedError, ValidationException,
PackedWorkflowException) as e:
logger.warning(e)
return steps | Return a dictionary containing Steps read from file.
Args:
steps_dir (str, optional): path to directory containing CWL files.
step_file (str, optional): path or http(s) url to a single CWL file.
step_list (list, optional): a list of directories, urls or local file
paths to CWL files or directories containing CWL files.
Return:
dict containing (name, Step) entries. | ### Input:
Return a dictionary containing Steps read from file.
Args:
steps_dir (str, optional): path to directory containing CWL files.
step_file (str, optional): path or http(s) url to a single CWL file.
step_list (list, optional): a list of directories, urls or local file
paths to CWL files or directories containing CWL files.
Return:
dict containing (name, Step) entries.
### Response:
def load_steps(working_dir=None, steps_dir=None, step_file=None,
step_list=None):
if steps_dir is not None:
step_files = glob.glob(os.path.join(steps_dir, ))
elif step_file is not None:
step_files = [step_file]
elif step_list is not None:
step_files = []
for path in step_list:
if os.path.isdir(path):
step_files += glob.glob(os.path.join(path, ))
else:
step_files.append(path)
else:
step_files = []
if working_dir is not None:
step_files = sort_loading_order(step_files)
steps = {}
for f in step_files:
if working_dir is not None:
if not working_dir == os.path.dirname(f) and not is_url(f):
copied_file = os.path.join(working_dir, os.path.basename(f))
shutil.copy2(f, copied_file)
f = copied_file
try:
s = Step(f)
steps[s.name] = s
except (NotImplementedError, ValidationException,
PackedWorkflowException) as e:
logger.warning(e)
return steps |
def for_each(self, operation, limit=0, verbose=False):
if limit != 0:
count = 0
while self.has_next():
operation.perform(self.next())
count += 1
if verbose:
print count
if count >= limit:
break
else:
while self.has_next():
operation.perform(self.next()) | Applies the given Operation to each item in the stream. The Operation executes on the
items in the stream in the order that they appear in the stream.
If the limit is supplied, then processing of the stream will stop after that many items
have been processed. | ### Input:
Applies the given Operation to each item in the stream. The Operation executes on the
items in the stream in the order that they appear in the stream.
If the limit is supplied, then processing of the stream will stop after that many items
have been processed.
### Response:
def for_each(self, operation, limit=0, verbose=False):
if limit != 0:
count = 0
while self.has_next():
operation.perform(self.next())
count += 1
if verbose:
print count
if count >= limit:
break
else:
while self.has_next():
operation.perform(self.next()) |
def get_bytes(self):
ret = struct.pack("<I16s16sQ", client_DH_inner_data.constructor, self.nonce, self.server_nonce,
self.retry_id)
bytes_io = BytesIO()
bytes_io.write(ret)
serialize_string(bytes_io, self.g_b)
return bytes_io.getvalue() | client_DH_inner_data#6643b654 nonce:int128 server_nonce:int128 retry_id:long g_b:string = Client_DH_Inner_Data | ### Input:
client_DH_inner_data#6643b654 nonce:int128 server_nonce:int128 retry_id:long g_b:string = Client_DH_Inner_Data
### Response:
def get_bytes(self):
ret = struct.pack("<I16s16sQ", client_DH_inner_data.constructor, self.nonce, self.server_nonce,
self.retry_id)
bytes_io = BytesIO()
bytes_io.write(ret)
serialize_string(bytes_io, self.g_b)
return bytes_io.getvalue() |
def read_omega_scan_config(source):
out = ChannelList()
append = out.append
if isinstance(source, FILE_LIKE):
close = False
else:
source = open(source, )
close = True
try:
section = None
while True:
try:
line = next(source)
except StopIteration:
break
if line == or line == or line.startswith():
continue
elif line.startswith():
section = line[1:-2]
elif line.startswith():
append(parse_omega_channel(source, section))
else:
raise RuntimeError("Failed to parse Omega config line:\n%s"
% line)
finally:
if close:
source.close()
return out | Parse an Omega-scan configuration file into a `ChannelList`
Parameters
----------
source : `str`
path of Omega configuration file to parse
Returns
-------
channels : `ChannelList`
the list of channels (in order) as parsed
Raises
------
RuntimeError
if this method finds a line it cannot parse sensibly | ### Input:
Parse an Omega-scan configuration file into a `ChannelList`
Parameters
----------
source : `str`
path of Omega configuration file to parse
Returns
-------
channels : `ChannelList`
the list of channels (in order) as parsed
Raises
------
RuntimeError
if this method finds a line it cannot parse sensibly
### Response:
def read_omega_scan_config(source):
out = ChannelList()
append = out.append
if isinstance(source, FILE_LIKE):
close = False
else:
source = open(source, )
close = True
try:
section = None
while True:
try:
line = next(source)
except StopIteration:
break
if line == or line == or line.startswith():
continue
elif line.startswith():
section = line[1:-2]
elif line.startswith():
append(parse_omega_channel(source, section))
else:
raise RuntimeError("Failed to parse Omega config line:\n%s"
% line)
finally:
if close:
source.close()
return out |
def _set_labels(self, axes, dimensions, xlabel=None, ylabel=None, zlabel=None):
xlabel, ylabel, zlabel = self._get_axis_labels(dimensions, xlabel, ylabel, zlabel)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
if xlabel and self.xaxis and in self.labelled:
axes.set_xlabel(xlabel, **self._fontsize())
if ylabel and self.yaxis and in self.labelled:
axes.set_ylabel(ylabel, **self._fontsize())
if zlabel and self.zaxis and in self.labelled:
axes.set_zlabel(zlabel, **self._fontsize()) | Sets the labels of the axes using the supplied list of dimensions.
Optionally explicit labels may be supplied to override the dimension
label. | ### Input:
Sets the labels of the axes using the supplied list of dimensions.
Optionally explicit labels may be supplied to override the dimension
label.
### Response:
def _set_labels(self, axes, dimensions, xlabel=None, ylabel=None, zlabel=None):
xlabel, ylabel, zlabel = self._get_axis_labels(dimensions, xlabel, ylabel, zlabel)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
if xlabel and self.xaxis and in self.labelled:
axes.set_xlabel(xlabel, **self._fontsize())
if ylabel and self.yaxis and in self.labelled:
axes.set_ylabel(ylabel, **self._fontsize())
if zlabel and self.zaxis and in self.labelled:
axes.set_zlabel(zlabel, **self._fontsize()) |
def dre_dsigmai(self, pars):
r
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result | r"""
:math:Add formula | ### Input:
r"""
:math:Add formula
### Response:
def dre_dsigmai(self, pars):
r
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result |
def delete(path, attribute):
*
cmd = .format(attribute, path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if in exc.strerror:
raise CommandExecutionError(.format(path))
if in exc.strerror:
raise CommandExecutionError(.format(attribute))
raise CommandExecutionError(.format(exc.strerror))
return attribute not in list_(path) | Removes the given attribute from the file
:param str path: The file(s) to get attributes from
:param str attribute: The attribute name to be deleted from the
file/directory
:return: True if successful, otherwise False
:rtype: bool
:raises: CommandExecutionError on file not found, attribute not found, and
any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr" | ### Input:
Removes the given attribute from the file
:param str path: The file(s) to get attributes from
:param str attribute: The attribute name to be deleted from the
file/directory
:return: True if successful, otherwise False
:rtype: bool
:raises: CommandExecutionError on file not found, attribute not found, and
any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr"
### Response:
def delete(path, attribute):
*
cmd = .format(attribute, path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if in exc.strerror:
raise CommandExecutionError(.format(path))
if in exc.strerror:
raise CommandExecutionError(.format(attribute))
raise CommandExecutionError(.format(exc.strerror))
return attribute not in list_(path) |
def open_link(self, link_uri):
self.connection_requested.call(link_uri)
self.state = State.INITIALIZED
self.link_uri = link_uri
try:
self.link = cflib.crtp.get_link_driver(
link_uri, self._link_quality_cb, self._link_error_cb)
if not self.link:
message = \
.format(link_uri)
logger.warning(message)
self.connection_failed.call(link_uri, message)
else:
self.packet_received.add_callback(
self._check_for_initial_packet_cb)
self._start_connection_setup()
except Exception as ex:
import traceback
logger.error("Couldnt load link driver: %s\n\n%s" % (
ex, traceback.format_exc())
if self.link:
self.link.close()
self.link = None
self.connection_failed.call(link_uri, exception_text) | Open the communication link to a copter at the given URI and setup the
connection (download log/parameter TOC). | ### Input:
Open the communication link to a copter at the given URI and setup the
connection (download log/parameter TOC).
### Response:
def open_link(self, link_uri):
self.connection_requested.call(link_uri)
self.state = State.INITIALIZED
self.link_uri = link_uri
try:
self.link = cflib.crtp.get_link_driver(
link_uri, self._link_quality_cb, self._link_error_cb)
if not self.link:
message = \
.format(link_uri)
logger.warning(message)
self.connection_failed.call(link_uri, message)
else:
self.packet_received.add_callback(
self._check_for_initial_packet_cb)
self._start_connection_setup()
except Exception as ex:
import traceback
logger.error("Couldnt load link driver: %s\n\n%s" % (
ex, traceback.format_exc())
if self.link:
self.link.close()
self.link = None
self.connection_failed.call(link_uri, exception_text) |
def server_version(self):
health_url = "%s/v1/sys/health" % self.vault_addr
resp = self.session.request(, health_url, **self._kwargs)
if resp.status_code == 200 or resp.status_code == 429:
blob = resp.json()
if in blob:
return blob[]
else:
raise aomi.exceptions.VaultProblem()
return None | Attempts to determine the version of Vault that a
server is running. Some actions will change on older
Vault deployments. | ### Input:
Attempts to determine the version of Vault that a
server is running. Some actions will change on older
Vault deployments.
### Response:
def server_version(self):
health_url = "%s/v1/sys/health" % self.vault_addr
resp = self.session.request(, health_url, **self._kwargs)
if resp.status_code == 200 or resp.status_code == 429:
blob = resp.json()
if in blob:
return blob[]
else:
raise aomi.exceptions.VaultProblem()
return None |
def setup_logger(log=None, level=):
if not log:
log = logging.getLogger()
if not log.handlers:
channel = logging.StreamHandler()
channel.setFormatter(DebugLogFormatter())
log.setLevel(level)
log.addHandler(channel)
repo_logger = logging.getLogger()
channel = logging.StreamHandler()
channel.setFormatter(RepoLogFormatter())
channel.addFilter(RepoFilter())
repo_logger.setLevel(level)
repo_logger.addHandler(channel) | Setup logging for CLI use.
:param log: instance of logger
:type log: :py:class:`Logger` | ### Input:
Setup logging for CLI use.
:param log: instance of logger
:type log: :py:class:`Logger`
### Response:
def setup_logger(log=None, level=):
if not log:
log = logging.getLogger()
if not log.handlers:
channel = logging.StreamHandler()
channel.setFormatter(DebugLogFormatter())
log.setLevel(level)
log.addHandler(channel)
repo_logger = logging.getLogger()
channel = logging.StreamHandler()
channel.setFormatter(RepoLogFormatter())
channel.addFilter(RepoFilter())
repo_logger.setLevel(level)
repo_logger.addHandler(channel) |
def validate_key(self, key):
if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():
raise serializers.ValidationError(
_("The provided reset token does not exist, or is expired.")
)
return key | Validate the provided reset key.
Returns:
The validated key.
Raises:
serializers.ValidationError:
If the provided key does not exist. | ### Input:
Validate the provided reset key.
Returns:
The validated key.
Raises:
serializers.ValidationError:
If the provided key does not exist.
### Response:
def validate_key(self, key):
if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():
raise serializers.ValidationError(
_("The provided reset token does not exist, or is expired.")
)
return key |
def _format_mongodb_uri(parsed_uri):
user_pass =
if parsed_uri.get() and parsed_uri.get():
user_pass = .format(**parsed_uri)
_nodes = []
for host, port in parsed_uri.get():
if in host and not host.endswith():
host = .format(host)
if port == 27017:
_nodes.append(host)
else:
_nodes.append(.format(host, port))
nodelist = .join(_nodes)
options =
if parsed_uri.get():
_opt_list = []
for key, value in parsed_uri.get().items():
if isinstance(value, bool):
value = str(value).lower()
_opt_list.append(.format(key, value))
options = + .join(_opt_list)
db_name = parsed_uri.get() or
res = "mongodb://{user_pass!s}{nodelist!s}/{db_name!s}{options!s}".format(
user_pass=user_pass,
nodelist=nodelist,
db_name=db_name,
options=options)
return res | Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri.
:param parsed_uri: Result of pymongo.uri_parser.parse_uri
:type parsed_uri: dict
:return: New URI
:rtype: str | unicode | ### Input:
Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri.
:param parsed_uri: Result of pymongo.uri_parser.parse_uri
:type parsed_uri: dict
:return: New URI
:rtype: str | unicode
### Response:
def _format_mongodb_uri(parsed_uri):
user_pass =
if parsed_uri.get() and parsed_uri.get():
user_pass = .format(**parsed_uri)
_nodes = []
for host, port in parsed_uri.get():
if in host and not host.endswith():
host = .format(host)
if port == 27017:
_nodes.append(host)
else:
_nodes.append(.format(host, port))
nodelist = .join(_nodes)
options =
if parsed_uri.get():
_opt_list = []
for key, value in parsed_uri.get().items():
if isinstance(value, bool):
value = str(value).lower()
_opt_list.append(.format(key, value))
options = + .join(_opt_list)
db_name = parsed_uri.get() or
res = "mongodb://{user_pass!s}{nodelist!s}/{db_name!s}{options!s}".format(
user_pass=user_pass,
nodelist=nodelist,
db_name=db_name,
options=options)
return res |
def transform_position_array(array, pos, euler, is_normal, reverse=False):
trans_matrix = euler_trans_matrix(*euler)
if not reverse:
trans_matrix = trans_matrix.T
if isinstance(array, ComputedColumn):
array = array.for_computations
if is_normal:
return np.dot(np.asarray(array), trans_matrix)
else:
return np.dot(np.asarray(array), trans_matrix) + np.asarray(pos) | Transform any Nx3 position array by translating to a center-of-mass 'pos'
and applying an euler transformation
:parameter array array: numpy array of Nx3 positions in the original (star)
coordinate frame
:parameter array pos: numpy array with length 3 giving cartesian
coordinates to offset all positions
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter bool is_normal: whether each entry is a normal vector rather
than position vector. If true, the quantities won't be offset by
'pos'
:return: new positions array with same shape as 'array'. | ### Input:
Transform any Nx3 position array by translating to a center-of-mass 'pos'
and applying an euler transformation
:parameter array array: numpy array of Nx3 positions in the original (star)
coordinate frame
:parameter array pos: numpy array with length 3 giving cartesian
coordinates to offset all positions
:parameter array euler: euler angles (etheta, elongan, eincl) in radians
:parameter bool is_normal: whether each entry is a normal vector rather
than position vector. If true, the quantities won't be offset by
'pos'
:return: new positions array with same shape as 'array'.
### Response:
def transform_position_array(array, pos, euler, is_normal, reverse=False):
trans_matrix = euler_trans_matrix(*euler)
if not reverse:
trans_matrix = trans_matrix.T
if isinstance(array, ComputedColumn):
array = array.for_computations
if is_normal:
return np.dot(np.asarray(array), trans_matrix)
else:
return np.dot(np.asarray(array), trans_matrix) + np.asarray(pos) |
def _update_rs_no_primary_from_member(
sds,
replica_set_name,
server_description):
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
return topology_type, replica_set_name
for address in server_description.all_hosts:
if address not in sds:
sds[address] = ServerDescription(address)
if (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
return topology_type, replica_set_name | RS without known primary. Update from a non-primary's response.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns (new topology type, new replica_set_name). | ### Input:
RS without known primary. Update from a non-primary's response.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns (new topology type, new replica_set_name).
### Response:
def _update_rs_no_primary_from_member(
sds,
replica_set_name,
server_description):
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
return topology_type, replica_set_name
for address in server_description.all_hosts:
if address not in sds:
sds[address] = ServerDescription(address)
if (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
return topology_type, replica_set_name |
def scheduleMeasurement(self, measurementId, duration, start):
results = {}
for device in self.getDevices(RecordingDeviceStatus.INITIALISED.name):
logger.info( + measurementId + + device.payload[])
try:
resp = self.httpclient.put(device.payload[] + + measurementId,
json={: duration, : start.strftime(DATETIME_FORMAT)})
logger.info( + measurementId + + device.payload[] + +
str(resp.status_code))
results[device] = resp.status_code
except Exception as e:
logger.exception(e)
results[device] = 500
return results | Schedules the requested measurement session with all INITIALISED devices.
:param measurementId:
:param duration:
:param start:
:return: a dict of device vs status. | ### Input:
Schedules the requested measurement session with all INITIALISED devices.
:param measurementId:
:param duration:
:param start:
:return: a dict of device vs status.
### Response:
def scheduleMeasurement(self, measurementId, duration, start):
results = {}
for device in self.getDevices(RecordingDeviceStatus.INITIALISED.name):
logger.info( + measurementId + + device.payload[])
try:
resp = self.httpclient.put(device.payload[] + + measurementId,
json={: duration, : start.strftime(DATETIME_FORMAT)})
logger.info( + measurementId + + device.payload[] + +
str(resp.status_code))
results[device] = resp.status_code
except Exception as e:
logger.exception(e)
results[device] = 500
return results |
def pix2sky(self, pixel):
pixbox = numpy.array([pixel, pixel])
skybox = self.wcs.all_pix2world(pixbox, 1)
return [float(skybox[0][0]), float(skybox[0][1])] | Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees) | ### Input:
Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees)
### Response:
def pix2sky(self, pixel):
pixbox = numpy.array([pixel, pixel])
skybox = self.wcs.all_pix2world(pixbox, 1)
return [float(skybox[0][0]), float(skybox[0][1])] |
def generate_scale_free_graph(steps, growth_num, self_loops=False, multi_edges=False):
graph = Graph.Graph()
store = []
for i in range(growth_num):
for j in range(i + 1, growth_num):
store.append(i)
store.append(j)
graph.add_edge(i,j)
for node in range(growth_num, steps * growth_num):
graph.add_node(node)
while ( graph.out_degree(node) < growth_num ):
nbr = random.choice(store)
if node == nbr and not self_loops:
continue
if graph.edge_by_node(node, nbr) and not multi_edges:
continue
graph.add_edge(node, nbr)
for nbr in graph.out_nbrs(node):
store.append(node)
store.append(nbr)
return graph | Generates and returns a :py:class:`~altgraph.Graph.Graph` instance that will have *steps* \* *growth_num* nodes
and a scale free (powerlaw) connectivity. Starting with a fully connected graph with *growth_num* nodes
at every step *growth_num* nodes are added to the graph and are connected to existing nodes with
a probability proportional to the degree of these existing nodes. | ### Input:
Generates and returns a :py:class:`~altgraph.Graph.Graph` instance that will have *steps* \* *growth_num* nodes
and a scale free (powerlaw) connectivity. Starting with a fully connected graph with *growth_num* nodes
at every step *growth_num* nodes are added to the graph and are connected to existing nodes with
a probability proportional to the degree of these existing nodes.
### Response:
def generate_scale_free_graph(steps, growth_num, self_loops=False, multi_edges=False):
graph = Graph.Graph()
store = []
for i in range(growth_num):
for j in range(i + 1, growth_num):
store.append(i)
store.append(j)
graph.add_edge(i,j)
for node in range(growth_num, steps * growth_num):
graph.add_node(node)
while ( graph.out_degree(node) < growth_num ):
nbr = random.choice(store)
if node == nbr and not self_loops:
continue
if graph.edge_by_node(node, nbr) and not multi_edges:
continue
graph.add_edge(node, nbr)
for nbr in graph.out_nbrs(node):
store.append(node)
store.append(nbr)
return graph |
def infer_location(
self,
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
):
self.location_from = infer_location(
self.points[0],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
self.location_to = infer_location(
self.points[-1],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
return self | In-place location inferring
See infer_location function
Args:
Returns:
:obj:`Segment`: self | ### Input:
In-place location inferring
See infer_location function
Args:
Returns:
:obj:`Segment`: self
### Response:
def infer_location(
self,
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
):
self.location_from = infer_location(
self.points[0],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
self.location_to = infer_location(
self.points[-1],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
return self |
def triggered(self):
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
return True | For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached. | ### Input:
For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached.
### Response:
def triggered(self):
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
return True |
def from_urlsafe(cls, urlsafe):
    """Return the entity identified by a urlsafe key string.

    Args:
        urlsafe: urlsafe-encoded ndb key string.

    Returns:
        The entity if the key decodes, the entity exists, and it is an
        instance of ``cls``; otherwise ``None``.
    """
    try:
        key = ndb.Key(urlsafe=urlsafe)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. Malformed keys still yield None.
        return None
    obj = key.get()
    if obj and isinstance(obj, cls):
        return obj
    # Explicit for clarity (was an implicit fall-through returning None).
    return None
:param urlsafe: urlsafe key
:return: Instance of cls | ### Input:
Returns an instance of the model from a urlsafe string.
:param urlsafe: urlsafe key
:return: Instance of cls
### Response:
def from_urlsafe(cls, urlsafe):
try:
key = ndb.Key(urlsafe=urlsafe)
except:
return None
obj = key.get()
if obj and isinstance(obj, cls):
return obj |
def get_nodes(self):
    """Get the tree nodes as a list.

    Returns:
        list: A 2d-list holding the grown node coordinates as tuples
        for every age, e.g. ``[[(10, 40)], [(20, 80), (100, 30)], ...]``.
    """
    # One inner list per age level, preserving node order within a level.
    return [
        [node.get_tuple() for node in level]
        for level in self.nodes
    ]
Returns:
list: A 2d-list holding the grown nodes coordinates as tupel for every age.
Example:
[
[(10, 40)],
[(20, 80), (100, 30)],
[(100, 90), (120, 40), ...],
...
] | ### Input:
Get the tree nodes as list.
Returns:
list: A 2d-list holding the grown nodes coordinates as tupel for every age.
Example:
[
[(10, 40)],
[(20, 80), (100, 30)],
[(100, 90), (120, 40), ...],
...
]
### Response:
def get_nodes(self):
nodes = []
for age, level in enumerate(self.nodes):
nodes.append([])
for node in level:
nodes[age].append(node.get_tuple())
return nodes |
def linop_scale(w, op):
    """Creates weighted `LinOp` from existing `LinOp`.

    Builds a new `tf.linalg.LinearOperator` representing ``w * op`` for the
    operator types handled below; any other type raises
    `NotImplementedError`.

    Args:
      w: weight; multiplied against `op`'s multiplier / diagonal /
        triangular part, with trailing axes added so batch shapes
        broadcast against the operator's parts.
      op: `tf.linalg.LinearOperator` to scale.

    Returns:
      A `tf.linalg.LinearOperator` of matching structure (plain identity
      inputs come back as scaled identity).

    Raises:
      NotImplementedError: if `op` is not one of the supported types.
    """
    with tf.name_scope("linop_scale"):
        # Shared by the two identity branches: a scaled identity carrying
        # over `op`'s structural hints.
        # NOTE(review): is_self_adjoint / is_positive_definite are
        # propagated unchanged; that presumes w > 0 (or at least real
        # nonnegative w) — confirm with callers.
        def scaled_identity(w):
            return tf.linalg.LinearOperatorScaledIdentity(
                num_rows=op.range_dimension_tensor(),
                multiplier=w,
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        if isinstance(op, tf.linalg.LinearOperatorIdentity):
            return scaled_identity(w)
        if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
            # Fold the weight into the existing multiplier.
            return scaled_identity(w * op.multiplier)
        if isinstance(op, tf.linalg.LinearOperatorDiag):
            # w gains a trailing axis to broadcast across the diagonal.
            return tf.linalg.LinearOperatorDiag(
                diag=w[..., tf.newaxis] * op.diag_part(),
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
            # w gains two trailing axes to broadcast across the dense
            # triangular matrix.
            return tf.linalg.LinearOperatorLowerTriangular(
                tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
                is_non_singular=op.is_non_singular,
                is_self_adjoint=op.is_self_adjoint,
                is_positive_definite=op.is_positive_definite)
        raise NotImplementedError(
            "Unsupported Linop type ({})".format(type(op).__name__))
Creates weighted `LinOp` from existing `LinOp`.
### Response:
def linop_scale(w, op):
with tf.name_scope("linop_scale"):
def scaled_identity(w):
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, tf.linalg.LinearOperatorDiag):
return tf.linalg.LinearOperatorDiag(
diag=w[..., tf.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
return tf.linalg.LinearOperatorLowerTriangular(
tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__)) |
def compile_link_import_py_ext(
        srcs, extname=None, build_dir=None, compile_kwargs=None,
        link_kwargs=None, **kwargs):
    """Compile sources to a shared object (python extension) and import it.

    If an importable, up-to-date shared object already exists it is
    imported directly instead of being recompiled.

    Parameters
    ----------
    srcs : list of str
        Paths to source files.
    extname : str, optional
        Name of the extension; defaults to the basename (without
        extension) of the last entry in ``srcs``.
    build_dir : str, optional
        Directory in which object files etc. are generated; defaults to
        the current working directory.
    compile_kwargs : dict, optional
        Keyword arguments passed to ``compile_sources``.
    link_kwargs : dict, optional
        Keyword arguments passed to ``link_py_so``.
    **kwargs :
        Convenience overrides applied to both ``compile_kwargs`` and
        ``link_kwargs`` (e.g. a shared ``logger``).

    Returns
    -------
    module
        The imported extension module.
    """
    # NOTE(review): the original assignment was truncated
    # ("build_dir = build_dir or") — defaulting to the current
    # directory here; confirm against the upstream source.
    build_dir = build_dir or '.'
    if extname is None:
        extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
    compile_kwargs = compile_kwargs or {}
    compile_kwargs.update(kwargs)
    link_kwargs = link_kwargs or {}
    link_kwargs.update(kwargs)
    try:
        # Fast path: an up-to-date build artifact already exists.
        mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
    except ImportError:
        objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
                               cwd=build_dir, **compile_kwargs)
        so = link_py_so(
            objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
            **link_kwargs)
        mod = import_module_from_file(so)
    return mod
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: string
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP | ### Input:
Compiles sources in `srcs` to a shared object (python extension)
which is imported. If shared object is newer than the sources, they
are not recompiled but instead it is imported.
Parameters
----------
srcs: string
list of paths to sources
extname: string
name of extension (default: None)
(taken from the last file in `srcs` - without extension)
build_dir: string
path to directory in which objects files etc. are generated
compile_kwargs: dict
keyword arguments passed to compile_sources
link_kwargs: dict
keyword arguments passed to link_py_so
**kwargs:
additional keyword arguments overwrites to both compile_kwargs
and link_kwargs useful for convenience e.g. when passing logger
Returns
-------
the imported module
Examples
--------
>>> mod = compile_link_import_py_ext(['fft.f90', 'convolution.cpp',\
'fft_wrapper.pyx'], only_update=True) # doctest: +SKIP
>>> Aprim = mod.fft(A) # doctest: +SKIP
### Response:
def compile_link_import_py_ext(
srcs, extname=None, build_dir=None, compile_kwargs=None,
link_kwargs=None, **kwargs):
build_dir = build_dir or
if extname is None:
extname = os.path.splitext(os.path.basename(srcs[-1]))[0]
compile_kwargs = compile_kwargs or {}
compile_kwargs.update(kwargs)
link_kwargs = link_kwargs or {}
link_kwargs.update(kwargs)
try:
mod = import_module_from_file(os.path.join(build_dir, extname), srcs)
except ImportError:
objs = compile_sources(list(map(get_abspath, srcs)), destdir=build_dir,
cwd=build_dir, **compile_kwargs)
so = link_py_so(
objs, cwd=build_dir, fort=any_fort(srcs), cplus=any_cplus(srcs),
**link_kwargs)
mod = import_module_from_file(so)
return mod |
def find_formatter(name, path):
    """Resolve a formatter class by name or by probing *path*.

    When *name* is not *AUTO_FORMATTER*, the formatter whose name
    attribute equals *name* is looked up via :py:func:`get_formatter`.
    Otherwise the first formatter accepting *path* is returned via
    :py:func:`find_formatters`.
    """
    if name != AUTO_FORMATTER:
        return get_formatter(name, silent=False)
    return find_formatters(path, silent=False)[0]
Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply
uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*. | ### Input:
Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*.
Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply
uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*.
### Response:
def find_formatter(name, path):
if name == AUTO_FORMATTER:
return find_formatters(path, silent=False)[0]
else:
return get_formatter(name, silent=False) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.