Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---
388,600 | def green(fn=None, consume_green_mode=True):
def decorator(fn):
@wraps(fn)
def greener(obj, *args, **kwargs):
args = (obj,) + args
wait = kwargs.pop("wait", None)
timeout = kwargs.pop("timeout", None)
access = kwargs.pop if consume_green_mode else kwargs.get
green_mode = access("green_mode", None)
executor = get_object_executor(obj, green_mode)
return executor.run(fn, args, kwargs, wait=wait, timeout=timeout)
return greener
if fn is None:
return decorator
return decorator(fn) | Make a function green. Can be used as a decorator. |
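The entry above is the optional-argument decorator pattern (usable as `@green` or `@green(...)`) wrapped around an executor. A minimal self-contained sketch of just that pattern, with the executor machinery replaced by a print call; the names here are invented for the example:

```python
from functools import wraps

def traced(fn=None, prefix="call"):
    """Works both as @traced and @traced(prefix=...), mirroring `green` above."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            print("%s: %s" % (prefix, fn.__name__))
            return fn(*args, **kwargs)
        return wrapper
    if fn is None:           # used as @traced(prefix=...) -> return the real decorator
        return decorator
    return decorator(fn)     # used as bare @traced -> decorate immediately

@traced
def add(a, b):
    return a + b

@traced(prefix="invoke")
def sub(a, b):
    return a - b

print(add(1, 2), sub(5, 3))  # trace lines are printed, then "3 2"
```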
388,601 | def get(self, measurementId):
logger.info( + measurementId)
measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE)
if measurement is not None:
if measurement.inflate():
data = {
name: {
: {
: self._jsonify(data.raw()),
: self._jsonify(data.raw()),
: self._jsonify(data.raw())
},
: {
: self._jsonify(data.vibration()),
: self._jsonify(data.vibration()),
: self._jsonify(data.vibration())
},
: {
: self._jsonify(data.tilt()),
: self._jsonify(data.tilt()),
: self._jsonify(data.tilt())
}
}
for name, data in measurement.data.items()
}
return data, 200
else:
return None, 404
else:
return None, 404 | Analyses the measurement with the given parameters
:param measurementId:
:return: |
388,602 | def _handle_sub_action(self, input_dict, handler):
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
sub_value = input_dict[key]
input_dict[key] = self._handle_sub_value(sub_value, handler)
return input_dict | Handles resolving replacements in the Sub action based on the handler that is passed as an input.
:param input_dict: Dictionary to be resolved
:param supported_values: One of several different objects that contain the supported values that
need to be changed. See each method above for specifics on these objects.
:param handler: handler that is specific to each implementation.
:return: Resolved value of the Sub dictionary |
388,603 | def search(self, **kwargs):
return super(ApiV4Neighbor, self).get(self.prepare_url(
, kwargs)) | Method to search neighbors based on extended search.
:param search: Dict containing QuerySets to find neighbors.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing neighbors |
388,604 | def namedb_create(path, genesis_block):
global BLOCKSTACK_DB_SCRIPT
if os.path.exists( path ):
raise Exception("Database already exists" % path)
lines = [l + ";" for l in BLOCKSTACK_DB_SCRIPT.split(";")]
con = sqlite3.connect( path, isolation_level=None, timeout=2**30 )
for line in lines:
db_query_execute(con, line, ())
con.row_factory = namedb_row_factory
namedb_create_token_genesis(con, genesis_block[], genesis_block[])
return con | Create a sqlite3 db at the given path.
Create all the tables and indexes we need. |
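A runnable sketch of the same create-from-script flow using only the stdlib; the schema string below is invented and far smaller than BLOCKSTACK_DB_SCRIPT:

```python
import sqlite3

SCHEMA = """
CREATE TABLE name_records (name TEXT PRIMARY KEY, address TEXT);
CREATE INDEX name_records_by_address ON name_records (address);
"""

con = sqlite3.connect(":memory:", isolation_level=None)
# Execute one statement at a time, like namedb_create does via db_query_execute.
for stmt in [s + ";" for s in SCHEMA.split(";") if s.strip()]:
    con.execute(stmt)
print([row[0] for row in con.execute("SELECT name FROM sqlite_master")])
con.close()
```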
388,605 | def generate_id_name_map(sdk, reverse=False):
global_id_name_dict = {}
global_name_id_dict = {}
system_list = []
if_id_to_name = {}
global_swi_id = {}
global_ln_id = {}
swi_to_wan_network_dict = {}
swi_to_site_dict = {}
wan_network_to_swi_dict = {}
all_anynets = {}
all_vpns = {}
swi_id_name_dict = {}
site_swi_dict = {}
path_id_to_name = {}
vpn_id_to_anynet_id = {}
logger.info("Caching Operators..")
id_operator_dict, operator_id_dict = operators_to_name_dict(sdk)
if id_operator_dict:
global_id_name_dict.update(id_operator_dict)
global_name_id_dict.update(operator_id_dict)
if operator_id_dict:
global_name_id_dict.update(operator_id_dict)
logger.info("Caching Sites..")
id_site_dict, site_id_dict, site_id_list, site_info_dict = siteid_to_name_dict(sdk)
global_id_name_dict.update(id_site_dict)
global_name_id_dict.update(site_id_dict)
logger.info("Caching Elements..")
id_element_dict, element_id_dict, element_site_dict, element_id_list = elements_to_name_dict(sdk)
global_id_name_dict.update(id_element_dict)
global_name_id_dict.update(element_id_dict)
logger.info("Caching WAN Networks..")
id_wannetwork_dict, name_wannetwork_id_dict, wannetwork_id_list, wannetwork_type_dict = wan_network_dicts(sdk)
global_id_name_dict.update(id_wannetwork_dict)
global_name_id_dict.update(name_wannetwork_id_dict)
logger.info("Caching Circuit Catagories..")
id_circuit_categories, name_circuit_categories = circuit_categories_dicts(sdk)
global_id_name_dict.update(id_circuit_categories)
global_name_id_dict.update(name_circuit_categories)
logger.info("Caching Network Contexts..")
id_network_contexts, name_circuit_contexts = network_context_dicts(sdk)
global_id_name_dict.update(id_network_contexts)
global_name_id_dict.update(name_circuit_contexts)
logger.info("Caching Appdefs..")
id_appdef_dict, name_appdef_dict, appdef_id_list = appdefs_to_name_dict(sdk)
global_id_name_dict.update(id_appdef_dict)
global_name_id_dict.update(name_appdef_dict)
logger.info("Caching Policysets..")
id_policyset_dict, name_policyset_dict, policyset_id_list = policyset_to_name_dict(sdk)
global_id_name_dict.update(id_policyset_dict)
global_name_id_dict.update(name_policyset_dict)
logger.info("Caching Security Policysets..")
id_securitypolicyset_dict, name_securitypolicyset_dict, \
securitypolicyset_id_list = securitypolicyset_to_name_dict(sdk)
global_id_name_dict.update(id_securitypolicyset_dict)
global_name_id_dict.update(name_securitypolicyset_dict)
logger.info("Caching Security Zones..")
id_securityzone_dict, securityzone_id_dict, securityzone_id_list = securityzone_to_name_dict(sdk)
global_id_name_dict.update(id_securityzone_dict)
global_name_id_dict.update(securityzone_id_dict)
id_interface_dict = {}
logger.info("Filling Network Site->Element->Interface table..")
for site in site_id_list:
elements = []
swi_id_dict = {}
ln_id_dict = {}
for element in element_id_list:
site_in = element_site_dict.get(element, None)
if site_in and site_in == site:
interfaces_list, if_id_to_name_item, if_name_to_id_item, _, \
_, if_id_data_entry = interface_query(site, element, sdk)
elements.append({
: element,
: id_element_dict.get(element, ""),
: interfaces_list
})
if_id_to_name.update(if_id_to_name_item)
id_interface_dict.update(if_id_data_entry)
system_list.append({
: site,
: id_site_dict.get(site, ""),
: elements
})
resp = sdk.get.waninterfaces(site)
swi_status = resp.cgx_status
swi_query = resp.cgx_content
if swi_status:
for current_swi in swi_query.get('items', []):
wan_network_id = current_swi.get('network_id', "")
swi_id = current_swi.get('id', "")
name = current_swi.get('name')
if name and swi_id:
swi_id_name_dict[swi_id] = name
elif swi_id and wan_network_id:
wan_network_name = id_wannetwork_dict.get(wan_network_id, wan_network_id)
swi_id_name_dict[swi_id] = "Circuit to {0}".format(wan_network_name)
if swi_id:
swi_to_site_dict[swi_id] = site
if wan_network_id and swi_id:
logger.debug(.format(swi_id, site))
existing_swi_list = wan_network_to_swi_dict.get(wan_network_id, [])
swi_to_wan_network_dict[swi_id] = wan_network_id
existing_swi_list.append(swi_id)
wan_network_to_swi_dict[wan_network_id] = existing_swi_list
global_swi_id.update(swi_id_name_dict)
resp = sdk.get.lannetworks(site)
ln_status = resp.cgx_status
ln_query = resp.cgx_content
if ln_status:
for ln in ln_query.get('items'):
ln_id = ln.get('id')
ln_name = ln.get('name')
if ln_id and ln_name:
ln_id_dict[ln_id] = ln_name
global_ln_id.update(ln_id_dict)
logger.info("Loading VPN topology information for {0} sites, please wait...".format(len(site_id_list)))
for anynet_key, link in all_anynets.items():
source_swi = link.get()
if not source_swi:
source_swi = link.get()
dest_swi = link.get()
if not dest_swi:
dest_swi = link.get()
source_site_id = swi_to_site_dict.get(source_swi, )
target_site_id = swi_to_site_dict.get(dest_swi, )
source_wan_network_name = link.get("source_wan_network")
target_wan_network_name = link.get("target_wan_network")
source_site_name,
source_wan_network_name,
source_swi_name,
target_site_name,
target_wan_network_name,
target_swi_name,
)
path_id_to_name[anynet_key] = anynet_text
logger.info("SWI -> WN xlate ({0}): {1}".format(len(swi_to_wan_network_dict),
json.dumps(swi_to_wan_network_dict, indent=4)))
logger.info("All Anynets ({0}): {1}".format(len(all_anynets),
json.dumps(all_anynets, indent=4)))
logger.info("All VPNs ({0}): {1}".format(len(all_vpns),
json.dumps(all_vpns, indent=4)))
logger.info("Site -> SWI construct ({0}): {1}".format(len(site_swi_dict),
json.dumps(site_swi_dict, indent=4)))
logger.info("WN to SWI xlate ({0}): {1}".format(len(wan_network_to_swi_dict),
json.dumps(wan_network_to_swi_dict, indent=4)))
logger.info("SWI -> SITE xlate ({0}): {1}".format(len(swi_to_site_dict),
json.dumps(swi_to_site_dict, indent=4)))
for vpn_key, link in all_vpns.items():
anynet_link_id = link.get("anynet_link_id")
source_element_id = link.get("source_node_id")
target_element_id = link.get("target_node_id")
vpn_id_to_anynet_id[vpn_key] = anynet_link_id
source_element_name = id_element_dict.get(source_element_id, source_element_id)
target_element_name = id_element_dict.get(target_element_id, target_element_id)
anynet_text = path_id_to_name.get(anynet_link_id, anynet_link_id)
vpn_text = "[{0}] : {1} : [{2}]".format(
source_element_name,
anynet_text,
target_element_name
)
path_id_to_name[vpn_key] = vpn_text
global_id_name_dict.update(path_id_to_name)
if reverse:
return global_id_name_dict, global_name_id_dict
return global_id_name_dict | Generate the ID-NAME map dict
:param sdk: CloudGenix API constructor
:param reverse: Generate reverse name-> ID map as well, return tuple with both.
:return: ID Name dictionary |
388,606 | def qs(schema):
def wrapper(func):
setattr(func, QS, schema)
return func
return wrapper | Decorate a function with a query string schema. |
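For illustration, a self-contained version of the same attach-metadata idea; `QS` is assumed to be a simple attribute-name constant:

```python
QS = "_qs_schema"  # assumed constant name

def qs(schema):
    def wrapper(func):
        setattr(func, QS, schema)  # stash the schema on the function object
        return func
    return wrapper

@qs({"page": int, "q": str})
def list_items():
    pass

print(getattr(list_items, QS))  # {'page': <class 'int'>, 'q': <class 'str'>}
```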
388,607 | def fractional(value):
try:
number = float(value)
except (TypeError, ValueError):
return value
wholeNumber = int(number)
frac = Fraction(number - wholeNumber).limit_denominator(1000)
numerator = frac._numerator
denominator = frac._denominator
if wholeNumber and not numerator and denominator == 1:
return "%d" % wholeNumber
elif not wholeNumber:
return "%d/%d" % (numerator, denominator)
else:
return "%d %d/%d" % (wholeNumber, numerator, denominator) | There will be some cases where one might not want to show
ugly decimal places for floats and decimals.
This function returns a human readable fractional number
in form of fractions and mixed fractions.
Pass in a string, or a number or a float, and this function returns
a string representation of a fraction
or whole number
or a mixed fraction
Examples:
fractional(0.3) will return '1/3'
fractional(1.3) will return '1 3/10'
fractional(float(1/3)) will return '1/3'
fractional(1) will return '1'
This will always return a string. |
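A self-contained approximation of the behaviour the docstring describes; the output format strings are assumptions, so the exact strings may differ from the library's:

```python
from fractions import Fraction

def fractional(value):
    try:
        number = float(value)
    except (TypeError, ValueError):
        return value
    whole = int(number)
    frac = Fraction(number - whole).limit_denominator(1000)
    if whole and frac.numerator == 0 and frac.denominator == 1:
        return "%d" % whole                                   # whole number
    if not whole:
        return "%d/%d" % (frac.numerator, frac.denominator)   # pure fraction
    return "%d %d/%d" % (whole, frac.numerator, frac.denominator)  # mixed fraction

print(fractional(0.3))   # 3/10
print(fractional(1.3))   # 1 3/10
print(fractional(1/3))   # 1/3
print(fractional(1))     # 1
```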
388,608 | def _GetDiscoveryDocFromFlags(args):
if args.discovery_url:
try:
return util.FetchDiscoveryDoc(args.discovery_url)
except exceptions.CommunicationError:
raise exceptions.GeneratedClientError(
)
infile = os.path.expanduser(args.infile) or
with io.open(infile, encoding=) as f:
return json.loads(util.ReplaceHomoglyphs(f.read())) | Get the discovery doc from flags. |
388,609 | def get_trace(self, frame, tb):
import linecache
frames = []
stack, _ = self.get_stack(frame, tb)
current = 0
for i, (stack_frame, lno) in enumerate(stack):
code = stack_frame.f_code
filename = code.co_filename or
line = None
if filename[0] == '<' and filename[-1] == '>':
line = get_source_from_byte_code(code)
fn = filename
else:
fn = os.path.abspath(filename)
if not line:
linecache.checkcache(filename)
line = linecache.getline(filename, lno, stack_frame.f_globals)
if not line:
line = self.compile_cache.get(id(code), )
line = to_unicode_string(line, filename)
line = line and line.strip()
startlnos = dis.findlinestarts(code)
lastlineno = list(startlnos)[-1][1]
if frame == stack_frame:
current = i
frames.append({
: fn,
: code.co_name,
: code.co_firstlineno,
: lastlineno,
: lno,
: line,
: i,
: frame == stack_frame
})
return stack, frames, current | Get a dict of the traceback for wdb.js use |
388,610 | def setref(graphtable=None, comptable=None, thermtable=None,
area=None, waveset=None):
global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA, GRAPHDICT, COMPDICT, THERMDICT
GRAPHDICT = {}
COMPDICT = {}
THERMDICT = {}
kwds=set([graphtable,comptable,thermtable,area,waveset])
if kwds == set([None]):
_set_default_refdata()
return
if graphtable is not None:
GRAPHTABLE = irafconvert(graphtable)
if comptable is not None:
COMPTABLE = irafconvert(comptable)
if thermtable is not None:
THERMTABLE = irafconvert(thermtable)
if area is not None:
PRIMARY_AREA = area
if waveset is not None:
if len(waveset) not in (3, 4):
raise ValueError()
minwave = waveset[0]
maxwave = waveset[1]
num = waveset[2]
if len(waveset) == 3:
log = True
elif len(waveset) == 4:
if waveset[3].lower() == 'log':
log = True
elif waveset[3].lower() == 'linear':
log = False
else:
raise ValueError()
set_default_waveset(minwave,maxwave,num,log=log)
return | Set default graph and component tables, primary area, and
wavelength set.
This is similar to setting ``refdata`` in IRAF STSDAS SYNPHOT.
If all parameters set to `None`, they are reverted to software default.
If any of the parameters are not `None`, they are set to desired
values while the rest (if any) remain at current setting.
Parameters
----------
graphtable, comptable, thermtable : str or `None`
Graph, component, and thermal table names, respectively,
for `~pysynphot.observationmode` throughput look-up.
Do not use "*" wildcard.
area : float or `None`
Telescope collecting area, i.e., the primary
mirror, in :math:`\\textnormal{cm}^{2}`.
waveset : tuple or `None`
Parameters for :func:`set_default_waveset` as follow:
* ``(minwave, maxwave, num)`` - This assumes log scale.
* ``(minwave, maxwave, num, 'log')``
* ``(minwave, maxwave, num, 'linear')``
Raises
------
ValueError
Invalid ``waveset`` parameters. |
388,611 | def _resolve_input(variable, variable_name, config_key, config):
if variable is None:
try:
variable = config.get(PROFILE, config_key)
except NoOptionError:
raise ValueError((
).format(variable_name))
return variable | Resolve input entered as option values with config values
If option values are provided (passed in as `variable`), then they are
returned unchanged. If `variable` is None, then we first look for a config
value to use.
If no config value is found, then raise an error.
Parameters
----------
variable: string or numeric
value passed in as input by the user
variable_name: string
name of the variable, for clarity in the error message
config_key: string
key in the config whose value could be used to fill in the variable
config: ConfigParser
contains keys/values in .apparatecfg |
388,612 | def view_packages(self, *args):
ver = GetFromInstalled(args[1]).version()
print(" {0}{1}{2}{3} {4}{5} {6}{7}{8}{9}{10}{11:>11}{12}".format(
args[0], args[1] + ver, self.meta.color["ENDC"],
" " * (23-len(args[1] + ver)), args[2],
" " * (18-len(args[2])), args[3],
" " * (15-len(args[3])), "",
"", "SBo", "", "")).rstrip() | :View slackbuild packages with version and arch
args[0] package color
args[1] package
args[2] version
args[3] arch |
388,613 | def _mark_image_file_deleted(cls, mapper, connection, target):
cls._deleted_images.add((target, get_current_store())) | When the session flushes, marks images as deleted.
The files of this marked images will be actually deleted
in the image storage when the ongoing transaction succeeds.
If it fails the :attr:`_deleted_images` queue will be just
empty. |
388,614 | def pivot(self):
self.op_data = [list(i) for i in zip(*self.ip_data)] | transposes rows and columns |
388,615 | def fill_view(self, view):
other = view.hist
_other_x_center = other.axis(0).GetBinCenter
_other_y_center = other.axis(1).GetBinCenter
_other_z_center = other.axis(2).GetBinCenter
_other_get = other.GetBinContent
_other_get_bin = super(_HistBase, other).GetBin
other_sum_w2 = other.GetSumw2()
_other_sum_w2_at = other_sum_w2.At
_find = self.FindBin
sum_w2 = self.GetSumw2()
_sum_w2_at = sum_w2.At
_sum_w2_setat = sum_w2.SetAt
_set = self.SetBinContent
_get = self.GetBinContent
for x, y, z in view.points:
idx = _find(
_other_x_center(x),
_other_y_center(y),
_other_z_center(z))
other_idx = _other_get_bin(x, y, z)
_set(idx, _get(idx) + _other_get(other_idx))
_sum_w2_setat(
_sum_w2_at(idx) + _other_sum_w2_at(other_idx),
idx) | Fill this histogram from a view of another histogram |
388,616 | def total_branches(self):
exit_counts = self.parser.exit_counts()
return sum([count for count in exit_counts.values() if count > 1]) | How many total branches are there? |
388,617 | def call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any:
if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args):
args = args
return fn(*args)
else:
return fn(args) | Calls a transition operator with args, unpacking args if its a sequence.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`
Returns:
ret: Return value of `fn`. |
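A standalone illustration of the unpack-if-sequence dispatch; namedtuple detection is simplified to a `_fields` check, which is an assumption about what `mcmc_util.is_namedtuple_like` does:

```python
from collections import namedtuple

def call_fn(fn, args):
    # Unpack plain lists/tuples, but pass namedtuples through as a single value.
    if isinstance(args, (list, tuple)) and not hasattr(args, "_fields"):
        return fn(*args)
    return fn(args)

Point = namedtuple("Point", "x y")
print(call_fn(lambda a, b: a + b, (1, 2)))        # 3  (tuple is unpacked)
print(call_fn(lambda p: p.x * p.y, Point(3, 4)))  # 12 (namedtuple passed whole)
```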
388,618 | def identify_needed_data(curr_exe_job, link_job_instance=None):
data_lengths, valid_chunks = curr_exe_job.get_valid_times()
valid_lengths = [abs(valid_chunk) for valid_chunk in valid_chunks]
if link_job_instance:
start_data_loss = max(valid_chunks[0][0], link_valid_chunk[0][0])
end_data_loss = max(data_lengths[0] - valid_chunks[0][1],\
link_data_length[0] - link_valid_chunk[0][1])
valid_chunks[0] = segments.segment(start_data_loss, \
data_lengths[0] - end_data_loss)
link_valid_chunk = segments.segment(start_data_loss, \
link_data_length[0] - end_data_loss)
link_valid_length = abs(link_valid_chunk)
if link_valid_length < valid_lengths[0]:
valid_lengths[0] = link_valid_length
return data_lengths, valid_chunks, valid_lengths | This function will identify the length of data that a specific executable
needs to analyse and what part of that data is valid (ie. inspiral doesn't
analyse the first or last 64+8s of data it reads in).
In addition you can supply a second job instance to "link" to, which will
ensure that the two jobs will have a one-to-one correspondence (ie. one
template bank per one matched-filter job) and the corresponding jobs will
be "valid" at the same times.
Parameters
-----------
curr_exe_job : Job
An instance of the Job class that has a get_valid times method.
link_job_instance : Job instance (optional),
Coordinate the valid times with another executable.
Returns
--------
dataLength : float
The amount of data (in seconds) that each instance of the job must read
in.
valid_chunk : glue.segment.segment
The times within dataLength for which that jobs output **can** be
valid (ie. for inspiral this is (72, dataLength-72) as, for a standard
setup the inspiral job cannot look for triggers in the first 72 or
last 72 seconds of data read in.)
valid_length : float
The maximum length of data each job can be valid for. If not using
link_job_instance this is abs(valid_segment), but can be smaller than
that if the linked job only analyses a small amount of data (for e.g.). |
388,619 | def find_out_var(self, varnames=[]):
if self.wdir != '':
stdout = "%s/%s"%(self.wdir, self.stdout)
else:
stdout = self.stdout
response = [None]*len(varnames)
if os.path.exists(stdout):
with open_(stdout, 'r') as f:
for line in f:
if in line:
var = line.strip().split()
value = var[1].strip()
var = var[0].strip()
if var in varnames: response[varnames.index(var)] = value
else:
debug.log("Error: The stdout file %s does not exist!"%(stdout))
return response | This function will read the standard out of the program, catch
variables and return the values
EG. #varname=value |
388,620 | def handle(self):
database = self.option("database")
self.resolver.set_default_connection(database)
repository = DatabaseMigrationRepository(self.resolver, "migrations")
migrator = Migrator(repository, self.resolver)
if not migrator.repository_exists():
return self.error("No migrations found")
self._prepare_database(migrator, database)
path = self.option("path")
if path is None:
path = self._get_migration_path()
ran = migrator.get_repository().get_ran()
migrations = []
for migration in migrator._get_migration_files(path):
if migration in ran:
migrations.append(["<fg=cyan>%s</>" % migration, "<info>Yes</>"])
else:
migrations.append(["<fg=cyan>%s</>" % migration, "<fg=red>No</>"])
if migrations:
table = self.table(["Migration", "Ran?"], migrations)
table.render()
else:
return self.error("No migrations found")
for note in migrator.get_notes():
self.line(note) | Executes the command. |
388,621 | def rate_of_return(period_ret, base_period):
period_len = period_ret.name
conversion_factor = (pd.Timedelta(base_period) /
pd.Timedelta(period_len))
return period_ret.add(1).pow(conversion_factor).sub(1) | Convert returns to 'one_period_len' rate of returns: that is the value the
returns would have every 'one_period_len' if they had grown at a steady
rate
Parameters
----------
period_ret: pd.DataFrame
DataFrame containing returns values with column headings representing
the return period.
base_period: string
The base period length used in the conversion
It must follow pandas.Timedelta constructor format (e.g. '1 days',
'1D', '30m', '3h', '1D1h', etc)
Returns
-------
pd.DataFrame
DataFrame in same format as input but with 'one_period_len' rate of
returns values. |
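A small worked example of the conversion, using a hypothetical 5-day return series converted to a daily rate:

```python
import pandas as pd

period_ret = pd.Series([0.10, -0.05], name="5D")  # 10% and -5% over 5 days
factor = pd.Timedelta("1D") / pd.Timedelta(period_ret.name)  # 1/5
daily = period_ret.add(1).pow(factor).sub(1)
print(daily)  # ~0.0192 and ~-0.0102: steady daily rates that compound to the 5-day returns
```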
388,622 | def read(self, size=-1):
if self.connected is None:
return None
data = self._get_input(size)
return data | ! @brief Return bytes read from the connection. |
388,623 | def find_or_graft(self, board):
is_duplicate_board = True
node = self
for p, new_tile in board.positions_with_tile():
found_tile = False
for child in node.children:
if child.tile == new_tile:
node = child
found_tile = True
break
if not found_tile:
child = _DuplicateTree(new_tile)
node.graft_child(child)
node = child
is_duplicate_board = False
return is_duplicate_board | Build a tree with each level corresponding to a fixed position on
board. A path of tiles is stored for each board. If any two boards
have the same path, then they are the same board. If there is any
difference, a new branch will be created to store that path.
Return: True if board already exists in the tree; False otherwise |
388,624 | def InitUI(self):
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.init_grid_headers()
self.grid_builder = GridBuilder(self.er_magic, self.grid_type, self.grid_headers,
self.panel, self.parent_type)
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
self.add_cols_button = wx.Button(self.panel, label="Add additional columns",
name=)
self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button)
self.remove_cols_button = wx.Button(self.panel, label="Remove columns",
name=)
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
self.remove_row_button = wx.Button(self.panel, label="Remove last row",
name=)
self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button)
many_rows_box = wx.BoxSizer(wx.HORIZONTAL)
self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)",
name=)
self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value=, initial=1,
name=)
many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE)
many_rows_box.Add(self.rows_spin_ctrl)
self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button)
self.deleteRowButton = wx.Button(self.panel, id=-1, label=, name=)
self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton)
self.deleteRowButton.Disable()
self.importButton = wx.Button(self.panel, id=-1,
label=, name=)
self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton)
self.exitButton = wx.Button(self.panel, id=-1,
label=, name=)
self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton)
self.cancelButton = wx.Button(self.panel, id=-1, label=, name=)
self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton)
self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help",
name=)
self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn)
self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name=), wx.VERTICAL)
self.default_msg_text = .format(self.grid_type + )
txt =
if self.grid_type == :
txt =
if self.grid_type == :
txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the samplespecimens class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically."
if self.grid_type == :
txt = "\n\nNote: only ages for which you provide data will be written to your upload file."
self.default_msg_text += txt
self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text,
style=wx.TE_CENTER, name=)
self.help_msg_boxsizer.Add(self.msg_text)
self.help_msg_boxsizer.ShowItems(False)
self.toggle_codes_btn = wx.Button(self.panel, id=-1, label="Show method codes",
name=)
self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn)
self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, vocab)
self.code_msg_boxsizer.ShowItems(False)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label=,
name=), wx.VERTICAL)
row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label=,
name=), wx.VERTICAL)
main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label=,
name=), wx.VERTICAL)
col_btn_vbox.Add(self.add_cols_button, 1, flag=wx.ALL, border=5)
col_btn_vbox.Add(self.remove_cols_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(many_rows_box, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.remove_row_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.deleteRowButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.importButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.exitButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.cancelButton, 1, flag=wx.ALL, border=5)
self.hbox.Add(col_btn_vbox, 1)
self.hbox.Add(row_btn_vbox, 1)
self.hbox.Add(main_btn_vbox, 1)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit)
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
if self.grid_type == :
self.grid_builder.add_age_data_to_grid()
if self.parent_type:
belongs_to = sorted(self.er_magic.data_lists[self.parent_type][0], key=lambda item: item.name)
else:
belongs_to =
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, belongs_to)
self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name=), wx.VERTICAL)
self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5)
if self.grid_type == :
lat_lon_dict = self.er_magic.get_min_max_lat_lon(self.er_magic.locations)
for loc in self.er_magic.locations:
d = lat_lon_dict[loc.name]
col_labels = [self.grid.GetColLabelValue(col) for col in range(self.grid.GetNumberCols())]
row_labels = [self.grid.GetCellValue(row, 0) for row in range(self.grid.GetNumberRows())]
for key, value in list(d.items()):
if value:
if str(loc.er_data[key]) == str(value):
pass
else:
loc.er_data[key] = value
col_ind = col_labels.index(key)
row_ind = row_labels.index(loc.name)
self.grid.SetCellValue(row_ind, col_ind, str(value))
if not self.grid.changes:
self.grid.changes = set([row_ind])
else:
self.grid.changes.add(row_ind)
if self.grid_type == :
self.remove_row_button.Disable()
self.add_many_rows_button.Disable()
self.grid.SetColLabelValue(0, )
toggle_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label=, name=), wx.VERTICAL)
levels = [, , , ]
age_level = pw.radio_buttons(self.panel, levels, )
level_ind = levels.index(self.er_magic.age_type)
age_level.radio_buttons[level_ind].SetValue(True)
toggle_box.Add(age_level)
self.Bind(wx.EVT_RADIOBUTTON, self.toggle_ages)
self.hbox.Add(toggle_box)
if self.grid_type == :
self.drop_down_menu.choices[2] = [sorted([spec.name for spec in self.er_magic.specimens if spec]), False]
self.drop_down_menu.choices[3] = [sorted([samp.name for samp in self.er_magic.samples if samp]), False]
self.drop_down_menu.choices[4] = [sorted([site.name for site in self.er_magic.sites if site]), False]
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
for row in range(self.grid.GetNumberRows()):
result_name = self.grid.GetCellValue(row, 0)
result = self.er_magic.find_by_name(result_name, self.er_magic.results)
if result:
if result.specimens:
self.grid.SetCellValue(row, 2, .join([pmag.get_attr(spec) for spec in result.specimens]))
if result.samples:
self.grid.SetCellValue(row, 3, .join([pmag.get_attr(samp) for samp in result.samples]))
if result.sites:
self.grid.SetCellValue(row, 4, .join([pmag.get_attr(site) for site in result.sites]))
if result.locations:
self.grid.SetCellValue(row, 5, .join([pmag.get_attr(loc) for loc in result.locations]))
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, border=20)
self.main_sizer.Add(self.toggle_help_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.help_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=10)
self.main_sizer.Add(self.toggle_codes_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.code_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.EXPAND, border=10)
self.panel.SetSizer(self.main_sizer)
self.main_sizer.Fit(self)
self.Centre()
self.Show() | initialize window |
388,625 | def code_from_ipynb(nb, markdown=False):
code = PREAMBLE
for cell in nb[]:
if cell[] == :
code += .join(cell[])
if cell[] == :
code += + .join(cell[])
code +=
return code | Get the code for a given notebook
nb is passed in as a dictionary that's a parsed ipynb file |
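A runnable sketch of pulling source out of a parsed notebook dict; the cell keys are the standard nbformat ones, while the preamble and markdown handling are simplified guesses at what the stripped string literals contained:

```python
nb = {
    "cells": [
        {"cell_type": "markdown", "source": ["# Intro\n", "Some text.\n"]},
        {"cell_type": "code", "source": ["x = 1\n", "print(x)\n"]},
    ]
}

code = "# generated from notebook\n"
for cell in nb["cells"]:
    if cell["cell_type"] == "code":
        code += "".join(cell["source"])
    elif cell["cell_type"] == "markdown":
        # keep markdown cells as comments
        code += "".join("# " + line for line in cell["source"])
    code += "\n"
print(code)
```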
388,626 | def animation_dialog(images, delay_s=1., loop=True, **kwargs):
def _as_pixbuf(image):
if isinstance(image, types.StringTypes):
return gtk.gdk.pixbuf_new_from_file(image)
else:
return image
pixbufs = map(_as_pixbuf, images)
gtk.gdk.threads_init()
dialog = gtk.MessageDialog(**kwargs)
image = gtk.Image()
content_area = dialog.get_content_area()
content_area.pack_start(image)
content_area.show_all()
stop_animation = threading.Event()
def _stop_animation(*args):
stop_animation.set()
def _animate(dialog):
def __animate():
if loop:
frames = it.cycle(pixbufs)
else:
frames = pixbufs
for pixbuf_i in frames:
gobject.idle_add(image.set_from_pixbuf, pixbuf_i)
if stop_animation.wait(delay_s):
break
thread = threading.Thread(target=__animate)
thread.daemon = True
thread.start()
dialog.connect(, _stop_animation)
dialog.connect(, _animate)
return dialog | .. versionadded:: v0.19
Parameters
----------
images : list
Filepaths to images or :class:`gtk.Pixbuf` instances.
delay_s : float, optional
Number of seconds to display each frame.
Default: ``1.0``.
loop : bool, optional
If ``True``, restart animation after last image has been displayed.
Default: ``True``.
Returns
-------
gtk.MessageDialog
Message dialog with animation displayed in `gtk.Image` widget when
dialog is run. |
388,627 | def write_puml(self, filename=):
def get_type(o):
type =
if isinstance(o, AbstractSensor):
type =
elif isinstance(o, AbstractActuator):
type =
return type
if filename:
s = open(filename, )
else:
s = io.StringIO()
s.write()
s.write()
for k, v in list(self.background_colors.items()):
s.write( % (k, v))
s.write()
for o in self.system.objects:
if isinstance(o, DefaultProgram) or o.hide_in_uml:
continue
if isinstance(o, ProgrammableSystemObject):
s.write( % (o, o, get_type(o)))
s.write( % (o, o.class_name))
if isinstance(o, AbstractActuator):
for p in reversed(o.program_stack):
s.write( % (o, p, o.program_status.get(p, )))
elif hasattr(o, ):
s.write( % (o, o.status))
if getattr(o, , False):
s.write( % (o, o.priority))
for t in o.actual_triggers:
if isinstance(t, DefaultProgram) or t.hide_in_uml:
continue
s.write( % (t, self.arrow_colors[], o))
for t in o.actual_targets:
if t.hide_in_uml:
continue
if o.active:
color =
else:
color =
if getattr(t, , None) == o:
color =
s.write( % (o, self.arrow_colors[color], t))
s.write()
if filename:
s.close()
else:
return s.getvalue() | Writes PUML from the system. If filename is given, stores result in the file.
Otherwise returns result as a string. |
388,628 | def encrypt(passwd):
m = sha1()
salt = hexlify(os.urandom(salt_len))
m.update(unicode2bytes(passwd) + salt)
crypted = bytes2unicode(salt) + m.hexdigest()
return crypted | Encrypts the incoming password after adding some salt to store
it in the database.
@param passwd: password portion of user credentials
@type passwd: string
@returns: encrypted/salted string |
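A self-contained version of the salt-then-hash scheme; `SALT_LEN` and the plain encode/decode calls are assumptions replacing the module's `salt_len`, `unicode2bytes` and `bytes2unicode` helpers:

```python
import os
from binascii import hexlify
from hashlib import sha1

SALT_LEN = 8  # assumed salt length in random bytes

def encrypt(passwd):
    salt = hexlify(os.urandom(SALT_LEN))          # hex-encoded salt, as bytes
    digest = sha1(passwd.encode("utf-8") + salt)  # hash password + salt
    return salt.decode("ascii") + digest.hexdigest()

print(encrypt("s3cret"))  # 16 hex chars of salt followed by 40 hex chars of SHA-1
```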
388,629 | def find_first_file_with_ext(base_paths, prefix, exts):
for base_path in base_paths:
for ext in exts:
filename = os.path.join(base_path, "%s%s" % (prefix, ext))
if os.path.exists(filename) and os.path.isfile(filename):
logger.debug("Found first file with relevant extension: %s", filename)
return base_path, ext
logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
return None, None | Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None). |
388,630 | def update(self, dt):
self.translate(dt * self.velocity)
self.rotate(dt * self.angular_velocity) | Update the shape's position by moving it forward according to its velocity.
Parameters
----------
dt : float |
388,631 | def parse_bug_activity(raw_html):
def is_activity_empty(bs):
EMPTY_ACTIVITY = "No changes have been made to this (?:bug|issue) yet."
tag = bs.find(text=re.compile(EMPTY_ACTIVITY))
return tag is not None
def find_activity_table(bs):
tables = bs.find_all('table')
for tb in tables:
nheaders = len(tb.tr.find_all('th', recursive=False))
if nheaders == 5:
return tb
raise ParseError(cause="Table of bug activity not found.")
def remove_tags(bs):
HTML_TAGS_TO_REMOVE = [, , ]
for tag in bs.find_all(HTML_TAGS_TO_REMOVE):
tag.replaceWith(tag.text)
def format_text(bs):
strings = [s.strip() for s in bs.stripped_strings]
s = .join(strings)
return s
bs = bs4.BeautifulSoup(raw_html, 'html.parser')
if is_activity_empty(bs):
fields = []
else:
activity_tb = find_activity_table(bs)
remove_tags(activity_tb)
fields = activity_tb.find_all('td')
while fields:
who = fields.pop(0)
when = fields.pop(0)
n = int(who.get('rowspan'))
for _ in range(n):
what = fields.pop(0)
removed = fields.pop(0)
added = fields.pop(0)
event = {: format_text(who),
: format_text(when),
: format_text(what),
: format_text(removed),
: format_text(added)}
yield event | Parse a Bugzilla bug activity HTML stream.
This method extracts the information about activity from the
given HTML stream. The bug activity is stored into a HTML
table. Each parsed activity event is returned into a dictionary.
If the given HTML is invalid, the method will raise a ParseError
exception.
:param raw_html: HTML string to parse
:returns: a generator of parsed activity events
:raises ParseError: raised when an error occurs parsing
the given HTML stream |
388,632 | def respond(self, output):
response = {: output.code,
: output.log}
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8")) | Generates server response. |
388,633 | def V(self, brightest=False):
mags = self.get_photometry(brightest=brightest, convert=False)
VT, dVT = mags['VT']
BT, dBT = mags['BT']
if (-0.25 < BT - VT < 2.0):
(a, b, c, d) = (0.00097, 0.1334, 0.05486, 0.01998)
V = (VT + a - b * (BT - VT) + c * (BT - VT)**2 -
d * (BT - VT)**3)
dVdVT = 1 + b - 2*c*(BT-VT) + 3*d*(BT-VT)**2
dVdBT = -b + 2*c*(BT-VT) - 3*d*(BT-VT)**2
dV = np.sqrt((dVdVT**2 * dVT**2) + (dVdBT**2*dBT**2))
else:
raise ValueError()
return V, dV | http://www.aerith.net/astro/color_conversion.html |
388,634 | def extend(self, *args, **kwargs):
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({0} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value) | Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__ |
388,635 | def isclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False):
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x-y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
try:
dt = np.result_type(y, 1.)
except TypeError:
dt = np.dtype(np.quaternion)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
if np.isscalar(a) and np.isscalar(b):
return bool(cond)
else:
return cond | Returns a boolean array where two arrays are element-wise equal within a
tolerance.
This function is essentially a copy of the `numpy.isclose` function,
with different default tolerances and one minor change necessary to
deal correctly with quaternions.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> quaternion.isclose([1e10*quaternion.x, 1e-7*quaternion.y], [1.00001e10*quaternion.x, 1e-8*quaternion.y],
... rtol=1.e-5, atol=1.e-8)
array([True, False])
>>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.00001e10*quaternion.x, 1e-9*quaternion.y],
... rtol=1.e-5, atol=1.e-8)
array([True, True])
>>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.0001e10*quaternion.x, 1e-9*quaternion.y],
... rtol=1.e-5, atol=1.e-8)
array([False, True])
>>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y])
array([True, False])
>>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y], equal_nan=True)
array([True, True]) |
388,636 | def create(cls, second_line, name_on_card, alias=None, type_=None,
pin_code_assignment=None, monetary_account_id_fallback=None,
custom_headers=None):
if custom_headers is None:
custom_headers = {}
request_map = {
cls.FIELD_SECOND_LINE: second_line,
cls.FIELD_NAME_ON_CARD: name_on_card,
cls.FIELD_ALIAS: alias,
cls.FIELD_TYPE: type_,
cls.FIELD_PIN_CODE_ASSIGNMENT: pin_code_assignment,
cls.FIELD_MONETARY_ACCOUNT_ID_FALLBACK: monetary_account_id_fallback
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
api_client = client.ApiClient(cls._get_api_context())
request_bytes = request_map_string.encode()
request_bytes = security.encrypt(cls._get_api_context(), request_bytes,
custom_headers)
endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id())
response_raw = api_client.post(endpoint_url, request_bytes,
custom_headers)
return BunqResponseCardDebit.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_POST)
) | Create a new debit card request.
:type user_id: int
:param second_line: The second line of text on the card, used as
name/description for it. It can contain at most 17 characters and it can
be empty.
:type second_line: str
:param name_on_card: The user's name as it will be on the card. Check
'card-name' for the available card names for a user.
:type name_on_card: str
:param alias: The pointer to the monetary account that will be connected
at first with the card. Its IBAN code is also the one that will be
printed on the card itself. The pointer must be of type IBAN.
:type alias: object_.Pointer
:param type_: The type of card to order. Can be MAESTRO or MASTERCARD.
:type type_: str
:param pin_code_assignment: Array of Types, PINs, account IDs assigned
to the card.
:type pin_code_assignment: list[object_.CardPinAssignment]
:param monetary_account_id_fallback: ID of the MA to be used as fallback
for this card if insufficient balance. Fallback account is removed if
not supplied.
:type monetary_account_id_fallback: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseCardDebit |
388,637 | def H(self, phase, T):
try:
return self._phases[phase].H(T)
except KeyError:
raise Exception("The phase was not found in compound ."
.format(phase, self.formula)) | Calculate the enthalpy of a phase of the compound at a specified
temperature.
:param phase: A phase of the compound, e.g. 'S', 'L', 'G'.
:param T: [K] temperature
:returns: [J/mol] Enthalpy. |
388,638 | def add_entity(self):
post_data = self.get_post_data()
if in post_data:
if post_data[] == :
self.add_pic(post_data)
elif post_data[] == :
self.add_pdf(post_data)
elif post_data[] == :
self.add_url(post_data)
else:
pass
else:
self.add_pic(post_data) | Add the entity. All the information got from the post data. |
388,639 | def __construct_claim_json(self):
def handle_qualifiers(old_item, new_item):
if not new_item.check_qualifier_equality:
old_item.set_qualifiers(new_item.get_qualifiers())
def is_good_ref(ref_block):
if len(WDItemEngine.databases) == 0:
WDItemEngine._init_ref_system()
prop_nrs = [x.get_prop_nr() for x in ref_block]
values = [x.get_value() for x in ref_block]
good_ref = True
prop_value_map = dict(zip(prop_nrs, values))
if self.good_refs and len(self.good_refs) > 0:
found_good = True
for rblock in self.good_refs:
if not all([k in prop_value_map for k, v in rblock.items()]):
found_good = False
if not all([v in prop_value_map[k] for k, v in rblock.items() if v]):
found_good = False
if found_good:
return True
return False
ref_properties = [, , ]
for v in values:
if prop_nrs[values.index(v)] == :
return True
elif v == :
return True
for p in ref_properties:
if p not in prop_nrs:
return False
for ref in ref_block:
pn = ref.get_prop_nr()
value = ref.get_value()
if pn == and value not in WDItemEngine.databases and not in prop_nrs:
return False
elif pn == and value in WDItemEngine.databases:
db_props = WDItemEngine.databases[value]
if not any([False if x not in prop_nrs else True for x in db_props]) and not in prop_nrs:
return False
return good_ref
def handle_references(old_item, new_item):
ref_properties = [, , , , ]
new_references = new_item.get_references()
old_references = old_item.get_references()
if any([z.overwrite_references for y in new_references for z in y]) \
or sum(map(lambda z: len(z), old_references)) == 0 \
or self.global_ref_mode == :
old_item.set_references(new_references)
elif self.global_ref_mode == or new_item.statement_ref_mode == :
pass
elif self.global_ref_mode == or new_item.statement_ref_mode == :
old_references.extend(new_references)
old_item.set_references(old_references)
elif self.global_ref_mode == or new_item.statement_ref_mode == :
self.ref_handler(old_item, new_item)
elif self.global_ref_mode == or new_item.statement_ref_mode == :
keep_block = [False for x in old_references]
for count, ref_block in enumerate(old_references):
stated_in_value = [x.get_value() for x in ref_block if x.get_prop_nr() == ]
if is_good_ref(ref_block):
keep_block[count] = True
new_ref_si_values = [x.get_value() if x.get_prop_nr() == else None
for z in new_references for x in z]
for si in stated_in_value:
if si in new_ref_si_values:
keep_block[count] = False
refs = [x for c, x in enumerate(old_references) if keep_block[c]]
refs.extend(new_references)
old_item.set_references(refs)
self.data.sort(key=lambda z: z.get_prop_nr().lower())
statements_for_deletion = []
for item in self.data:
if item.get_value() == and isinstance(item, WDBaseDataType):
statements_for_deletion.append(item.get_prop_nr())
if self.create_new_item:
self.statements = copy.copy(self.data)
else:
for stat in self.data:
prop_nr = stat.get_prop_nr()
prop_data = [x for x in self.statements if x.get_prop_nr() == prop_nr]
prop_pos = [x.get_prop_nr() == prop_nr for x in self.statements]
prop_pos.reverse()
insert_pos = len(prop_pos) - (prop_pos.index(True) if any(prop_pos) else 0)
if prop_nr in self.append_value:
equal_items = [stat == x for x in prop_data]
if True not in equal_items:
self.statements.insert(insert_pos + 1, stat)
else:
current_item = prop_data[equal_items.index(True)]
current_item.set_rank(stat.get_rank())
handle_references(old_item=current_item, new_item=stat)
handle_qualifiers(old_item=current_item, new_item=stat)
continue
for x in prop_data:
if hasattr(stat, ):
break
elif x.get_id() and not hasattr(x, ):
if self.keep_good_ref_statements:
if any([is_good_ref(r) for r in x.get_references()]):
setattr(x, , )
else:
setattr(x, , )
match = []
for i in prop_data:
if stat == i and hasattr(stat, ):
match.append(True)
setattr(i, , )
elif stat == i:
match.append(True)
setattr(i, , )
if hasattr(i, ):
delattr(i, )
handle_references(old_item=i, new_item=stat)
handle_qualifiers(old_item=i, new_item=stat)
i.set_rank(rank=stat.get_rank())
elif i.get_value():
match.append(False)
if True not in match and not hasattr(stat, ):
self.statements.insert(insert_pos + 1, stat)
for item in copy.deepcopy(self.statements):
if item.get_prop_nr() in statements_for_deletion and item.get_id() != :
setattr(item, , )
elif item.get_prop_nr() in statements_for_deletion:
self.statements.remove(item)
self.wd_json_representation[] = {}
for stat in self.statements:
prop_nr = stat.get_prop_nr()
if prop_nr not in self.wd_json_representation[]:
self.wd_json_representation[][prop_nr] = []
self.wd_json_representation[][prop_nr].append(stat.get_json_representation()) | Writes the properties from self.data to a new or existing json in self.wd_json_representation
:return: None |
388,640 | def isNonPairTag(self, isnonpair=None):
if isnonpair is None:
return self._isnonpairtag
if not self._istag:
return
if isnonpair:
self.endtag = None
self.childs = []
self._isnonpairtag = isnonpair | True if element is listed in nonpair tag table (``br`` for example) or
if it ends with ``/>`` (``<hr />`` for example).
You can also change state from pair to nonpair if you use this as
setter.
Args:
isnonpair (bool, default None): If set, internal nonpair state is
changed.
Returns:
book: True if tag is nonpair. |
388,641 | def save(self, sc, path):
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
_py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
java_model.save(sc._jsc.sc(), path) | Save this model to the given path. |
388,642 | def get_rup_array(ebruptures, srcfilter=nofilter):
if not BaseRupture._code:
BaseRupture.init()
rups = []
geoms = []
nbytes = 0
offset = 0
for ebrupture in ebruptures:
rup = ebrupture.rupture
mesh = surface_to_array(rup.surface)
sy, sz = mesh.shape[1:]
assert sy < TWO16, % sy
assert sz < TWO16,
points = mesh.reshape(3, -1).T
minlon = points[:, 0].min()
minlat = points[:, 1].min()
maxlon = points[:, 0].max()
maxlat = points[:, 1].max()
if srcfilter.integration_distance and len(srcfilter.close_sids(
(minlon, minlat, maxlon, maxlat),
rup.tectonic_region_type, rup.mag)) == 0:
continue
hypo = rup.hypocenter.x, rup.hypocenter.y, rup.hypocenter.z
rate = getattr(rup, 'occurrence_rate', numpy.nan)
tup = (ebrupture.serial, ebrupture.srcidx, ebrupture.grp_id,
rup.code, ebrupture.n_occ, rup.mag, rup.rake, rate,
minlon, minlat, maxlon, maxlat,
hypo, offset, offset + len(points), sy, sz)
offset += len(points)
rups.append(tup)
geoms.append(numpy.array([tuple(p) for p in points], point3d))
nbytes += rupture_dt.itemsize + mesh.nbytes
if not rups:
return ()
dic = dict(geom=numpy.concatenate(geoms), nbytes=nbytes)
return hdf5.ArrayWrapper(numpy.array(rups, rupture_dt), dic) | Convert a list of EBRuptures into a numpy composite array, by filtering
out the ruptures far away from every site |
388,643 | def fixed_poch(a, n):
if (int(n) != n) or (n < 0):
raise ValueError("Parameter n must be a nonnegative int!")
n = int(n)
terms = [a + k for k in range(0, n)]
return scipy.prod(terms) | Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly.
Need conditional statement because scipy's impelementation of the Pochhammer
symbol is wrong for negative integer arguments. This function uses the
definition from
http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/
Parameters
----------
a : float
The argument.
n : nonnegative int
The order. |
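A stdlib-only version of the same rising-factorial product, including a negative-integer argument where the naive gamma-based formula breaks down:

```python
import math

def fixed_poch(a, n):
    """Rising factorial (a)_n = a (a+1) ... (a+n-1), valid for negative integer a."""
    if int(n) != n or n < 0:
        raise ValueError("n must be a nonnegative int")
    return math.prod(a + k for k in range(int(n)))

print(fixed_poch(3, 4))    # 3*4*5*6 = 360
print(fixed_poch(-3, 2))   # (-3)*(-2) = 6
print(fixed_poch(-3, 5))   # contains the factor 0, so the result is 0
```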
388,644 | def calculate_start_time(df):
if "time" in df:
df["time_arr"] = pd.Series(df["time"], dtype=)
elif "timestamp" in df:
df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
else:
return df
if "dataset" in df:
for dset in df["dataset"].unique():
time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
df.loc[df["dataset"] == dset, "start_time"] = \
df.loc[df["dataset"] == dset, "time_arr"] - time_zero
else:
df["start_time"] = df["time_arr"] - df["time_arr"].min()
return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore") | Calculate the star_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from fastq_rich format)
and has to be converted appropriately in a datetime format time_arr
For both the time_zero is the minimal value of the time_arr,
which is then used to subtract from all other times
In the case of method=track (and dataset is a column in the df) then this
subtraction is done per dataset |
def load_time_series(filename, delimiter=r"\s+"):
times, values = load_delimited(filename, [float, float], delimiter)
times = np.array(times)
values = np.array(values)
return times, values | r"""Import a time series from an annotation file. The file should consist of
two columns of numeric values corresponding to the time and value of each
sample of the time series.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : np.ndarray
array of corresponding numeric values (float) |
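A minimal stand-in for `load_delimited` showing the same two-column parse with a whitespace regex:

```python
import re
import numpy as np

text = """0.0  1.5
0.5  2.25
1.0  3.0"""

times, values = [], []
for line in text.splitlines():
    t, v = re.split(r"\s+", line.strip())  # split each row on any whitespace
    times.append(float(t))
    values.append(float(v))
times, values = np.array(times), np.array(values)
print(times, values)  # [0.  0.5 1. ] [1.5  2.25 3.  ]
```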
388,646 | def find_category(self, parent_alias, title):
found = None
child_ids = self.get_child_ids(parent_alias)
for cid in child_ids:
category = self.get_category_by_id(cid)
if category.title.lower() == title.lower():
found = category
break
return found | Searches parent category children for the given title (case independent).
:param str parent_alias:
:param str title:
:rtype: Category|None
:return: None if not found; otherwise - found Category |
388,647 | def init(
dist=,
minver=None,
maxver=None,
use_markdown_readme=True,
use_stdeb=False,
use_distribute=False,
):
if not minver == maxver == None:
import sys
if not minver <= sys.version < (maxver or ):
sys.stderr.write(
% (
sys.argv[0], minver or , maxver or , sys.version.split()[0]))
sys.exit(1)
if use_distribute:
from distribute_setup import use_setuptools
use_setuptools(to_dir=dist)
from setuptools import setup
else:
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if use_markdown_readme:
try:
import setuptools.command.sdist
setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, , ()))
+ [])
except ImportError:
pass
if use_stdeb:
import platform
if in platform.dist():
try:
import stdeb
except ImportError:
pass
return setup | Imports and returns a setup function.
If use_markdown_readme is set,
then README.md is added to setuptools READMES list.
If use_stdeb is set on a Debian based system,
then module stdeb is imported.
Stdeb supports building deb packages on Debian based systems.
The package should only be installed on the same system version
it was built on, though. See http://github.com/astraw/stdeb.
If use_distribute is set, then distribute_setup.py is imported. |
388,648 | def calc_percentile_interval(self, conf_percentage):
alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
single_column_names =\
[.format(alpha / 2.0),
.format(100 - alpha / 2.0)]
conf_intervals =\
bc.calc_percentile_interval(self.bootstrap_replicates.values,
conf_percentage)
self.percentile_interval =\
pd.DataFrame(conf_intervals.T,
index=self.mle_params.index,
columns=single_column_names)
return None | Calculates percentile bootstrap confidence intervals for one's model.
Parameters
----------
conf_percentage : scalar in the interval (0.0, 100.0).
Denotes the confidence-level for the returned endpoints. For
instance, to calculate a 95% confidence interval, pass `95`.
Returns
-------
None. Will store the percentile intervals as `self.percentile_interval`
Notes
-----
Must have all ready called `self.generate_bootstrap_replicates`. |
388,649 | def monkey_patch(enabled=True):
if enabled:
Image.open = imdirect_open
else:
Image.open = pil_open | Monkey patching PIL.Image.open method
Args:
enabled (bool): If the monkey patch should be activated or deactivated. |
388,650 | def _variants(self, case_id, gemini_query):
individuals = []
case_obj = self.case(case_id)
for individual in case_obj.individuals:
individuals.append(individual)
self.db = case_obj.variant_source
self.variant_type = case_obj.variant_type
gq = GeminiQuery(self.db)
gq.run(gemini_query)
index = 0
for gemini_variant in gq:
variant = None
is_variant = self._is_variant(gemini_variant, individuals)
if self.variant_type == and not is_variant:
variant = None
else:
index += 1
logger.debug("Updating index to: {0}".format(index))
variant = self._format_variant(
case_id=case_id,
gemini_variant=gemini_variant,
individual_objs=individuals,
index=index
)
if variant:
yield variant | Return variants found in the gemini database
Args:
case_id (str): The case for which we want to see information
gemini_query (str): What variants should be chosen
filters (dict): A dictionary with filters
Yields:
variant_obj (dict): A Variant formatted dictionary |
388,651 | def init(self, force_deploy=False, client=None):
_force_deploy = self.provider_conf.force_deploy
self.provider_conf.force_deploy = _force_deploy or force_deploy
self._provider_conf = self.provider_conf.to_dict()
r = api.Resources(self._provider_conf, client=client)
r.launch()
roles = r.get_roles()
networks = r.get_networks()
return (_to_enos_roles(roles),
_to_enos_networks(networks)) | Reserve and deploys the nodes according to the resources section
In comparison to the vagrant provider, networks must be characterized
as in the networks key.
Args:
force_deploy (bool): True iff the environment must be redeployed
Raises:
MissingNetworkError: If one network is missing in comparison to
what is claimed.
NotEnoughNodesError: If the `min` constraints can't be met. |
388,652 | def path(self):
if self._root_dir is None:
override_buildroot = os.environ.get('PANTS_BUILDROOT_OVERRIDE', None)
if override_buildroot:
self._root_dir = override_buildroot
else:
self._root_dir = os.path.realpath(self.find_buildroot())
if PY2:
self._root_dir = self._root_dir.decode()
return self._root_dir | Returns the build root for the current workspace. |
388,653 | def _check_params(self,params):
overridden_object_params = list(self._overridden.param)
for item in params:
if item not in overridden_object_params:
self.param.warning("'%s' will be ignored (not a Parameter).", item) | Print a warning if params contains something that is not a
Parameter of the overridden object. |
388,654 | def validate_backup_window(window):
hour = r'[01]?[0-9]|2[0-3]'
minute = r'[0-5][0-9]'
r = ("(?P<start_hour>%s):(?P<start_minute>%s)-"
"(?P<end_hour>%s):(?P<end_minute>%s)") % (hour, minute, hour, minute)
range_regex = re.compile(r)
m = range_regex.match(window)
if not m:
raise ValueError("DBInstance PreferredBackupWindow must be in the "
"format: hh24:mi-hh24:mi")
start_ts = (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))
end_ts = (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))
if abs(end_ts - start_ts) < 30:
raise ValueError("DBInstance PreferredBackupWindow must be at least "
"30 minutes long.")
return window | Validate PreferredBackupWindow for DBInstance |
388,655 | def get_message_headers(self, section: Sequence[int] = None,
subset: Collection[bytes] = None,
inverse: bool = False) -> Writeable:
... | Get the headers from the message or a ``message/rfc822`` sub-part of
the message.
The ``section`` argument can index a nested sub-part of the message.
For example, ``[2, 3]`` would get the 2nd sub-part of the message and
then index it for its 3rd sub-part.
Args:
section: Optional nested list of sub-part indexes.
subset: Subset of headers to get.
inverse: If ``subset`` is given, this flag will invert it so that
the headers *not* in ``subset`` are returned. |
388,656 | def get_stack_refs(refs: list):
refs = list(refs)
refs.reverse()
stack_refs = []
last_stack = None
while refs:
ref = refs.pop()
if last_stack is not None and re.compile(r'v[0-9][a-zA-Z0-9-]*$').match(ref):  # version-like token check
stack_refs.append(StackReference(last_stack, ref))
else:
try:
with open(ref) as fd:
data = yaml.safe_load(fd)
ref = data['SenzaInfo']['StackName']
except (OSError, IOError):
pass
if refs:
version = refs.pop()
else:
version = None
stack_refs.append(StackReference(ref, version))
last_stack = ref
return stack_refs | Returns a list of stack references with name and version. |
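For orientation, a sketch of how bare names and trailing version tokens pair up (assuming StackReference is a simple name/version record and no local file named 'mystack' exists):
>>> get_stack_refs(['mystack', '1', 'otherstack'])
[StackReference(name='mystack', version='1'), StackReference(name='otherstack', version=None)]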
388,657 | def getBinding(self):
wsdl = self.getService().getWSDL()
return wsdl.bindings[self.binding] | Return the Binding object that is referenced by this port. |
388,658 | def ReadClientCrashInfo(self, client_id):
history = self.crash_history.get(client_id, None)
if not history:
return None
ts = max(history)
res = rdf_client.ClientCrash.FromSerializedString(history[ts])
res.timestamp = ts
return res | Reads the latest client crash record for a single client. |
388,659 | def _objective_decorator(func):
def inner(preds, dmatrix):
labels = dmatrix.get_label()
return func(labels, preds)
return inner | Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()`` |
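A minimal sketch of the argument-order conversion (the squared-error objective below is a hypothetical example, not from the source):
>>> def sk_style(y_true, y_pred):
...     return ((y_pred - y_true) ** 2).mean()
>>> xgb_style = _objective_decorator(sk_style)
>>> # xgb_style(preds, dmatrix) now evaluates sk_style(dmatrix.get_label(), preds)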
388,660 | def almost_unitary(gate: Gate) -> bool:
res = (gate @ gate.H).asoperator()
N = gate.qubit_nb
return np.allclose(asarray(res), np.eye(2**N), atol=TOLERANCE) | Return true if gate tensor is (almost) unitary |
388,661 | def zsymDecorator(odd):
def wrapper(func):
@wraps(func)
def zsym_wrapper(*args,**kwargs):
if args[0]._zsym:
out= func(args[0],args[1],numpy.fabs(args[2]),**kwargs)
else:
out= func(*args,**kwargs)
if odd and args[0]._zsym:
return sign(args[2])*out
else:
return out
return zsym_wrapper
return wrapper | Decorator to deal with zsym=True input; set odd=True if the function is an odd function of z (like zforce) |
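A toy illustration of the wrapper (hypothetical potential object carrying a `_zsym` flag; the body is a placeholder):
>>> @zsymDecorator(odd=True)
... def zforce(pot, R, z):
...     return -z
>>> # when pot._zsym is True, the call evaluates at |z| and sign(z) restores the odd symmetry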
388,662 | def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode(, errors=), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections | Parses the sections in the memory and returns a list of them |
388,663 | def see(obj=DEFAULT_ARG, *args, **kwargs):
use_locals = obj is DEFAULT_ARG
if use_locals:
try:
prop = getattr(obj, attr)
except (AttributeError, Exception):
prop = SeeError()
action = output.display_name(name=attr, obj=prop, local=use_locals)
tokens.append(action)
if args or kwargs:
tokens = handle_deprecated_args(tokens, args, kwargs)
return output.SeeResult(tokens) | see(obj=anything)
Show the features and attributes of an object.
This function takes a single argument, ``obj``, which can be of any type.
A summary of the object is printed immediately in the Python interpreter.
For example::
>>> see([])
[] in + += *
*= < <= == !=
> >= dir() hash()
help() iter() len() repr()
reversed() str() .append() .clear()
.copy() .count() .extend() .index()
.insert() .pop() .remove() .reverse()
.sort()
If this function is run without arguments, it will instead list the objects
that are available in the current scope. ::
>>> see()
os random see() sys
The return value is an instance of :class:`SeeResult`. |
388,664 | def set_evernote_spec():
spec = NoteStore.NotesMetadataResultSpec()
spec.includeTitle = True
spec.includeAttributes = True
return spec | set the spec of the notes
:return: spec |
388,665 | def _firmware_update(firmwarefile='', host='',
directory=''):
dest = os.path.join(directory, firmwarefile[7:])
__salt__['cp.get_file'](firmwarefile, dest)
username = __pillar__['proxy']['admin_username']
password = __pillar__['proxy']['admin_password']
__salt__['dracr.update_firmware'](dest,
host=host,
admin_username=username,
admin_password=password) | Update firmware for a single host |
388,666 | def vm_detach_nic(name, kwargs=None, call=None):
if call != 'action':
raise SaltCloudSystemExit(
'The vm_detach_nic action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
nic_id = kwargs.get('nic_id', None)
if nic_id is None:
raise SaltCloudSystemExit(
"The vm_detach_nic function requires a 'nic_id' to be provided."
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.detachnic(auth, vm_id, int(nic_id))
data = {
'action': 'vm.detachnic',
'nic_detached': response[0],
'vm_id': response[1],
'error_code': response[2],
}
return data | Detaches a network interface from a virtual machine.
.. versionadded:: 2016.3.0
name
The name of the VM from which to detach the network interface.
nic_id
The ID of the nic to detach.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_detach_nic my-vm nic_id=1 |
388,667 | def calculate_z2pt5_ngaw2(vs30):
c1 = 7.089
c2 = -1.144
z2pt5 = numpy.exp(c1 + numpy.log(vs30) * c2)
return z2pt5 | Reads an array of vs30 values (in m/s) and
returns the depth to the 2.5 km/s velocity horizon (in km)
Ref: Campbell, K.W. & Bozorgnia, Y., 2014.
'NGA-West2 ground motion model for the average horizontal components of
PGA, PGV, and 5pct damped linear acceleration response spectra.'
Earthquake Spectra, 30(3), pp.1087–1114.
:param vs30: the shear wave velocity (in m/s) at a depth of 30 m |
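A hand-checked numeric example of the relation z2.5 = exp(7.089 - 1.144 * ln(vs30)), for vs30 = 760 m/s:
>>> import numpy
>>> calculate_z2pt5_ngaw2(numpy.array([760.0]))
array([0.6068...])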
388,668 | def crick_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points"
" as the Polymer primitive.")
prim_cas = p.primitive.coordinates
p_cas = p.get_reference_coords()
ref_points = reference_axis.coordinates
cr_angles = [
dihedral(ref_points[i], prim_cas[i], prim_cas[i + 1], p_cas[i])
for i in range(len(prim_cas) - 1)]
cr_angles.append(None)
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = .format(reference_axis_name)
for m, c in zip(p._monomers, cr_angles):
m.tags[monomer_tag_name] = c
return cr_angles | Returns the Crick angle for each CA atom in the `Polymer`.
Notes
-----
The final value in the returned list is `None`, since the angle
calculation requires pairs of points on both the primitive and
reference_axis.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If `True`, tags the `Polymer` with the reference axis coordinates
and each Residue with its Crick angle. Crick angles are stored
at the Residue level, but are calculated using the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
cr_angles : list(float)
The crick angles in degrees for each CA atom of the Polymer.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length. |
388,669 | def digest(self):
A = self.A
B = self.B
C = self.C
D = self.D
input = [] + self.input
count = [] + self.count
index = (self.count[0] >> 3) & 0x3f
if index < 56:
padLen = 56 - index
else:
padLen = 120 - index
padding = [b'\x80'] + [b'\x00'] * 63
self.update(padding[:padLen])
bits = _bytelist2long(self.input[:56]) + count
self._transform(bits)
digest = struct.pack("<IIII", self.A, self.B, self.C, self.D)
self.A = A
self.B = B
self.C = C
self.D = D
self.input = input
self.count = count
return digest | Terminate the message-digest computation and return digest.
Return the digest of the strings passed to the update()
method so far. This is a 16-byte string which may contain
non-ASCII characters, including null bytes. |
388,670 | def canonicalize_id(reference_id):
if u in reference_id:
return reference_id.replace(u, u)
m = _C14N_PATTERN.match(reference_id)
if m:
origin = m.group(1)
return reference_id.replace(origin, _C14N_FIXES[origin])
return MALFORMED_CABLE_IDS.get(reference_id, INVALID_CABLE_IDS.get(reference_id, reference_id)) | \
Returns the canonicalized form of the provided reference_id.
WikiLeaks provides some malformed cable identifiers. If the provided `reference_id`
is not valid, this method returns the valid reference identifier equivalent.
If the reference identifier is valid, the reference id is returned unchanged.
Note: The returned canonicalized identifier may not be a valid WikiLeaks identifier
anymore. In most cases the returned canonical form is identical to the WikiLeaks
identifier, but for malformed cable identifiers like "09SECTION01OF03SANJOSE525"
it is not (becomes "09SANJOSE525").
`reference_id`
The cable identifier to canonicalize |
388,671 | def decode_list(self, integers):
integers = list(np.squeeze(integers))
return self.encoders["inputs"].decode_list(integers) | List of ints to list of str. |
388,672 | def encode(self, raw=False):
buf = b""
if raw:
for tag, value in self.pairs:
buf += tag + b'=' + value + SOH_STR
return buf
for tag, value in self.pairs:
if int(tag) in (8, 9, 35, 10):
continue
buf += tag + b'=' + value + SOH_STR
if self.message_type is None:
raise ValueError("No message type set")
buf = b"35=" + self.message_type + SOH_STR + buf
body_length = len(buf)
if not self.begin_string:
raise ValueError("No begin string set")
buf = b"8=" + self.begin_string + SOH_STR + \
b"9=" + fix_val("%u" % body_length) + SOH_STR + \
buf
checksum = 0
for c in buf:
checksum += ord(c) if sys.version_info[0] == 2 else c
buf += b"10=" + fix_val("%03u" % (checksum % 256,)) + SOH_STR
return buf | Convert message to on-the-wire FIX format.
:param raw: If True, encode pairs exactly as provided.
Unless 'raw' is set, this function will calculate and
correctly set the BodyLength (9) and Checksum (10) fields, and
ensure that the BeginString (8), Body Length (9), Message Type
(35) and Checksum (10) fields are in the right positions.
This function does no further validation of the message content. |
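As a concrete illustration of the framing (hand-computed, assuming begin string FIX.4.2 and an otherwise empty Heartbeat, message type 0; SOH rendered as '|'):
>>> # encode() would produce: 8=FIX.4.2|9=5|35=0|10=161|
>>> # BodyLength (9) counts only the bytes after it up to CheckSum; CheckSum (10) is the byte sum mod 256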
388,673 | def destroyTempFiles(self):
os.system("rm -rf %s" % self.rootDir)
logger.debug("Temp files created: %s, temp files actively destroyed: %s" % (self.tempFilesCreated, self.tempFilesDestroyed)) | Destroys all temp temp file hierarchy, getting rid of all files. |
388,674 | def forward(self, src_seq, tgt_seq, src_valid_length=None, tgt_valid_length=None):
additional_outputs = []
encoder_outputs, encoder_additional_outputs = self.encode(src_seq,
valid_length=src_valid_length)
decoder_states = self.decoder.init_state_from_encoder(encoder_outputs,
encoder_valid_length=src_valid_length)
outputs, _, decoder_additional_outputs =\
self.decode_seq(tgt_seq, decoder_states, tgt_valid_length)
additional_outputs.append(encoder_additional_outputs)
additional_outputs.append(decoder_additional_outputs)
return outputs, additional_outputs | Generate the prediction given the src_seq and tgt_seq.
This is used in training an NMT model.
Parameters
----------
src_seq : NDArray
tgt_seq : NDArray
src_valid_length : NDArray or None
tgt_valid_length : NDArray or None
Returns
-------
outputs : NDArray
Shape (batch_size, tgt_length, tgt_word_num)
additional_outputs : list of list
Additional outputs of encoder and decoder, e.g, the attention weights |
388,675 | def returner(ret):
librato_conn = _get_librato(ret)
q = librato_conn.new_queue()
if ret[] == :
log.debug()
stats = _calculate_runtimes(ret[])
log.debug(, ret[])
q.add(,
ret[], tags={: ret[]})
log.debug(
,
stats[]
)
q.add(,
stats[], tags={: ret[]})
log.debug(
,
stats[]
)
q.add(,
stats[], tags={: ret[]})
log.debug(, stats[])
q.add(,
stats[], tags={: ret[]})
log.debug(
,
stats[] + stats[]
)
q.add(, stats[
] + stats[], tags={: ret[]})
log.info()
q.submit() | Parse the return data and return metrics to Librato. |
388,676 | def extractInputForTP(self, tm):
burstingColumns = tm.activeState["t"].sum(axis=1)
burstingColumns[ burstingColumns < tm.cellsPerColumn ] = 0
burstingColumns[ burstingColumns == tm.cellsPerColumn ] = 1
correctlyPredictedCells = numpy.zeros(self._inputDimensions).astype(realDType)
idx = (tm.predictedState["t-1"] + tm.activeState["t"]) == 2
idx = idx.reshape(self._inputDimensions)
correctlyPredictedCells[idx] = 1.0
spInputVector = tm.learnState["t"].reshape(self._inputDimensions)
return (correctlyPredictedCells, spInputVector, burstingColumns) | Extract inputs for TP from the state of temporal memory
three pieces of information are extracted:
1. correctly predicted cells
2. all active cells
3. bursting cells (unpredicted input) |
388,677 | def fromfits(infile, hdu = 0, verbose = True):
pixelarray, hdr = ft.getdata(infile, hdu, header=True)
pixelarray = np.asarray(pixelarray).transpose()
pixelarrayshape = pixelarray.shape
if verbose :
print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1])
print "Input file BITPIX : %s" % (hdr["BITPIX"])
pixelarrayshape = np.asarray(pixelarrayshape)
if verbose :
print "Internal array type :", pixelarray.dtype.name
return f2nimage(pixelarray, verbose = verbose) | Factory function that reads a FITS file and returns a f2nimage object.
Use hdu to specify which HDU you want (primary = 0) |
388,678 | def rpush(self, key, value, *values):
return self.execute(b'RPUSH', key, value, *values) | Insert all the specified values at the tail of the list
stored at key. |
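A hedged usage sketch in an aioredis-style client (awaitable semantics assumed):
>>> await redis.rpush('queue:jobs', 'job-1', 'job-2')   # returns the new list length, e.g. 2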
388,679 | def connection_from_promised_list(data_promise, args=None, **kwargs):
return data_promise.then(lambda data: connection_from_list(data, args, **kwargs)) | A version of `connectionFromArray` that takes a promised array, and returns a
promised connection. |
388,680 | def get_endpoint_path(self, endpoint_id):
config = os.path.expanduser("~/.globusonline/lta/config-paths")
if not os.path.exists(config):
bot.error()
sys.exit(1)
path = None
config = [x.split()[0] for x in read_file(config)]
for path in config:
if os.path.exists(path):
break
sys.exit(1)
return path | return the first fullpath to a folder in the endpoint based on
expanding the user's home from the globus config file. This
function is fragile but I don't see any other way to do it.
Parameters
==========
endpoint_id: the endpoint id to look up the path for |
388,681 | def on_disconnect(self, client, userdata, result_code):
self.log_info("Disconnected with result code " + str(result_code))
self.state_handler.set_state(State.goodbye)
time.sleep(5)
self.thread_handler.run(target=self.start_blocking) | Callback when the MQTT client is disconnected. In this case,
the server waits five seconds before trying to reconnect.
:param client: the client being disconnected.
:param userdata: unused.
:param result_code: result code. |
388,682 | def shutdown():
global _sdk_ref_count
global _sdk_instance
global _should_shutdown
with _sdk_ref_lk:
logger.debug("shutdown: ref count = %d, should_shutdown = %s", \
_sdk_ref_count, _should_shutdown)
nsdk = nativeagent.try_get_sdk()
if not nsdk:
logger.warning()
_sdk_ref_count = 0
return None
if _sdk_ref_count > 1:
logger.debug(, _sdk_ref_count)
_sdk_ref_count -= 1
return None
logger.info()
try:
if _should_shutdown:
_rc = nsdk.shutdown()
if _rc == ErrorCode.NOT_INITIALIZED:
logger.warning()
else:
nativeagent.checkresult(nsdk, _rc, )
_should_shutdown = False
except SDKError as e:
logger.warning(, exc_info=sys.exc_info())
return e
_sdk_ref_count = 0
_sdk_instance = None
nativeagent._force_initialize(None)
logger.debug()
return None | Shut down the SDK.
:returns: An exception object if an error occurred, a falsy value otherwise.
:rtype: Exception |
388,683 | def getmembers_static(cls):
names = set()
for scope in cls.scopes:
names.update(structured.getmembers_static(scope))
return names | Gets members (vars) from all scopes using ONLY static information.
You most likely want to use ScopeStack.getmembers instead.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'. |
388,684 | def html_to_rgb(html):
html = html.strip().lower()
if html[0]=='#':
html = html[1:]
elif html in NAMED_COLOR:
html = NAMED_COLOR[html][1:]
if len(html)==6:
rgb = html[:2], html[2:4], html[4:]
elif len(html)==3:
rgb = ['%c%c' % (v,v) for v in html]
else:
raise ValueError("input
return tuple(((int(n, 16) / 255.0) for n in rgb)) | Convert the HTML color to (r, g, b).
Parameters:
:html:
the HTML definition of the color (#RRGGBB or #RGB or a color name).
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
Throws:
:ValueError:
If html is neither a known color name or a hexadecimal RGB
representation.
>>> '(%g, %g, %g)' % html_to_rgb('#ff8000')
'(1, 0.501961, 0)'
>>> '(%g, %g, %g)' % html_to_rgb('ff8000')
'(1, 0.501961, 0)'
>>> '(%g, %g, %g)' % html_to_rgb('#f60')
'(1, 0.4, 0)'
>>> '(%g, %g, %g)' % html_to_rgb('f60')
'(1, 0.4, 0)'
>>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon')
'(1, 0.980392, 0.803922)' |
388,685 | def _impl(lexer):
p = _sumterm(lexer)
tok = next(lexer)
if isinstance(tok, OP_rarrow):
q = _impl(lexer)
return (, p, q)
elif isinstance(tok, OP_lrarrow):
q = _impl(lexer)
return (, p, q)
else:
lexer.unpop_token(tok)
return p | Return an Implies expression. |
388,686 | def tracebacks_from_lines(lines_iter):
tbgrep = TracebackGrep()
for line in lines_iter:
tb = tbgrep.process(line)
if tb:
yield tb | Generator that yields tracebacks found in a lines iterator
The lines iterator can be:
- a file-like object
- a list (or deque) of lines.
- any other iterable sequence of strings |
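A small usage sketch over a log file (file name is a placeholder; each yielded traceback is assumed to be a single string):
>>> with open('app.log') as fh:
...     for tb in tracebacks_from_lines(fh):
...         print(tb)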
388,687 | def get_command(self, ctx, name):
if not hasattr(self.resource, name):
return None
method = getattr(self.resource, name)
attrs = getattr(method, , {})
help_text = inspect.getdoc(method)
attrs[] = self._auto_help_text(help_text or )
ignore_defaults = attrs.pop(, False)
new_method = self._echo_method(method)
click_params = getattr(method, , [])
new_method.__click_params__ = copy(click_params)
new_method = with_global_options(new_method)
fao = attrs.pop(, True)
if fao:
for field in reversed(self.resource.fields):
if not field.is_option:
continue
if not isinstance(fao, bool) and field.name not in fao:
continue
args = [field.option]
if field.key:
args.insert(0, field.key)
short_fields = {
: ,
: ,
: ,
:
}
if field.name in short_fields:
args.append(+short_fields[field.name])
option_help = field.help
if isinstance(field.type, StructuredInput):
option_help +=
if field.required:
option_help = + option_help
elif field.read_only:
option_help = + option_help
option_help = + option_help
click.option(
*args,
default=field.default if not ignore_defaults else None,
help=option_help,
type=field.type,
show_default=field.show_default,
multiple=field.multiple,
is_eager=False
)(new_method)
cmd = click.command(name=name, cls=ActionSubcommand, **attrs)(new_method)
code = six.get_function_code(method)
if in code.co_varnames:
click.argument(, nargs=1, required=False, type=str, metavar=)(cmd)
return cmd | Retrieve the appropriate method from the Resource,
decorate it as a click command, and return that method. |
388,688 | def isVideo(self):
val=False
if self.__dict__['codec_type']:
if self.codec_type == 'video':
val=True
return val | Is the stream labelled as a video stream. |
388,689 | def geometry_from_grid(self, grid, pixel_centres, pixel_neighbors, pixel_neighbors_size, buffer=1e-8):
y_min = np.min(grid[:, 0]) - buffer
y_max = np.max(grid[:, 0]) + buffer
x_min = np.min(grid[:, 1]) - buffer
x_max = np.max(grid[:, 1]) + buffer
shape_arcsec = (y_max - y_min, x_max - x_min)
origin = ((y_max + y_min) / 2.0, (x_max + x_min) / 2.0)
return self.Geometry(shape_arcsec=shape_arcsec, pixel_centres=pixel_centres, origin=origin,
pixel_neighbors=pixel_neighbors, pixel_neighbors_size=pixel_neighbors_size) | Determine the geometry of the Voronoi pixelization, by aligning it with the outer-most coordinates on a \
grid plus a small buffer.
Parameters
-----------
grid : ndarray
The (y,x) grid of coordinates which determine the Voronoi pixelization's geometry.
pixel_centres : ndarray
The (y,x) centre of every Voronoi pixel in arc-seconds.
origin : (float, float)
The arc-second origin of the Voronoi pixelization's coordinate system.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
Voronoi grid. |
388,690 | def set_site_energies( self, energies ):
self.site_energies = energies
for site_label in energies:
for site in self.sites:
if site.label == site_label:
site.energy = energies[ site_label ] | Set the energies for every site in the lattice according to the site labels.
Args:
energies (Dict(Str:Float): Dictionary of energies for each site label, e.g.::
{ 'A' : 1.0, 'B', 0.0 }
Returns:
None |
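Following the docstring's example dictionary, a short sketch (assuming `lattice` is an instance of the surrounding class with 'A' and 'B' labelled sites):
>>> lattice.set_site_energies({'A': 1.0, 'B': 0.0})
>>> # every site labelled 'A' now carries energy 1.0, every 'B' site 0.0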
388,691 | def ReadFileObject(self, file_object):
json_definitions = json.loads(file_object.read())
last_artifact_definition = None
for json_definition in json_definitions:
try:
artifact_definition = self.ReadArtifactDefinitionValues(json_definition)
except errors.FormatError as exception:
error_location = 'At start'
if last_artifact_definition:
error_location = 'After: {0:s}'.format(last_artifact_definition.name)
raise errors.FormatError(
'{0:s} {1!s}'.format(error_location, exception))
yield artifact_definition
last_artifact_definition = artifact_definition | Reads artifact definitions from a file-like object.
Args:
file_object (file): file-like object to read from.
Yields:
ArtifactDefinition: an artifact definition.
Raises:
FormatError: if the format of the JSON artifact definition is not set
or incorrect. |
388,692 | def unfreeze(self):
for idx, child in enumerate(self.model.children()):
mu.unfreeze_layer(child) | Unfreeze model layers |
388,693 | def start(self):
resource_list = getattr(self, f)
resource_action = getattr(resource_list, self.resource_action)
resource_action(self.resource_name) | Execution happening on jhubctl. |
388,694 | def _make_3d(field, twod):
shp = list(field.shape)
if twod and in twod:
shp.insert(1, 1)
elif twod:
shp.insert(0, 1)
return field.reshape(shp) | Add a dimension to field if necessary.
Args:
field (numpy.array): the field that need to be 3d.
twod (str): 'XZ', 'YZ' or None depending on what is relevant.
Returns:
numpy.array: reshaped field. |
388,695 | def mlp(feature, hparams, name="mlp"):
with tf.variable_scope(name, "mlp", values=[feature]):
num_mlp_layers = hparams.num_mlp_layers
mlp_dim = hparams.mlp_dim
for _ in range(num_mlp_layers):
feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu)
feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout)
return feature | Multi layer perceptron with dropout and relu activation. |
388,696 | def typing(self, *, channel: str):
payload = {"id": self._next_msg_id(), "type": "typing", "channel": channel}
self.send_over_websocket(payload=payload) | Sends a typing indicator to the specified channel.
This indicates that this app is currently
writing a message to send to a channel.
Args:
channel (str): The channel id. e.g. 'C024BE91L'
Raises:
SlackClientNotConnectedError: Websocket connection is closed. |
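A hedged usage sketch (assuming a connected RTM-style client exposing this method):
>>> rtm_client.typing(channel='C024BE91L')   # shows the 'is typing' indicator in that channel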
388,697 | def getJson(url):
site = urllib2.urlopen(url, timeout=300)
return json.load(site) | Download json and return simplejson object |
388,698 | def create_vpn_gateway(self, type, availability_zone=None):
params = { : type}
if availability_zone:
params[] = availability_zone
return self.get_object(, params, VpnGateway) | Create a new Vpn Gateway
:type type: str
:param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type availability_zone: str
:param availability_zone: The Availability Zone where you want the VPN gateway.
:rtype: The newly created VpnGateway
:return: A :class:`boto.vpc.vpngateway.VpnGateway` object |
388,699 | def create_blocking_connection(host):
return pika.BlockingConnection(
amqpdaemon.getConParams(
settings.get_amqp_settings()[host.lower()]["vhost"]
)
) | Return properly created blocking connection.
Args:
host (str): Host as it is defined in :func:`.get_amqp_settings`.
Uses :func:`edeposit.amqp.amqpdaemon.getConParams`. |