| code | docstring |
|---|---|
def _load_rsp(rsp):
    # Flickr wraps JSON responses in a jsonFlickrApi(...) callback; strip
    # everything outside the outermost parentheses (literals reconstructed).
    first = rsp.find('(') + 1
    last = rsp.rfind(')')
    return json.loads(rsp[first:last]) | Converts raw Flickr string response to Python dict |
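A quick sanity check of the unwrapping logic (the parenthesis search above targets Flickr's ``jsonFlickrApi(...)`` callback wrapper; the sample payload is illustrative):

```python
import json

def _load_rsp(rsp):
    # strip the jsonFlickrApi(...) wrapper and parse the inner JSON
    first = rsp.find('(') + 1
    last = rsp.rfind(')')
    return json.loads(rsp[first:last])

raw = 'jsonFlickrApi({"stat": "ok", "photos": {"page": 1}})'
assert _load_rsp(raw) == {"stat": "ok", "photos": {"page": 1}}
```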
def read_xml(filename):
    parser = et.XMLParser(remove_blank_text=True)
    isfile = False
    try:
        isfile = os.path.exists(filename)
    except ValueError as e:
        # os.path.exists can raise ValueError when `filename` is actually a
        # long XML string rather than a path (checked message is assumed;
        # the original literal was lost in extraction)
        if 'path too long' in str(e).lower():
            pass
        else:
            raise
    try:
        if isfile:
            return et.parse(filename, parser)
        else:
            r = et.fromstring(filename, parser)
            return r.getroottree()
    except IOError:
        log.exception('Unable to read file: {}'.format(filename))  # message reconstructed
    except et.XMLSyntaxError:
        log.exception('Unable to parse XML: {}'.format(filename))  # message reconstructed
        return None
    return None | Use lxml.etree to read an XML file, or string, into an ElementTree object.
:param filename: File to parse, or an XML string.
:return: lxml.etree._ElementTree object or None |
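For reference, the two input paths accepted by ``read_xml`` look like this when using lxml directly (a minimal sketch, not the library's code):

```python
from lxml import etree as et

parser = et.XMLParser(remove_blank_text=True)

# string input: parse, then promote the element to its owning tree
tree = et.fromstring('<root><a/></root>', parser).getroottree()
print(tree.getroot().tag)  # root

# file input would instead go through et.parse(path, parser)
```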
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self | Apply `processor` or `self.processor` to `self`. |
def selected_fields(self):
items = self.lstFields.selectedItems()
if items and self.mode == MULTI_MODE:
return [item.text() for item in items]
elif items and self.mode == SINGLE_MODE:
return items[0].text()
else:
return [] | Obtain the fields selected by user.
:returns: Keyword of the selected field.
:rtype: list, str |
def serialize(self, buf, offset):
fields = [ofproto.oxs_from_user(k, uv) for (k, uv)
in self.fields]
hdr_pack_str = '!HH'  # reserved (2 bytes) + length (2 bytes)
field_offset = offset + struct.calcsize(hdr_pack_str)
for (n, value, _) in fields:
field_offset += ofproto.oxs_serialize(n, value, None, buf,
field_offset)
reserved = 0
length = field_offset - offset
msg_pack_into(hdr_pack_str, buf, offset, reserved, length)
self.length = length
pad_len = utils.round_up(length, 8) - length
msg_pack_into("%dx" % pad_len, buf, field_offset)
return length + pad_len | Outputs the expression of the wire protocol of the flow stats into
the buf.
Returns the output length. |
def extract_commands(imported_vars):
commands = dict()
for tup in imported_vars:
name, obj = tup
if is_command_object(obj):
commands.setdefault(name, obj)
return commands | Extract command (``click.core.Command``) objects from the given list of variables.
:param dict_items imported_vars: key/value items of a dict (e.g. ``vars(module).items()``)
:return: dict of objects identified as CLI commands
:rtype: dict(str, object) |
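A hedged usage sketch: assuming ``is_command_object`` amounts to an ``isinstance(obj, click.core.Command)`` check, the function can be fed ``vars(module).items()``:

```python
import click

@click.command()
def hello():
    """Greets the terminal."""
    click.echo("hello")

# equivalent to extract_commands(vars().items()) under that assumption
commands = {name: obj for name, obj in list(vars().items())
            if isinstance(obj, click.core.Command)}
assert "hello" in commands
```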
def common_items_metrics(all_items, common_items):
segments = common_items.index.unique()
metrics = {}
for seg in segments:
seg_common_items = segment_common_items(seg)
projects = get_segment_projects(seg)
metric_values = []
for proj in projects:
pronac = proj[0]
percentage = common_items_percentage(pronac, seg_common_items)
metric_values.append(percentage)
metrics[seg] = {
'mean': np.mean(metric_values),
'std': np.std(metric_values)
}
return pd.DataFrame.from_dict(metrics, orient='index') | Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment. |
def fromtsv(source=None, encoding=None, errors='strict', header=None,
            **csvargs):
    csvargs.setdefault('dialect', 'excel-tab')
    return fromcsv(source, encoding=encoding, errors=errors, **csvargs) | Convenience function, as :func:`petl.io.csv.fromcsv` but with different
default dialect (tab delimited). |
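Usage sketch with petl (the ``excel-tab`` dialect default restored above is what makes this tab-delimited):

```python
import petl as etl

table = [('name', 'n'), ('foo', 1), ('bar', 2)]
etl.totsv(table, 'example.tsv')    # write tab-delimited
print(etl.fromtsv('example.tsv'))  # round-trips via the tab dialect
```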
def _build_indices(self):
result = {key: OrderedDict() for key in LINES_WITH_ID}
for line in self.lines:
if line.key in LINES_WITH_ID:
result.setdefault(line.key, OrderedDict())
if line.mapping["ID"] in result[line.key]:
warnings.warn(
("Seen {} header more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result | Build indices for the different field types |
def nb_persons(self, role = None):
if role:
if role.subroles:
role_condition = np.logical_or.reduce([self.members_role == subrole for subrole in role.subroles])
else:
role_condition = self.members_role == role
return self.sum(role_condition)
else:
return np.bincount(self.members_entity_id) | Returns the number of persons contained in the entity.
If ``role`` is provided, only the entity members with the given role are taken into account. |
def get_all(self, seq_set: SequenceSet) \
-> Sequence[Tuple[int, CachedMessage]]:
if seq_set.uid:
all_uids = seq_set.flatten(self.max_uid) & self._uids
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if uid in all_uids]
else:
all_seqs = seq_set.flatten(self.exists)
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if seq in all_seqs] | Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set. |
def detect_language(self, text: str, hint: str = None):
encodedtext = urllib.parse.quote(text)
args = "&text=" + encodedtext
if hint is not None:
args += "&hint=" + hint
r = self.yandex_translate_request("detect", args)
self.handle_errors(r)
return r.json()["lang"] | Detects the language of a text
:param text:
Text to analyze
:param hint:
A comma-separated list of hints telling the API
which languages the text might be written in,
example:
"de, en"
:return:
detected language code. example: "en" |
def add_to_cache(cls, remote_info, container):
if not isinstance(container, cls):
raise TypeError('%r not an instance of %r, could not be added to cache.' %
(container, cls))
if remote_info in cls.__remote_info_cache:
raise KeyError('Cache has collision but should not.')
cls.__remote_info_cache[remote_info] = container | Adds a ResourceContainer to a cache tying it to a protorpc method.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
container: An instance of ResourceContainer.
Raises:
TypeError: if the container is not an instance of cls.
KeyError: if the remote method has been referenced by a container before.
This should never occur because a remote method
is created only once. |
def __read(self):
self._socket.setblocking(0)
while not self._stop_event.is_set():
ready = select.select([self._socket], [], [], 1)
if ready[0]:
data, sender = self._socket.recvfrom(1024)
try:
self._handle_heartbeat(sender, data)
except Exception as ex:
_logger.exception("Error handling the heart beat: %s", ex) | Reads packets from the socket |
def __check_mecab_dict_path(self):
mecab_dic_cmd = "echo `{} --dicdir`".format(os.path.join(self._path_mecab_config, ))
try:
if six.PY2:
path_mecab_dict = subprocess.check_output( mecab_dic_cmd, shell=True ).strip()
else:
path_mecab_dict = subprocess.check_output(mecab_dic_cmd, shell=True).decode(self.string_encoding).strip()
except subprocess.CalledProcessError:
logger.error("{}".format(mecab_dic_cmd))
raise subprocess.CalledProcessError(returncode=-1, cmd="Failed to execute mecab-config command")
if path_mecab_dict == '':
raise SystemError('could not find mecab dictionary path with: {}'.format(mecab_dic_cmd))  # message reconstructed
return path_mecab_dict | check path to dict of Mecab in system environment |
def get_request_headers(self):
    date_header = time.asctime(time.gmtime())
    # NOTE: the computation of ``signing_key`` (an HMAC-SHA256 of the date
    # header keyed with the AWS secret key) was lost in extraction.
    auth_header = "AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s" % (
        self.connection._aws_access_key_id,
        signing_key,
    )
    # header names reconstructed; Route53's AWS3-HTTPS scheme uses
    # the X-Amzn-Authorization header
    return {
        'X-Amzn-Authorization': auth_header,
        'Date': date_header,
        'Host': 'route53.amazonaws.com',
    } | Determine the headers to send along with the request. These are
def make_2D_samples_gauss(n, m, sigma, random_state=None):
generator = check_random_state(random_state)
if np.isscalar(sigma):
sigma = np.array([sigma, ])
if len(sigma) > 1:
P = sp.linalg.sqrtm(sigma)
res = generator.randn(n, 2).dot(P) + m
else:
res = generator.randn(n, 2) * np.sqrt(sigma) + m
return res | return n samples drawn from 2D gaussian N(m,sigma)
Parameters
----------
n : int
number of samples to make
m : np.array (2,)
mean value of the gaussian distribution
sigma : np.array (2,2)
covariance matrix of the gaussian distribution
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : np.array (n,2)
n samples drawn from N(m,sigma) |
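Usage sketch, assuming the function above together with its ``scipy`` (``sp``) and ``check_random_state`` dependencies are in scope:

```python
import numpy as np

m = np.array([5.0, -2.0])
sigma = np.array([[1.0, 0.2],
                  [0.2, 1.0]])
X = make_2D_samples_gauss(1000, m, sigma, random_state=0)
print(X.shape)         # (1000, 2)
print(X.mean(axis=0))  # close to m
```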
def define_function(self, function, name=None):
name = name if name is not None else function.__name__
ENVIRONMENT_DATA[self._env].user_functions[name] = function
self.build(DEFFUNCTION.format(name)) | Define the Python function within the CLIPS environment.
If a name is given, it will be the function name within CLIPS.
Otherwise, the name of the Python function will be used.
The Python function will be accessible within CLIPS via its name
as if it was defined via the `deffunction` construct. |
def maximum_active_partitions(self):
machine_type = self.get_property('machine-type')
try:
max_parts = self._MAX_PARTITIONS_BY_MACHINE_TYPE[machine_type]
except KeyError:
raise ValueError("Unknown machine type: {!r}".format(machine_type))
return max_parts | Integer: The maximum number of active logical partitions or partitions
of this CPC.
The following table shows the maximum number of active logical
partitions or partitions by machine generations supported at the HMC
API:
========================= ==================
Machine generation Maximum partitions
========================= ==================
z196 60
z114 30
zEC12 60
zBC12 30
z13 / Emperor 85
z13s / Rockhopper 40
z14 / Emperor II 85
z14-ZR1 / Rockhopper II 40
========================= ==================
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`ValueError`: Unknown machine type |
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
global scm_status_cache
if config.artifact in scm_status_cache.keys():
result = scm_status_cache[config.artifact]
elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
result = scm_status_cache["%s|False" % config.artifact]
else:
result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
if read_modules:
scm_status_cache[config.artifact] = result
if ("%s|False" % config.artifact) in scm_status_cache.keys():
del(scm_status_cache["%s|False" % config.artifact])
else:
scm_status_cache["%s|False" % config.artifact] = result
return result | Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven |
def get_keeper_token(host, username, password):
token_endpoint = urljoin(host, '/token')
r = requests.get(token_endpoint, auth=(username, password))
if r.status_code != 200:
raise KeeperError('Could not authenticate to {0}: error {1}\n{2}'.
format(host, r.status_code, r.json()))
return r.json()['token'] | Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token. |
def format_message(self, msg):
    # dict key names reconstructed; the original literals were lost
    return {'timestamp': int(msg.created * 1000),
            'message': self.format(msg),
            'stream_name': self.log_stream or msg.name,
            'group_name': self.log_group} | format message. |
def natural_neighbor(xp, yp, variable, grid_x, grid_y):
return natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y) | Wrap natural_neighbor_to_grid for deprecated natural_neighbor function. |
def find_folder_recipes(base_folder,
pattern="Singularity",
manifest=None,
base=None):
if manifest is None:
manifest = dict()
for root, dirnames, filenames in os.walk(base_folder):
for filename in fnmatch.filter(filenames, pattern):
container_path = os.path.join(root, filename)
if base is not None:
container_base = container_path.replace(base, '').strip('/')
collection = container_base.split('/')[0]
recipe = os.path.basename(container_base)
container_uri = "%s/%s" % (collection, recipe)
else:
container_uri = '/'.join(container_path.strip('/').split('/')[-2:])
add_container = True
if container_uri in manifest:
if manifest[container_uri]['modified'] > os.path.getmtime(container_path):
add_container = False
if add_container:
manifest[container_uri] = {'path': os.path.abspath(container_path),  # key names reconstructed
'modified': os.path.getmtime(container_path)}
return manifest | find folder recipes will find recipes based on a particular pattern.
Parameters
==========
base_folder: the base folder to recursively walk
pattern: a default pattern to search for
manifest: an already started manifest
base: if defined, consider folders under this level recursively. |
def provider_list(self, lookup='all'):
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data | Return a mapping of all image data for available providers |
def list_files(self, project):
path = "/projects/{}/files".format(project.id)
res = yield from self.http_query("GET", path, timeout=120)
return res.json | List files in the project on computes |
def insert_before(self, text):
selection_state = self.selection
if selection_state:
selection_state = SelectionState(
original_cursor_position=selection_state.original_cursor_position + len(text),
type=selection_state.type)
return Document(
text=text + self.text,
cursor_position=self.cursor_position + len(text),
selection=selection_state) | Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync. |
def auto_up(self, count=1, go_to_start_of_line_if_history_changes=False):
if self.complete_state:
self.complete_previous(count=count)
elif self.document.cursor_position_row > 0:
self.cursor_up(count=count)
elif not self.selection_state:
self.history_backward(count=count)
if go_to_start_of_line_if_history_changes:
self.cursor_position += self.document.get_start_of_line_position() | If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.) |
def run(
self,
num_episodes=-1,
max_episode_timesteps=-1,
episode_finished=None,
summary_report=None,
summary_interval=0,
num_timesteps=None,
deterministic=False,
episodes=None,
max_timesteps=None,
testing=False,
sleep=None
):
if max_timesteps is not None:
max_episode_timesteps = max_timesteps
warnings.warn("WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.",
category=DeprecationWarning)
assert isinstance(max_episode_timesteps, int)
if summary_report is not None:
warnings.warn("WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback "
"instead to generate summaries every n episodes.",
category=DeprecationWarning)
self.reset()
self.global_episode = 0
self.global_timestep = 0
self.should_stop = False
threads = [threading.Thread(target=self._run_single, args=(t, self.agent[t], self.environment[t],),
kwargs={"deterministic": deterministic,
"max_episode_timesteps": max_episode_timesteps,
"episode_finished": episode_finished,
"testing": testing,
"sleep": sleep})
for t in range(len(self.agent))]
self.start_time = time.time()
[t.start() for t in threads]
try:
next_summary = 0
next_save = 0 if self.save_frequency_unit != "s" else time.time()
while any([t.is_alive() for t in threads]) and (self.global_episode < num_episodes or num_episodes == -1):
self.time = time.time()
if summary_report is not None and self.global_episode > next_summary:
summary_report(self)
next_summary += summary_interval
if self.save_path and self.save_frequency is not None:
do_save = True
current = None
if self.save_frequency_unit == "e" and self.global_episode > next_save:
current = self.global_episode
elif self.save_frequency_unit == "s" and self.time > next_save:
current = self.time
elif self.save_frequency_unit == "t" and self.global_timestep > next_save:
current = self.global_timestep
else:
do_save = False
if do_save:
self.agent[0].save_model(self.save_path)
while next_save < current:
next_save += self.save_frequency
time.sleep(1)
except KeyboardInterrupt:
print()
self.should_stop = True
[t.join() for t in threads]
print() | Executes this runner by starting all Agents in parallel (each one in one thread).
Args:
episodes (int): Deprecated; see num_episodes.
max_timesteps (int): Deprecated; see max_episode_timesteps. |
def flush_tx_buffer(self):
log.info('Flushing transmit buffer')  # message reconstructed
self._ucan.reset_can(self.channel, ResetFlags.RESET_ONLY_TX_BUFF) | Flushes the transmit buffer.
:raises can.CanError:
If flushing of the transmit buffer failed. |
def from_agent_proto(agent_info_list, brain_params):
vis_obs = []
for i in range(brain_params.number_visual_observations):
obs = [BrainInfo.process_pixels(x.visual_observations[i],
brain_params.camera_resolutions[i]['blackAndWhite'])
for x in agent_info_list]
vis_obs += [obs]
if len(agent_info_list) == 0:
memory_size = 0
else:
memory_size = max([len(x.memories) for x in agent_info_list])
if memory_size == 0:
memory = np.zeros((0, 0))
else:
[x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list]
memory = np.array([list(x.memories) for x in agent_info_list])
total_num_actions = sum(brain_params.vector_action_space_size)
mask_actions = np.ones((len(agent_info_list), total_num_actions))
for agent_index, agent_info in enumerate(agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == total_num_actions:
mask_actions[agent_index, :] = [
0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]
if any([np.isnan(x.reward) for x in agent_info_list]):
logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name)
if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]):
logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name)
if len(agent_info_list) == 0:
vector_obs = np.zeros(
(0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations)
)
else:
vector_obs = np.nan_to_num(
np.array([x.stacked_vector_observation for x in agent_info_list])
)
brain_info = BrainInfo(
visual_observation=vis_obs,
vector_observation=vector_obs,
text_observations=[x.text_observation for x in agent_info_list],
memory=memory,
reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list],
agents=[x.id for x in agent_info_list],
local_done=[x.done for x in agent_info_list],
vector_action=np.array([x.stored_vector_actions for x in agent_info_list]),
text_action=[list(x.stored_text_actions) for x in agent_info_list],
max_reached=[x.max_step_reached for x in agent_info_list],
custom_observations=[x.custom_observation for x in agent_info_list],
action_mask=mask_actions
)
return brain_info | Converts list of agent infos to BrainInfo. |
def check_geophysical_vars_fill_value(self, ds):
results = []
for geo_var in get_geophysical_variables(ds):
results.append(
self._has_var_attr(ds, geo_var, '_FillValue', '_FillValue', BaseCheck.MEDIUM),
)
return results | Check that geophysical variables contain fill values.
:param netCDF4.Dataset ds: An open netCDF dataset |
def draw_summary(self, history, title=""):
time_str = str(history.get_total_time()).split(".")[0]
summary = "Step: {} Time: {}".format(history.step, time_str)
if title:
summary = title + "\n\n" + summary
self.figure.suptitle(summary) | Inserts a text summary at the top that lists the number of steps and total
training time. |
def create_set_cmap(values, cmap_name, alpha=255):
unique_values = list(set(values))
shuffle(unique_values)
from pylab import get_cmap
cmap = get_cmap(cmap_name)
d = {}
for i in range(len(unique_values)):
d[unique_values[i]] = _convert_color_format(cmap(1.*i/len(unique_values)), alpha)
return d | return a dict of colors corresponding to the unique values
:param values: values to be mapped
:param cmap_name: colormap name
:param alpha: color alpha
:return: dict of colors corresponding to the unique values |
def handle_error(err, halt=True):
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))
if halt:
sys.exit(1) | Print errors message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit. |
def get_hwclock():
    # NOTE: string literals below were reconstructed from Salt's timezone module.
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        for line in (x.strip() for x in ret['stdout'].splitlines()):
            if 'rtc in local tz' in line.lower():
                try:
                    if line.split(':')[-1].strip().lower() == 'yes':
                        return 'localtime'
                    else:
                        return 'UTC'
                except IndexError:
                    pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse', 'NILinuxRT'):
            if family in os_family:
                return _get_adjtime_timezone()
        if 'Debian' in __grains__['os_family']:
            # original way to look up hwclock on Debian-based systems
            try:
                with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if re.match(r'^\s*#', line):
                            continue
                        if 'UTC=' in line:
                            is_utc = line.rstrip('\n').split('=')[-1].lower()
                            if is_utc == 'yes':
                                return 'UTC'
                            else:
                                return 'localtime'
            except IOError:
                pass
            # newer Debian systems record the setting in /etc/adjtime
            return _get_adjtime_timezone()
        if 'Gentoo' in __grains__['os_family']:
            if not os.path.exists('/etc/adjtime'):
                offset_file = '/etc/conf.d/hwclock'
                try:
                    with salt.utils.files.fopen(offset_file, 'r') as fp_:
                        for line in fp_:
                            line = salt.utils.stringutils.to_unicode(line)
                            if line.startswith('clock='):
                                line = line.rstrip('\n')
                                line = line.split('=')[-1].strip('\'"')
                                if line == 'UTC':
                                    return 'UTC'
                                if line.lower() == 'local':
                                    return 'localtime'
                        raise CommandExecutionError(
                            'Correct offset value not found in {0}'
                            .format(offset_file))
                except IOError as exc:
                    raise CommandExecutionError(
                        'Problem reading offset file {0}: {1}'
                        .format(offset_file, exc.strerror))
            return _get_adjtime_timezone()
        if 'Solaris' in __grains__['os_family']:
            offset_file = '/etc/rtc_config'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('zone_info=GMT'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror))
        if 'AIX' in __grains__['os_family']:
            offset_file = '/etc/environment'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('TZ=UTC'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)) | Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock |
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_flogi(self, **kwargs):
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_rx_flogi = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-flogi")
fcoe_intf_rx_flogi.text = kwargs.pop('fcoe_intf_rx_flogi')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def _build_cached_instances(self):
connection = self._connect()
reservations = connection.get_all_reservations()
cached_instances = {}
for rs in reservations:
for vm in rs.instances:
cached_instances[vm.id] = vm
return cached_instances | Build lookup table of VM instances known to the cloud provider.
The returned dictionary links VM id with the actual VM object. |
def add_file(self, *args):
for file_path in args:
self.files.append(FilePath(file_path, self)) | Add single file or list of files to bundle
:type: file_path: str|unicode |
def reset_passwd(self, data):
error = False
msg = ""
if len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
user = self.database.users.find_one_and_update({"reset": data["reset_hash"]},
{"$set": {"password": passwd_hash},
"$unset": {"reset": True, "activate": True}})
if user is None:
error = True
msg = _("Invalid reset hash.")
else:
msg = _("Your password has been successfully changed.")
return msg, error | Reset the user password |
def setup_dirs(data):
pdir = os.path.realpath(data.paramsdict["project_dir"])
data.dirs.clusts = os.path.join(pdir, "{}_clust_{}"\
.format(data.name, data.paramsdict["clust_threshold"]))
if not os.path.exists(data.dirs.clusts):
os.mkdir(data.dirs.clusts)
data.tmpdir = os.path.abspath(os.path.expanduser(
os.path.join(pdir, data.name + "-tmpalign")))  # suffix reconstructed
if not os.path.exists(data.tmpdir):
os.mkdir(data.tmpdir)
if not data.paramsdict["assembly_method"] == "denovo":
data.dirs.refmapping = os.path.join(pdir, "{}_refmapping".format(data.name))
if not os.path.exists(data.dirs.refmapping):
os.mkdir(data.dirs.refmapping) | sets up directories for step3 data |
def get(self, name, default, allow_default=True):
if not self.settings.get('case_sensitive', False):  # setting name reconstructed
name = name.lower()
if name not in self.settings:
if not allow_default:
raise LookupError('setting {name!r} is not set'.format(name=name))  # message reconstructed
self.settings[name] = default
return self.settings[name] | Return a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set.
:param bool allow_default: If true, use the parameter default as
default if the key is not set, else raise
:exc:`LookupError`
:raises: :exc:`LookupError` if allow_default is false and the setting is
not set. |
def weighted_median(data, weights=None):
if weights is None:
return median(data)
midpoint = 0.5 * sum(weights)
if any([j > midpoint for j in weights]):
return data[weights.index(max(weights))]
if any([j > 0 for j in weights]):
sorted_data, sorted_weights = zip(*sorted(zip(data, weights)))
cumulative_weight = 0
below_midpoint_index = 0
while cumulative_weight <= midpoint:
below_midpoint_index += 1
cumulative_weight += sorted_weights[below_midpoint_index-1]
cumulative_weight -= sorted_weights[below_midpoint_index-1]
if cumulative_weight - midpoint < sys.float_info.epsilon:
bounds = sorted_data[below_midpoint_index-2:below_midpoint_index]
return sum(bounds) / float(len(bounds))
return sorted_data[below_midpoint_index-1] | Calculate the weighted median of a list. |
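A worked example of the midpoint logic above: with equal unit weights the cumulative weight reaches the midpoint exactly between two sorted elements, so those two are averaged; a single dominant weight short-circuits to its datum.

```python
# midpoint = 0.5 * 4 = 2.0; cumulative weight equals 2.0 after the second
# sorted element, so elements 2 and 3 are averaged
print(weighted_median([1, 2, 3, 4], [1, 1, 1, 1]))  # 2.5

# weight 10 exceeds the midpoint 6.0, so its datum wins outright
print(weighted_median([1, 2, 3], [1, 10, 1]))       # 2
```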
def import_emails(self, archives_path, all, exclude_lists=None):
count = 0
email_generator = self.get_emails(archives_path, all, exclude_lists)
for mailinglist_name, msg, index in email_generator:
try:
self.save_email(mailinglist_name, msg, index)
except:
transaction.rollback()
raise
count += 1
if count % 1000 == 0:
transaction.commit()
transaction.commit() | Get emails from the filesystem from the `archives_path`
and store them into the database. If `all` is set to True all
the filesystem storage will be imported otherwise the
importation will resume from the last message previously
imported. The lists set in `exclude_lists` won't be imported. |
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
stream = proc_or_stream
if not hasattr(stream, 'readline'):
stream = proc_or_stream.stdout
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
hexsha, _ = line.split(None, 1)
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield Commit(repo, hex_to_bin(hexsha))
if hasattr(proc_or_stream, 'wait'):
finalize_process(proc_or_stream) | Parse out commit information into a list of Commit objects
We expect one-line per commit, and parse the actual commit information directly
from our lighting fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects |
def _update(self, **kwargs):
requests_params, update_uri, session, read_only = \
self._prepare_put_or_patch(kwargs)
read_only_mutations = []
for attr in read_only:
if attr in kwargs:
read_only_mutations.append(attr)
if read_only_mutations:
msg = 'Attempted mutation of read-only attribute(s): %s' \
% read_only_mutations
raise AttemptedMutationOfReadOnly(msg)
# NOTE: the remainder of the original method (a retry loop that issued the
# PUT/PATCH request, sleeping and continuing on transient errors) was lost
# in extraction.
def get_or_create_time_series(self, label_values):
if label_values is None:
raise ValueError('label_values must not be None')
if any(lv is None for lv in label_values):
raise ValueError('label_values must not contain null labels')
if len(label_values) != self._len_label_keys:
raise ValueError('number of label values must match number of label keys')
return self._get_or_create_time_series(label_values) | Get a mutable measurement for the given set of label values.
:type label_values: list(:class:`LabelValue`)
:param label_values: The measurement's label values.
:rtype: :class:`GaugePointLong`, :class:`GaugePointDouble`
:class:`opencensus.metrics.export.cumulative.CumulativePointLong`,
or
:class:`opencensus.metrics.export.cumulative.CumulativePointDouble`
:return: A mutable point that represents the last value of the
measurement. |
def get_resources(self, ids, cache=True):
if ids[0].startswith('arn:'):
params = {'ResourceArns': ids}  # parameter key names reconstructed
else:
params = {'Names': ids}
return self.query.filter(self.manager, **params) | Support server side filtering on arns or names |
def addresses_for_key(gpg, key):
return [address.split("<")[-1].strip(">")
for address in gpg.list_keys().key_map[key['fingerprint']]["uids"]
if address] | Takes a key and extracts the email addresses for it. |
def get_descriptor_defaults(self, api_info, hostname=None):
    # NOTE: string literals reconstructed following the upstream
    # Cloud Endpoints api_config implementation.
    hostname = (hostname or endpoints_util.get_app_hostname() or
                api_info.hostname)
    protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
                          endpoints_util.is_running_on_devserver()) else 'https'
    base_path = api_info.base_path.strip('/')
    defaults = {
        'extends': 'thirdParty.api',
        'root': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
        'name': api_info.name,
        'version': api_info.api_version,
        'api_version': api_info.api_version,
        'path_version': api_info.path_version,
        'defaultVersion': True,
        'abstract': False,
        'adapter': {
            'bns': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
            'type': 'lily',
            'deadline': 10.0
        }
    }
    if api_info.canonical_name:
        defaults['canonicalName'] = api_info.canonical_name
    if api_info.owner_domain:
        defaults['ownerDomain'] = api_info.owner_domain
    if api_info.owner_name:
        defaults['ownerName'] = api_info.owner_name
    if api_info.package_path:
        defaults['packagePath'] = api_info.package_path
    if api_info.title:
        defaults['title'] = api_info.title
    if api_info.documentation:
        defaults['documentation'] = api_info.documentation
    return defaults | Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration. |
def convert_convolution(builder, layer, input_names, output_names, keras_layer):
_check_data_format(keras_layer)
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
is_deconv = isinstance(keras_layer,
_keras.layers.convolutional.Conv2DTranspose)
weightList = keras_layer.get_weights()
if is_deconv:
height, width, n_filters, channels = weightList[0].shape
W = weightList[0].transpose([0,1,3,2])
try:
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_shape = output_blob_shape[:-1]
except:
output_shape = None
else:
height, width, channels, n_filters = weightList[0].shape
W = weightList[0]
output_shape = None
b = weightList[1] if has_bias else None
output_channels = n_filters
stride_height, stride_width = keras_layer.strides
dilations = [1,1]
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
if is_deconv and not dilations == [1,1]:
raise ValueError("Unsupported non-unity dilation for Deconvolution layer")
groups = 1
kernel_channels = channels
if isinstance(keras_layer, DepthwiseConv2D):
groups = channels
kernel_channels = 1
depth_multiplier = keras_layer.depth_multiplier
W = _np.reshape(W,(height, width,1,channels * depth_multiplier))
output_channels = channels * depth_multiplier
builder.add_convolution(name = layer,
kernel_channels = kernel_channels,
output_channels = output_channels,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = groups,
W = W,
b = b,
has_bias = has_bias,
is_deconv = is_deconv,
output_shape = output_shape,
input_name = input_name,
output_name = output_name,
dilation_factors = dilations) | Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. |
def _superclasses_for_subject(self, graph, typeof):
    # NOTE: the original lookup loop was lost in extraction; this sketch
    # walks rdfs:subClassOf links upward from `typeof`, matching the
    # surviving lines (RDFS is assumed to come from rdflib.namespace).
    classes = []
    superclass = typeof
    while True:
        found = False
        for o in graph.objects(subject=superclass, predicate=RDFS.subClassOf):
            found = True
            classes.append(o)
            superclass = o
        if not found:
            break
    return classes | helper, returns a list of all superclasses of a given class |
def all_examples(self, pred_name=None):
target = self.db.target_table
pred_name = pred_name if pred_name else target
examples = self.db.rows(target, [self.db.target_att, self.db.pkeys[target]])
return '\n'.join(["%s(%s, %s)." % (pred_name, ILPConverter.fmt_col(cls), pk) for cls, pk in examples]) | Emits all examples in prolog form for RSD.
:param pred_name: override for the emitted predicate name |
def render(self, template: str, **vars) -> str:
vars.setdefault('ctx', self._ctx)
return self._renderer.render(template, **vars) | Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results |
def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3):
def prepare_lines(ih):
sio = StringIO()
ih.dump(sio)
dump = sio.getvalue()
lines = dump.splitlines()
return lines
a = prepare_lines(ih1)
b = prepare_lines(ih2)
import difflib
result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm=''))
if tofile is None:
tofile = sys.stdout
output = '\n'.join(result) + '\n'
tofile.write(output) | Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output |
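Usage sketch with the intelhex package (``IntelHex.puts`` writes raw bytes at an address):

```python
import sys
from intelhex import IntelHex

a = IntelHex()
a.puts(0, b'\x00\x01\x02\x03')
b = IntelHex()
b.puts(0, b'\x00\xff\x02\x03')

# prints a unified diff of the two hex dumps to stdout
diff_dumps(a, b, tofile=sys.stdout, name1='before', name2='after')
```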
def setup_graph(self):
all_vars = tfv1.global_variables() + tfv1.local_variables()
for v in all_vars:
if v.name == self.var_name:
self.var = v
break
else:
raise ValueError("{} is not a variable in the graph!".format(self.var_name)) | Will setup the assign operator for that variable. |
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, sort_keys=False, **kw):
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not sort_keys and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = DSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, sort_keys=sort_keys, **kw).iterencode(obj)
for chunk in iterable:
fp.write(chunk) | Serialize ``obj`` as a DSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the DSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then DSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``('and ', 'is ')`` separators.
``('and', 'is')`` is the most compact DSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``DSONEncoder`` is used. |
def list_scores(self, update_keys, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listScores')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Score, elapsed_time, lightweight) | Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score] |
def visit_lambda(self, node):
    if node.args.defaults:
        # no judgment can be made when the lambda defines default values
        return
    call = node.body
    if not isinstance(call, astroid.Call):
        # the lambda body must be a call expression to be unnecessary
        return
    if isinstance(node.body.func, astroid.Attribute) and isinstance(
        node.body.func.expr, astroid.Call
    ):
        return
    # NOTE: the block building the call site and the argument lists was
    # lost in extraction; restored here following pylint's upstream checker.
    call_site = astroid.arguments.CallSite.from_call(call)
    ordinary_args = list(node.args.args)
    new_call_args = list(call.args)
    if call.keywords:
        lambda_kwargs = {keyword.name for keyword in node.args.defaults}
        if len(lambda_kwargs) != len(call_site.keyword_arguments):
            # different lengths, so probably not identical
            return
        if set(call_site.keyword_arguments).difference(lambda_kwargs):
            return
    if len(ordinary_args) != len(new_call_args):
        return
    for arg, passed_arg in zip(ordinary_args, new_call_args):
        if not isinstance(passed_arg, astroid.Name):
            return
        if arg.name != passed_arg.name:
            return
    self.add_message("unnecessary-lambda", line=node.fromlineno, node=node) | check whether or not the lambda is suspicious |
def remove(self, option):
for existing_option in self._options:
if existing_option[1] == option:
self._options.remove(existing_option)
self._refresh_options()
return True
return False | Removes the first `option` from the ButtonGroup.
Returns `True` if an item was removed.
:param string option:
The value of the option to remove from the ButtonGroup. |
def getsecret(self, section, option, **kwargs):
raw = kwargs.get('raw', False)
value = self.get(section, option, **kwargs)
if raw:
return value
return self.custodia_client.get_secret(value) | Get a secret from Custodia |
def safe_get(self, section, key):
try:
return configparser.RawConfigParser.get(self, section, key)
except (configparser.NoSectionError,
configparser.NoOptionError):
return None | Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time. |
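A self-contained sketch of the pattern, mixing ``safe_get`` into a ``RawConfigParser`` subclass:

```python
import configparser

class SafeConfig(configparser.RawConfigParser):
    def safe_get(self, section, key):
        try:
            return configparser.RawConfigParser.get(self, section, key)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return None

cfg = SafeConfig()
cfg.read_string("[main]\nkey = value\n")
print(cfg.safe_get('main', 'key'))     # value
print(cfg.safe_get('missing', 'key'))  # None
```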
def create_meta_main(create_path, config, role, categories):
meta_file = c.DEFAULT_META_FILE.replace(
"%author_name", config["author_name"])
meta_file = meta_file.replace(
"%author_company", config["author_company"])
meta_file = meta_file.replace("%license_type", config["license_type"])
meta_file = meta_file.replace("%role_name", role)
if not categories:
categories = ""
meta_file = meta_file.replace("%categories", categories)
string_to_file(create_path, meta_file) | Create a meta template. |
def _pusher_connect_handler(self, data):
self.channel = self.pusher.subscribe(self.pos_callback_chan)
for listener in self.pusher_connected_listeners:
listener(data) | Event handler for the connection_established event. Binds the
shortlink_scanned event |
def t_NAME(self, t):
    # token type literals reconstructed; the originals were lost in extraction
    t.type = KEYWORDS[t.value.lower()] if t.value.lower() in KEYWORDS \
        else 'CHAR' if t.value.lower() in ('"char"',) else 'NAME'
    return t | [A-Za-z]\w*|\"char\" |
def write(self, data, **keys):
slow = keys.get('slow', False)
isrec = False
if isinstance(data, (list, dict)):
if isinstance(data, list):
data_list = data
columns_all = keys.get('columns', None)
if columns_all is None:
columns_all = keys.get('names', None)
if columns_all is None:
raise ValueError(
"you must send columns with a list of arrays")
else:
columns_all = list(data.keys())
data_list = [data[n] for n in columns_all]
colnums_all = [self._extract_colnum(c) for c in columns_all]
names = [self.get_colname(c) for c in colnums_all]
isobj = numpy.zeros(len(data_list), dtype=numpy.bool)
for i in xrange(len(data_list)):
isobj[i] = is_object(data_list[i])
else:
if data.dtype.fields is None:
raise ValueError("You are writing to a table, so I expected "
"an array with fields as input. If you want "
"to write a simple array, you should use "
"write_column to write to a single column, "
"or instead write to an image hdu")
if data.shape is ():
raise ValueError("cannot write data with shape ()")
isrec = True
names = data.dtype.names
isobj = fields_are_object(data)
data_list = []
colnums_all = []
for i, name in enumerate(names):
colnum = self._extract_colnum(name)
data_list.append(data[name])
colnums_all.append(colnum)
if slow:
for i, name in enumerate(names):
if not isobj[i]:
self.write_column(name, data_list[i], **keys)
else:
nonobj_colnums = []
nonobj_arrays = []
for i in xrange(len(data_list)):
if not isobj[i]:
nonobj_colnums.append(colnums_all[i])
if isrec:
colref = array_to_native(data_list[i], inplace=False)
else:
colref = array_to_native_c(data_list[i], inplace=False)
if IS_PY3 and colref.dtype.char == 'U':
colref = colref.astype('S', copy=False)
nonobj_arrays.append(colref)
for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
self._verify_column_data(tcolnum, tdata)
if len(nonobj_arrays) > 0:
firstrow = keys.get('firstrow', 0)
self._FITS.write_columns(
self._ext+1, nonobj_colnums, nonobj_arrays,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
for i, name in enumerate(names):
if isobj[i]:
self.write_var_column(name, data_list[i], **keys)
self._update_info() | Write data into this HDU
parameters
----------
data: ndarray or list of ndarray
A numerical python array. Should be an ordinary array for image
HDUs, should have fields for tables. To write an ordinary array to
a column in a table HDU, use write_column. If data already exists
in this HDU, it will be overwritten. See the append() method to
append new rows to a table HDU.
firstrow: integer, optional
At which row you should begin writing to tables. Be sure you know
what you are doing! For appending see the append() method.
Default 0.
columns: list, optional
If data is a list of arrays, you must send columns as a list
of names or column numbers
You can also send names=
names: list, optional
same as columns= |
def publish_wp(site_name, output_file, resources, args):
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.media import UploadFile, GetMediaLibrary
from wordpress_xmlrpc.methods.posts import NewPost, EditPost, GetPost
url, user, password = get_site_config(site_name)
meta = {}
for r in resources:
if r.endswith('.json'):
with open(r) as f:
meta = json.load(f)
fm = meta.get('frontmatter', {})
if 'identifier' not in fm or not fm['identifier']:
err("Can't publish without a unique identifier in the frontmatter")
# NOTE: the block that built the WordPress post from the frontmatter
# (title, slug, tags, categories, excerpt, featured image) was lost in
# extraction; only the final content update below survives.
post.content = content
r = wp.call(EditPost(post.id, post))
return r, wp.call(GetPost(post.id)) | Publish a notebook to a wordpress post, using Gutenberg blocks.
Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter'
show_input: hide
github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb
identifier: 5c987397-a954-46ca-8743-bdcd7a71579c
featured_image: 171
authors:
- email: eric@civicknowledge.com
name: Eric Busboom
organization: Civic Knowledge
type: wrangler
tags:
- Tag1
- Tag2
categories:
- Demographics
- Tutorial
'Featured_image' is an attachment id |
def blake2b(data, digest_size=BLAKE2B_BYTES, key=b'',
salt=b'', person=b'',
encoder=nacl.encoding.HexEncoder):
digest = _b2b_hash(data, digest_size=digest_size, key=key,
salt=salt, person=person)
return encoder.encode(digest) | Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes |
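PyNaCl exposes this as ``nacl.hash.blake2b``; a keyed-MAC usage sketch:

```python
import nacl.encoding
import nacl.hash

mac = nacl.hash.blake2b(b'the message', key=b'secret key',
                        encoder=nacl.encoding.HexEncoder)
print(mac)  # hex encoding of a BLAKE2B_BYTES (32-byte) digest
```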
def load_df_from_file(file_path, sep=",", header=0):
with tf.gfile.Open(file_path) as infile:
df = pd.read_csv(infile, sep=sep, header=header)
return df | Wrapper around pandas' read_csv. |
def set_representative_sequence(self, force_rerun=False):
if len(self.sequences) == 0:
log.error('{}: no sequences mapped to this protein'.format(self.id))  # log messages in this method reconstructed
return self.representative_sequence
kegg_mappings = self.filter_sequences(KEGGProp)
if len(kegg_mappings) > 0:
kegg_to_use = kegg_mappings[0]
if len(kegg_mappings) > 1:
log.warning('{}: multiple KEGG mappings found, using {}'.format(self.id, kegg_to_use.id))
uniprot_mappings = self.filter_sequences(UniProtProp)
if self.representative_sequence and not force_rerun:
log.debug('{}: representative sequence already set'.format(self.id))
elif len(kegg_mappings) > 0 and len(uniprot_mappings) == 0:
self.representative_sequence = kegg_to_use
log.debug('{}: set KEGG mapping {} as representative sequence'.format(self.id, kegg_to_use.id))
elif len(kegg_mappings) == 0 and len(uniprot_mappings) > 0:
u_ranker = []
for u in uniprot_mappings:
u_ranker.append((u.id, u.ranking_score()))
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
best_u = uniprot_mappings.get_by_id(best_u_id)
self.representative_sequence = best_u
log.debug('{}: set UniProt mapping {} as representative sequence'.format(self.id, best_u_id))
elif len(kegg_mappings) > 0 and len(uniprot_mappings) > 0:
if kegg_to_use.num_pdbs > 0 and not uniprot_mappings.has_id(kegg_to_use.uniprot):
self.representative_sequence = kegg_to_use
log.debug('{}: set KEGG mapping {} as representative sequence'.format(self.id, kegg_to_use.id))
else:
u_ranker = []
for u in uniprot_mappings:
u_ranker.append((u.id, u.ranking_score()))
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
best_u = uniprot_mappings.get_by_id(best_u_id)
self.representative_sequence = best_u
log.debug('{}: set UniProt mapping {} as representative sequence'.format(self.id, best_u_id))
return self.representative_sequence | Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative
sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
Returns:
SeqProp: Which sequence was set as representative |
def udot(op1, op2):
dot = np.dot(op1.d, op2.d)
units = op1.units * op2.units
if dot.shape == ():
return unyt_quantity(dot, units)
return unyt_array(dot, units) | Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
Examples
--------
>>> from unyt import km, s
>>> a = np.eye(2)*km
>>> b = (np.ones((2, 2)) * 2)*s
>>> print(udot(a, b))
[[2. 2.]
[2. 2.]] km*s |
def replace(self, v):
if self.popsize < self._popsize:
return self.add(v)
k = self.tournament(negative=True)
self.clean(self.population[k])
self.population[k] = v
v.position = len(self._hist)
self._hist.append(v)
self.bsf = v
self.estopping = v
self._inds_replace += 1
self._density += self.get_density(v)
if self._inds_replace == self._popsize:
self._inds_replace = 0
self.generation += 1
gc.collect() | Replace an individual selected by negative tournament selection with
individual v |
def get_notebook_name() -> str:
kernel_id = re.search(
'kernel-(.*).json',
ipykernel.connect.get_connection_file()
).group(1)
servers = list_running_servers()
for server in servers:
response = requests.get(urljoin(server['url'], 'api/sessions'),
params={'token': server.get('token', '')})
for session in json.loads(response.text):
if session['kernel']['id'] == kernel_id:
relative_path = session['notebook']['path']
return pjoin(server['notebook_dir'], relative_path)
raise Exception('Notebook path not found on any running server') | Return the full path of the jupyter notebook.
References
----------
https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246 |
def string(self, *pattern, **kwargs):
self.pattern(self.build_string(*pattern, **kwargs))
return self | Add string pattern
:param pattern:
:type pattern:
:return: self
:rtype: Rebulk |
def draw_dot(self, pos, color):
if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height:
self.matrix[pos[0]][pos[1]] = color | Draw one single dot with the given color on the screen.
:param pos: Position of the dot
:param color: Color for the dot
:type pos: tuple
:type color: tuple |
def PrimaryDatacenter(self):
return(clc.v2.Datacenter(alias=self.alias, location=self.data['primaryDataCenter'], session=self.session)) | Returns the primary datacenter object associated with the account.
>>> clc.v2.Account(alias='BTDI').PrimaryDatacenter()
<clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18>
>>> print _
WA1 |
def begin(self, sql=None):
self._transaction = True
try:
begin = self._con.begin
except AttributeError:
return self._con.query(sql or 'begin')
else:
if sql:
return begin(sql=sql)
else:
return begin() | Begin a transaction. |
def OnMeasureItem(self, item):
item_name = self.GetItems()[item]
return icons[item_name].GetHeight() | Returns the height of the items in the popup |
def _clean_data(cls, *args, **kwargs):
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':  # key name reconstructed
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data | Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays). |
def summary(self):
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. |
def iter_intersecting(self, iterable, key=None, descending=False):
return _ContainsVersionIterator(self, iterable, key, descending,
mode=_ContainsVersionIterator.MODE_INTERSECTING) | Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect. |
def cume_dist(expr, sort=None, ascending=True):
return _rank_op(expr, CumeDist, types.float64, sort=sort, ascending=ascending) | Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column |
def contains_points(intersector,
points,
check_direction=None):
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n, 3)')
# NOTE: the block that cast rays along +/- check_direction and tallied hit
# counts was lost in extraction. The code below expects it to have defined:
#   contains    - (n,) bool result array, initially False
#   inside_aabb - (n,) bool mask of points inside the mesh bounding box
#   bi_hits     - (2, m) ray hit counts for the two opposite directions
#   bi_contains - (2, m) bool, odd hit count per direction (crossing parity)
#   agree       - (m,) bool, True where both directions agree
mask = inside_aabb.copy()
mask[mask] = agree
contains[mask] = bi_contains[0][agree]
one_freespace = (bi_hits == 0).any(axis=0)
broken = np.logical_and(np.logical_not(agree),
np.logical_not(one_freespace))
if not broken.any():
return contains
new_direction = util.unitize(np.random.random(3) - .5)
mask = inside_aabb.copy()
mask[mask] = broken
contains[mask] = contains_points(
intersector,
points[inside_aabb][broken],
check_direction=new_direction)
constants.log.debug(
'%d contains tests disagreed, retrying with a new direction',  # message reconstructed
broken.sum())
return contains | Check if a mesh contains a set of points, using ray tests.
If the point is on the surface of the mesh, behavior is
undefined.
Parameters
---------
mesh: Trimesh object
points: (n,3) points in space
Returns
---------
contains : (n) bool
Whether point is inside mesh or not |
def _bool_method_SERIES(cls, op, special):
op_name = _get_op_name(op, special)
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
if isinstance(y, np.ndarray):
assert not (is_bool_dtype(x) and is_bool_dtype(y))
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except (TypeError, ValueError, AttributeError,
OverflowError, NotImplementedError):
raise TypeError("cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]"
.format(dtype=x.dtype,
typ=type(y).__name__))
return result
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
if isinstance(other, ABCDataFrame):
return NotImplemented
elif isinstance(other, (ABCSeries, ABCIndexClass)):
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
ovalues = other.values
finalizer = lambda x: x
else:
is_other_int_dtype = is_integer_dtype(np.asarray(other))
if is_list_like(other) and not isinstance(other, np.ndarray):
other = construct_1d_object_array_from_listlike(other)
ovalues = other
finalizer = lambda x: x.__finalize__(self)
filler = (fill_int if is_self_int_dtype and is_other_int_dtype
else fill_bool)
res_values = na_op(self.values, ovalues)
unfilled = self._constructor(res_values,
index=self.index, name=res_name)
filled = filler(unfilled)
return finalizer(filled)
wrapper.__name__ = op_name
return wrapper | Wrapper function for Series arithmetic operations, to avoid
code duplication. |
def get_parameter_p_value_too_high_warning(
model_type, model_params, parameter, p_value, maximum_p_value
):
warnings = []
if p_value > maximum_p_value:
data = {
"{}_p_value".format(parameter): p_value,
"{}_maximum_p_value".format(parameter): maximum_p_value,
}
data.update(model_params)
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} p-value is too high. Candidate model rejected.".format(
parameter=parameter
)
),
data=data,
)
)
return warnings | Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. |
def sweep(port, rate, ID, retry=3):
if port == 'dummy':  # literal reconstructed: 'dummy' selects the fake serial port
s = ServoSerial(port, rate, fake=True)
else:
s = ServoSerial(port, rate)
if ID < 0:
ID = xl320.XL320_BROADCAST_ADDR
try:
s.open()
except SerialException as e:
print('-' * 40)
print(sys.argv[0], ':')
print(e)
exit(1)
pkt = makePingPacket(ID)
s.write(pkt)
for cnt in range(retry):
ans = s.read()
if ans:
for pkt in ans:
servo = packetToDict(pkt)
utils.prettyPrintPacket(servo)
print('raw packet: {}'.format(pkt))  # message reconstructed
else:
print('try {}: no servos found'.format(cnt))  # message reconstructed
time.sleep(0.1)
s.close() | Sends a ping packet to IDs from 0 to maximum and prints out any returned
messages.
Actually sends a broadcast and retries (resends) the ping up to 3 times ... |
def cache_key(*args, **kwargs):
key = ""
for arg in args:
if callable(arg):
key += ":%s" % repr(arg)
else:
key += ":%s" % str(arg)
return key | Base method for computing the cache key with respect to the given
arguments. |
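A worked example of the key construction (callables are keyed by ``repr``, everything else by ``str``):

```python
def handler():
    pass

print(cache_key('user', 42))  # :user:42
print(cache_key(handler))     # :<function handler at 0x...>
```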
def add_column_xsd(self, tb, column, attrs):
    if column.nullable:
        attrs['minOccurs'] = str(0)
        attrs['nillable'] = 'true'
    for cls, xsd_type in six.iteritems(self.SIMPLE_XSD_TYPES):
        if isinstance(column.type, cls):
            attrs['type'] = xsd_type
            with tag(tb, 'xsd:element', attrs) as tb:
                self.element_callback(tb, column)
                return tb
    if isinstance(column.type, Geometry):
        geometry_type = column.type.geometry_type
        xsd_type = self.SIMPLE_GEOMETRY_XSD_TYPES[geometry_type]
        attrs['type'] = xsd_type
        with tag(tb, 'xsd:element', attrs) as tb:
            self.element_callback(tb, column)
            return tb
    if isinstance(column.type, sqlalchemy.Enum):
        with tag(tb, 'xsd:element', attrs) as tb:
            with tag(tb, 'xsd:simpleType') as tb:
                with tag(tb, 'xsd:restriction', {'base': 'xsd:string'}) \
                        as tb:
                    for enum in column.type.enums:
                        with tag(tb, 'xsd:enumeration', {'value': enum}):
                            pass
            self.element_callback(tb, column)
            return tb
    if isinstance(column.type, sqlalchemy.Numeric):
        if column.type.scale is None and column.type.precision is None:
            attrs['type'] = 'xsd:decimal'
            with tag(tb, 'xsd:element', attrs) as tb:
                self.element_callback(tb, column)
                return tb
        else:
            with tag(tb, 'xsd:element', attrs) as tb:
                with tag(tb, 'xsd:simpleType') as tb:
                    with tag(tb, 'xsd:restriction',
                             {'base': 'xsd:decimal'}) as tb:
                        if column.type.scale is not None:
                            with tag(tb, 'xsd:fractionDigits',
                                     {'value': str(column.type.scale)}) \
                                    as tb:
                                pass
                        if column.type.precision is not None:
                            precision = column.type.precision
                            with tag(tb, 'xsd:totalDigits',
                                     {'value': str(precision)}) \
                                    as tb:
                                pass
                self.element_callback(tb, column)
                return tb
    if isinstance(column.type, sqlalchemy.String) \
            or isinstance(column.type, sqlalchemy.Text) \
            or isinstance(column.type, sqlalchemy.Unicode) \
            or isinstance(column.type, sqlalchemy.UnicodeText):
        if column.type.length is None:
            attrs['type'] = 'xsd:string'
            with tag(tb, 'xsd:element', attrs) as tb:
                self.element_callback(tb, column)
                return tb
        else:
            with tag(tb, 'xsd:element', attrs) as tb:
                with tag(tb, 'xsd:simpleType') as tb:
                    with tag(tb, 'xsd:restriction',
                             {'base': 'xsd:string'}) as tb:
                        with tag(tb, 'xsd:maxLength',
                                 {'value': str(column.type.length)}):
                            pass
                self.element_callback(tb, column)
                return tb
    raise UnsupportedColumnTypeError(column.type) | Add the XSD for a column to tb (a TreeBuilder) |
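For a length-limited string column, the code above emits roughly the following element (a sketch; it assumes the caller already placed name='name' in attrs and that the column is nullable):

<xsd:element minOccurs="0" nillable="true" name="name">
  <xsd:simpleType>
    <xsd:restriction base="xsd:string">
      <xsd:maxLength value="50"/>
    </xsd:restriction>
  </xsd:simpleType>
</xsd:element>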
def view_package_path(self, package: str) -> _PATH:
if package not in self.view_packgets_list():
        raise NoSuchPackageException(
            f'no such package: {package}')
    output, _ = self._execute(
        '-s', self.device_sn, 'shell', 'pm', 'path', package)
    return output[8:-1] | Print the path to the APK of the given package. |
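A hypothetical call ('device' stands in for whatever object defines this method); it is equivalent to `adb -s <serial> shell pm path <package>`, whose `package:` prefix and trailing newline the slice strips:

apk_path = device.view_package_path('com.example.app')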
def transform(self, image_feature, bigdl_type="float"):
callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
return image_feature | transform ImageFeature |
def birch(args):
p = OptionParser(birch.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 2:
sys.exit(not p.print_help())
seqids, layout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
K = Karyotype(fig, root, seqids, layout)
L = K.layout
xs = .79
dt = dict(rectangle=False, circle=False)
coords = {}
coords["Amborella"] = (xs, L[0].y)
coords["Vitis"] = (xs, L[1].y)
coords["Prunus"] = (xs, L[2].y)
coords["Betula"] = (xs, L[3].y)
coords["Populus"] = (xs, L[4].y)
coords["Arabidopsis"] = (xs, L[5].y)
coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt)
coords["malvids"] = join_nodes(root, coords, \
"Populus", "Arabidopsis", xs, **dt)
coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt)
coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt)
coords["angiosperm"] = join_nodes(root, coords, \
"eudicots", "Amborella", xs, **dt)
branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0")
branch_length(root, coords["eudicots"], coords["angiosperm"],
">78.2", va="top")
branch_length(root, coords["Vitis"], coords["eudicots"], "138.5")
branch_length(root, coords["rosids"], coords["eudicots"],
"19.8", va="top")
branch_length(root, coords["Prunus"], coords["fabids"],
"104.2", ha="right", va="top")
branch_length(root, coords["Arabidopsis"], coords["malvids"],
"110.2", va="top")
branch_length(root, coords["fabids"], coords["rosids"],
"19.8", ha="right", va="top")
branch_length(root, coords["malvids"], coords["rosids"],
"8.5", va="top")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "birch"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right. |
def port_bindings(self, value):
if isinstance(value, (list, dict)):
self._port_bindings = self._convert_port_bindings(value)
elif value is None:
self._port_bindings = None
else:
        raise TypeError('port_bindings must be a list, dict or None, got {}'.format(type(value))) | {
u'8080/tcp': [
{
u'host_port': u'8080',
u'host_ip': u''
}
]
} |
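A sketch mirroring the structure shown in the docstring ('container' is a hypothetical object exposing this property):

container.port_bindings = {
    u'8080/tcp': [
        {u'host_port': u'8080', u'host_ip': u''},
    ],
}
container.port_bindings = None   # also accepted; anything else raises TypeError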
def remove(cls, repo, name):
repo.git.remote("rm", name)
if isinstance(name, cls):
name._clear_cache()
return name | Remove the remote with the given name
:return: the passed remote name to remove |
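A short usage sketch with GitPython (assumes the working directory is a repository that has an 'upstream' remote):

import git

repo = git.Repo('.')                  # assumes cwd is a git repository
git.Remote.remove(repo, 'upstream')   # runs `git remote rm upstream`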
def richtext_filters(content):
for filter_name in settings.RICHTEXT_FILTERS:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content | Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting. |
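A hedged configuration sketch; the filter path and function below are invented for illustration, not part of the source:

# settings.py (hypothetical)
RICHTEXT_FILTERS = ("myapp.filters.strip_scripts",)

# myapp/filters.py (hypothetical): takes the edited HTML, returns HTML
import re

def strip_scripts(content):
    return re.sub(r"(?is)<script.*?</script>", "", content)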
def hidelist(self, window_name, object_name):
object_handle = self._get_object_handle(window_name, object_name)
object_handle.activate()
object_handle.sendKey(AXKeyCodeConstants.ESCAPE)
return 1 | Hide combo box list / menu
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer |
def encode_pdf_date(d: datetime) -> str:
    s = "{:04d}".format(d.year)
    s += d.strftime(r"%m%d%H%M%S")
    tz = d.strftime("%z")
    if tz:
        sign, tz_hours, tz_mins = tz[0], tz[1:3], tz[3:5]
        s += "{}{}'{}'".format(sign, tz_hours, tz_mins)
    return s | Encode Python datetime object as PDF date string
From Adobe pdfmark manual:
(D:YYYYMMDDHHmmSSOHH'mm')
D: is an optional prefix. YYYY is the year. All fields after the year are
optional. MM is the month (01-12), DD is the day (01-31), HH is the
hour (00-23), mm are the minutes (00-59), and SS are the seconds
(00-59). The remainder of the string defines the relation of local
time to GMT. O is either + for a positive difference (local time is
later than GMT) or - (minus) for a negative difference. HH' is the
absolute value of the offset from GMT in hours, and mm' is the
absolute value of the offset in minutes. If no GMT information is
specified, the relation between the specified time and GMT is
considered unknown. Regardless of whether or not GMT
information is specified, the remainder of the string should specify
the local time. |
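Worked examples (using the function as fixed above):

from datetime import datetime, timedelta, timezone

d = datetime(2024, 1, 15, 10, 30, 0, tzinfo=timezone(timedelta(hours=-5)))
print(encode_pdf_date(d))                                   # -> 20240115103000-05'00'
print(encode_pdf_date(datetime(2024, 1, 15, 10, 30, 0)))    # naive -> 20240115103000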
def fgrad_y(self, y, return_precalc=False):
d = self.d
mpsi = self.psi
S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T
R = np.tanh(S)
D = 1 - (R ** 2)
GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T
if return_precalc:
return GRAD, S, R, D
    return GRAD | gradient of f w.r.t. y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff |
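For reference, this gradient matches the tanh warping form the code assumes, with each row of mpsi holding a triple (a_i, b_i, c_i):

f(y) = d\,y + \sum_i a_i \tanh\big(b_i (y + c_i)\big)

\frac{\partial f}{\partial y} = d + \sum_i a_i b_i \left(1 - \tanh^2\big(b_i (y + c_i)\big)\right)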
def instance_of(klass, arg):
if not isinstance(arg, klass):
raise com.IbisTypeError(
            'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg | Require that a value has a particular Python type. |
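The validator returns its argument on success, so it composes inline:

instance_of(str, 'hello')   # returns 'hello'
instance_of(int, 'hello')   # raises com.IbisTypeError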
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
text = u"%s — %s"
text = self.__modified_readonly_title(text,
is_modified, is_readonly)
if self.tempfile_path is not None\
and filename == encoding.to_unicode_from_fs(self.tempfile_path):
temp_file_str = to_text_string(_("Temporary file"))
return text % (temp_file_str, self.tempfile_path)
else:
return text % (osp.basename(filename), osp.dirname(filename)) | Return tab menu title |
def from_array(array):
if array is None or not array:
return None
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
    data['migrate_to_chat_id'] = int(array.get('migrate_to_chat_id')) if array.get('migrate_to_chat_id') is not None else None
    data['retry_after'] = int(array.get('retry_after')) if array.get('retry_after') is not None else None
    data['_raw'] = array
return ResponseParameters(**data) | Deserialize a new ResponseParameters from a given dictionary.
:return: new ResponseParameters instance.
:rtype: ResponseParameters |
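A hedged example; the field names follow Telegram's ResponseParameters type:

params = ResponseParameters.from_array({'retry_after': 30})
assert params.retry_after == 30
assert ResponseParameters.from_array({}) is None   # falsy input short-circuits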