code | docstring |
|---|---|
def calculate_splits(sdf_file, split_size):
counts = _sdfstats(sdf_file)["counts"]
splits = []
cur = 0
for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
cur += split_size
return splits | Compute the list of "start-end" index ranges needed to process the SDF file in chunks of split_size records. |
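A standalone sketch of the same chunking arithmetic may help; `_sdfstats` is stubbed out by passing `counts` directly (that helper and its return shape are taken from the code above).

```python
# Standalone sketch of the split computation; 'counts' stands in for
# _sdfstats(sdf_file)["counts"].
def calculate_splits_demo(counts, split_size):
    n_chunks = counts // split_size + (0 if counts % split_size == 0 else 1)
    splits, cur = [], 0
    for _ in range(n_chunks):
        splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
        cur += split_size
    return splits

print(calculate_splits_demo(25, 10))  # ['0-10', '10-20', '20-25']
```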
def _rapply(input_layer, operation, *op_args, **op_kwargs):
op_args = list(op_args)
op_args.append(input_layer.tensor)
return input_layer.with_tensor(operation(*op_args, **op_kwargs)) | Applies the given operation to this layer after expanding op_args.
Args:
input_layer: The input layer for this op.
operation: An operation that takes a tensor and the supplied args.
*op_args: Extra arguments for operation.
**op_kwargs: Keyword arguments for the operation.
Returns:
A new layer with operation applied. |
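A toy illustration of the argument threading: the wrapped tensor is appended as the last positional argument. `Layer` below is a minimal stand-in for the real input-layer type, not the library's class.

```python
# Minimal stand-in for the real input-layer type (assumption, for
# illustration only).
class Layer:
    def __init__(self, tensor):
        self.tensor = tensor

    def with_tensor(self, tensor):
        return Layer(tensor)

def _rapply(input_layer, operation, *op_args, **op_kwargs):
    op_args = list(op_args)
    op_args.append(input_layer.tensor)
    return input_layer.with_tensor(operation(*op_args, **op_kwargs))

layer = Layer(3)
print(_rapply(layer, pow, 2).tensor)  # pow(2, 3) == 8: the tensor comes last
```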
def _generic_convert_string(v, from_type, to_type, encoding):
assert six.PY2, "This function should be used with Python 2 only"
assert from_type != to_type
if from_type == six.binary_type and isinstance(v, six.binary_type):
return six.text_type(v, encoding)
elif from_type == six.text_type and isinstance(v, six.text_type):
return v.encode(encoding)
elif isinstance(v, (list, tuple, set)):
return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v])
elif isinstance(v, dict):
return {k: _generic_convert_string(v, from_type, to_type, encoding) for k, v in v.iteritems()}
return v | Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent,
with string values converted to given 'to_type' (str or unicode).
This method must be used with Python 2 interpreter only.
:param v: The value to convert
:param from_type: The original string type to convert
:param to_type: The target string type to convert to
:param encoding: The character encoding to use when converting between bytes and text
:return: The converted value, preserving the container structure of the input |
def _generate_enumerated_subtypes_tag_mapping(self, ns, data_type):
assert data_type.has_enumerated_subtypes()
tag_to_subtype_items = []
for tags, subtype in data_type.get_all_subtypes_with_tags():
tag_to_subtype_items.append("{}: {}".format(
tags,
generate_validator_constructor(ns, subtype)))
self.generate_multiline_list(
tag_to_subtype_items,
before='{}._tag_to_subtype_ ='.format(data_type.name),
delim=('{', '}'),
compact=False)
self.emit('{}._is_catch_all_ = {!r}'.format(
data_type.name, data_type.is_catch_all()))
self.emit() | Generates attributes needed for serializing and deserializing structs
with enumerated subtypes. These assignments are made after all the
Python class definitions to ensure that all references exist. |
def row(self):
row = OrderedDict()
row['retro_game_id'] = self.retro_game_id
row['game_type'] = self.game_type
row['game_type_des'] = self.game_type_des
row['st_fl'] = self.st_fl
row['regseason_fl'] = self.regseason_fl
row['playoff_fl'] = self.playoff_fl
row['local_game_time'] = self.local_game_time
row['game_id'] = self.game_id
row['home_team_id'] = self.home_team_id
row['home_team_lg'] = self.home_team_lg
row['away_team_id'] = self.away_team_id
row['away_team_lg'] = self.away_team_lg
row['home_team_name'] = self.home_team_name
row['away_team_name'] = self.away_team_name
row['home_team_name_full'] = self.home_team_name_full
row['away_team_name_full'] = self.away_team_name_full
row['interleague_fl'] = self.interleague_fl
row['park_id'] = self.park_id
row['park_name'] = self.park_name
row['park_loc'] = self.park_loc
return row | Game Dataset(Row)
:return: {
'retro_game_id': Retrosheet Game id
'game_type': Game Type(S/R/F/D/L/W)
'game_type_des': Game Type Description
(Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series)
'st_fl': Spring Training FLAG(T or F)
'regseason_fl': Regular Season FLAG(T or F)
'playoff_fl': Play Off Flag(T or F)
'local_game_time': Game Time(UTC -5)
'game_id': Game Id
'home_team_id': Home Team Id
'home_team_lg': Home Team league(AL or NL)
'away_team_id': Away Team Id
'away_team_lg': Away Team league(AL or NL)
'home_team_name': Home Team Name
'away_team_name': Away Team Name
'home_team_name_full': Home Team Name(Full Name)
'away_team_name_full': Away Team Name(Full Name)
'interleague_fl': Inter League Flag(T or F)
'park_id': Park Id
'park_name': Park Name
'park_loc': Park Location
} |
def get(self, server):
if isinstance(server, int):
if server >= len(self.sessions):
return None
else:
return self.sessions[server]
index = self._get_index(server)
if index == -1:
return None
else:
return self.sessions[index] | Returns a registered GPServer object with a matching GenePattern server url or index
Returns None if no matching result was found
:param server: The GenePattern server url (str) or index (int) to look up
:return: The matching GPServer object, or None if not found |
def media(self, uri):
try:
local_path, _ = urllib.request.urlretrieve(uri)
metadata = mutagen.File(local_path, easy=True)
if metadata.tags:
self._tags = metadata.tags
title = self._tags.get(TAG_TITLE, [])
self._manager[ATTR_TITLE] = title[0] if len(title) else ''
artist = self._tags.get(TAG_ARTIST, [])
self._manager[ATTR_ARTIST] = artist[0] if len(artist) else ''
album = self._tags.get(TAG_ALBUM, [])
self._manager[ATTR_ALBUM] = album[0] if len(album) else ''
local_uri = 'file://{}'.format(local_path)
except Exception:
local_uri = uri
self._player.set_state(Gst.State.NULL)
self._player.set_property(PROP_URI, local_uri)
self._player.set_state(Gst.State.PLAYING)
self.state = STATE_PLAYING
self._manager[ATTR_URI] = uri
self._manager[ATTR_DURATION] = self._duration()
self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME)
_LOGGER.info('Playing %s (local uri: %s)', uri, local_uri) | Play a media file. |
def state_transition(history_id_key, table_name, always_set=[], may_spend_tokens=False):
def wrap( check ):
def wrapped_check( state_engine, nameop, block_id, checked_ops ):
rc = check( state_engine, nameop, block_id, checked_ops )
if rc:
nameop['__table__'] = table_name
nameop['__history_id_key__'] = history_id_key
nameop['__state_transition__'] = True
nameop['__always_set__'] = always_set
if not may_spend_tokens:
state_transition_put_account_payment_info(nameop, None, None, None)
elif 'token_fee' not in nameop:
raise Exception('Name operation is missing its token fee')
invariant_tags = state_transition_invariant_tags()
for tag in invariant_tags:
assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
for required_field in CONSENSUS_FIELDS_REQUIRED:
assert required_field in nameop, 'Missing required consensus field {}'.format(required_field)
return rc
return wrapped_check
return wrap | Decorator for the check() method on state-transition operations.
Make sure that:
* there is a __table__ field set, which names the table in which this record is stored.
* there is a __history_id_key__ field set, which identifies the table record's primary key.
Any fields named in @always_set will always be set when the transition is applied.
That is, fields set here *must* be set on transition, and *will* be set in the database, even if
they have prior values in the affected name record that might constrain which rows to update. |
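A self-contained toy version may clarify the decorator mechanics: the wrapper tags the operation dict after the wrapped check passes. The field names and the sample check are illustrative, not the project's actual schema.

```python
# Toy decorator mirroring the tagging pattern above; all names are
# illustrative.
def state_transition(history_id_key, table_name, always_set=()):
    def wrap(check):
        def wrapped_check(nameop):
            if check(nameop):
                nameop['__table__'] = table_name
                nameop['__history_id_key__'] = history_id_key
                nameop['__always_set__'] = list(always_set)
                return True
            return False
        return wrapped_check
    return wrap

@state_transition('name', 'name_records', always_set=['value_hash'])
def check_register(nameop):
    return 'name' in nameop

op = {'name': 'demo.id'}
check_register(op)
print(op['__table__'])  # name_records
```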
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir):
file_format = None
transcript_data = get_video_transcript_data(edx_video_id, language_code)
if not transcript_data:
try:
with resource_fs.open(combine(static_dir, file_name), 'rb') as f:
file_content = f.read()
file_content = file_content.decode('utf-8')
except ResourceNotFound as exc:
logger.warn(
'Transcript file (language: %s, name: %s) was not found for video %s.',
language_code,
file_name,
edx_video_id
)
return
try:
file_format = get_transcript_format(file_content)
except Error as ex:
logger.warn(
'Transcript for video %s in language %s from file %s has an invalid format.',
edx_video_id,
language_code,
file_name
)
return
create_video_transcript(
video_id=edx_video_id,
language_code=language_code,
file_format=file_format,
content=ContentFile(file_content),
provider=provider
) | Imports transcript file from file system and creates transcript record in DS.
Arguments:
edx_video_id (str): Video id of the video.
language_code (unicode): Language code of the requested transcript.
file_name (unicode): File name of the transcript file.
provider (unicode): Transcript provider.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file. |
def CBO_L(self, **kwargs):
return (self.unstrained.CBO_L(**kwargs) +
self.CBO_strain_shift(**kwargs)) | Returns the strain-shifted L-valley conduction band offset (CBO),
assuming the strain affects all conduction band valleys equally. |
def vocabulary(self, levels=None):
url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
if levels:
url += '/{0}'.format(levels)
data = self.get(url)
if 'general' in data['requested_information']:
for item in data['requested_information']['general']:
yield Vocabulary(item)
else:
for item in data['requested_information']:
yield Vocabulary(item) | :param levels: An optional argument of declaring a single or
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
:type levels: str or None
http://www.wanikani.com/api/v1.2#vocabulary-list |
def urlfetch_async(self, url, method='GET', headers=None,
payload=None, deadline=None, callback=None,
follow_redirects=False):
headers = {} if headers is None else dict(headers)
headers.update(self.user_agent)
try:
self.token = yield self.get_token_async()
except app_identity.InternalError, e:
if os.environ.get('DATACENTER', '').endswith('.prom.corp.google.com'):
self.token = None
logging.warning(
'Could not fetch an authentication token, granting public access.')
else:
raise e
if self.token:
headers['authorization'] = 'OAuth ' + self.token
deadline = deadline or self.retry_params.urlfetch_timeout
ctx = ndb.get_context()
resp = yield ctx.urlfetch(
url, payload=payload, method=method,
headers=headers, follow_redirects=follow_redirects,
deadline=deadline, callback=callback)
raise ndb.Return(resp) | Make an async urlfetch() call.
This is an async wrapper around urlfetch(). It adds an authentication
header.
Args:
url: the url to fetch.
method: the method in which to fetch.
headers: the http headers.
payload: the data to submit in the fetch.
deadline: the deadline in which to make the call.
callback: the call to make once completed.
follow_redirects: whether or not to follow redirects.
Yields:
This returns a Future despite not being decorated with @ndb.tasklet! |
def cli(context, mongodb, username, password, authdb, host, port, loglevel, config, demo):
log_format = None
coloredlogs.install(level=loglevel, fmt=log_format)
LOG.info("Running scout version %s", __version__)
LOG.debug("Debug logging enabled.")
mongo_config = {}
cli_config = {}
if config:
LOG.debug("Use config file %s", config)
with open(config, 'r') as in_handle:
cli_config = yaml.load(in_handle)
mongo_config['mongodb'] = (mongodb or cli_config.get('mongodb') or 'scout')
if demo:
mongo_config['mongodb'] = 'scout-demo'
mongo_config['host'] = (host or cli_config.get('host') or 'localhost')
mongo_config['port'] = (port or cli_config.get('port') or 27017)
mongo_config['username'] = username or cli_config.get('username')
mongo_config['password'] = password or cli_config.get('password')
mongo_config['authdb'] = authdb or cli_config.get('authdb') or mongo_config['mongodb']
mongo_config['omim_api_key'] = cli_config.get('omim_api_key')
if context.invoked_subcommand in ('setup', 'serve'):
mongo_config['adapter'] = None
else:
LOG.info("Setting database name to %s", mongo_config['mongodb'])
LOG.debug("Setting host to %s", mongo_config['host'])
LOG.debug("Setting port to %s", mongo_config['port'])
LOG.debug("Setting port to %s", mongo_config[])
try:
client = get_connection(**mongo_config)
except ConnectionFailure:
context.abort()
database = client[mongo_config['mongodb']]
LOG.info("Setting up a mongo adapter")
mongo_config['client'] = client
adapter = MongoAdapter(database)
mongo_config['adapter'] = adapter
LOG.info("Check if authenticated...")
try:
for ins_obj in adapter.institutes():
pass
except OperationFailure as err:
LOG.info("User not authenticated")
context.abort()
context.obj = mongo_config | scout: manage interactions with a scout instance. |
def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
else:
(data) = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
return data | Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def check_model(self):
for node in self.nodes():
cpd = self.get_cpds(node=node)
if cpd is None:
raise ValueError('No CPD associated with {}'.format(node))
elif isinstance(cpd, (TabularCPD, ContinuousFactor)):
evidence = cpd.get_evidence()
parents = self.get_parents(node)
if set(evidence if evidence else []) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have "
"proper parents associated with it.".format(node=node))
if not cpd.is_valid_cpd():
raise ValueError("Sum or integral of conditional probabilites for node {node}"
" is not equal to 1.".format(node=node))
return True | Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks are passed |
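A hedged usage sketch with pgmpy (assuming the pgmpy API; the model class is `BayesianNetwork` in newer releases):

```python
# Sketch: build a two-node network and validate it with check_model().
from pgmpy.models import BayesianModel  # BayesianNetwork in newer pgmpy
from pgmpy.factors.discrete import TabularCPD

model = BayesianModel([('Rain', 'WetGrass')])
cpd_rain = TabularCPD('Rain', 2, [[0.8], [0.2]])
cpd_wet = TabularCPD('WetGrass', 2, [[0.9, 0.1], [0.1, 0.9]],
                     evidence=['Rain'], evidence_card=[2])
model.add_cpds(cpd_rain, cpd_wet)
print(model.check_model())  # True: parents and probability sums are consistent
```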
def _register_handler(event, fun, external=False):
registry = core.HANDLER_REGISTRY
if external:
registry = core.EXTERNAL_HANDLER_REGISTRY
if not isinstance(event, basestring):
event = core.parse_event_to_name(event)
if event in registry:
registry[event].append(fun)
else:
registry[event] = [fun]
return fun | Register a function to be an event handler |
def from_element(self, element, defaults={}):
if isinstance(defaults, SvdElement):
defaults = vars(defaults)
for key in self.props:
try:
value = element.find(key).text
except AttributeError:
default = defaults[key] if key in defaults else None
value = element.get(key, default)
if value is not None:
if key in self.props_to_integer:
try:
value = int(value)
except ValueError:
value = int(value, 16)
elif key in self.props_to_boolean:
value = value.lower() in ("yes", "true", "t", "1")
setattr(self, key, value) | Populate object variables from SVD element |
def list_storage_containers(kwargs=None, storage_conn=None, call=None):
if call != 'function':
raise SaltCloudSystemExit(
'The list_storage_containers function must be called with -f or --function.'
)
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.list_containers()
ret = {}
for item in data.containers:
ret[item.name] = object_to_dict(item)
return ret | .. versionadded:: 2015.8.0
List containers associated with the storage account
CLI Example:
.. code-block:: bash
salt-cloud -f list_storage_containers my-azure |
def restart_service(service, log=False):
with settings():
if log:
bookshelf2.logging_helpers.log_yellow(
'stopping service %s' % service)
sudo('service %s stop' % service)
if log:
bookshelf2.logging_helpers.log_yellow(
'starting service %s' % service)
sudo('service %s start' % service)
return True | restarts a service |
def get_default_property_values(self, classname):
schema_element = self.get_element_by_class_name(classname)
result = {
property_name: property_descriptor.default
for property_name, property_descriptor in six.iteritems(schema_element.properties)
}
if schema_element.is_edge:
result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)
return result | Return a dict with default values for all properties declared on this class. |
def scan(self, scanner, node_list):
env = self.get_build_env()
path = self.get_build_scanner_path
kw = self.get_kw()
deps = []
for node in node_list:
node.disambiguate()
deps.extend(node.get_implicit_deps(env, scanner, path, kw))
deps.extend(self.get_implicit_deps())
for tgt in self.get_all_targets():
tgt.add_to_implicit(deps) | Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient. |
def file_rename(object_id, input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /file-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename |
def filter_records(self, records):
for record in records:
try:
filtered = self.filter_record(record)
assert (filtered)
if filtered.seq == record.seq:
self.passed_unchanged += 1
else:
self.passed_changed += 1
yield filtered
except FailedFilter as e:
self.failed += 1
v = e.value
if self.listener:
self.listener(
'failed_filter',
record,
filter_name=self.name,
value=v) | Apply the filter to records |
def close(self):
for reporter in self._reporters:
reporter.close()
self._metrics.clear() | Close this metrics repository. |
def yn_prompt(text):
text = "\n"+ text + "\n( or ): "
while True:
answer = input(text).strip()
if answer != and answer != :
continue
elif answer == :
return True
elif answer == :
return False | Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input. |
def transfer_bankcard(self, true_name, bank_card_no, bank_code, amount, desc=None, out_trade_no=None):
if not out_trade_no:
now = datetime.now()
out_trade_no = '{0}{1}{2}'.format(
self.mch_id,
now.strftime('%Y%m%d%H%M%S'),
random.randint(1000, 10000)
)
data = {
'mch_id': self.mch_id,
'partner_trade_no': out_trade_no,
'amount': amount,
'desc': desc,
'enc_bank_no': self._rsa_encrypt(bank_card_no),
'enc_true_name': self._rsa_encrypt(true_name),
'bank_code': bank_code,
}
return self._post('mmpaysptrans/pay_bank', data=data) | Enterprise payment to a bank card.
:param true_name: Name of the account holder
:param bank_card_no: Bank card number
:param bank_code: Bank code
:param amount: Payment amount, in cents (fen)
:param desc: Payment description
:param out_trade_no: Optional. Merchant order number; must be unique, auto-generated by default
:return: The response data |
def delta_crl_distribution_points(self):
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = self._get_http_crl_distribution_points(self.freshest_crl_value)
return self._delta_crl_distribution_points | Returns delta CRL URLs - does not include complete CRLs
:return:
A list of zero or more DistributionPoint objects |
def on_map_clicked(self, pos):
d = self.declaration
d.clicked({
'click': 'map',
'position': tuple(pos)
}) | Called when the map is clicked |
def fastqfilter(self):
printtime('Filtering FASTQ files', self.start)
for i in range(self.cpus):
threads = Thread(target=self.filterfastq, args=())
threads.setDaemon(True)
threads.start()
for sample in self.runmetadata.samples:
self.filterqueue.put(sample)
self.filterqueue.join()
metadataprinter.MetadataPrinter(self) | Filter the reads into separate files based on taxonomic assignment |
def write(self, data):
if not hasattr(self, '_sock'):
return None
try:
self._sock.sendall(data)
if self.connection.debug > 1:
self.connection.logger.debug(
'%d bytes written to %s' % (len(data), self._host))
return
except EnvironmentError:
pass | Write some bytes to the transport. |
def vae(x, z_size, name=None):
with tf.variable_scope(name, default_name="vae"):
mu = tf.layers.dense(x, z_size, name="mu")
log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
shape = common_layers.shape_list(x)
epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
z = mu + tf.exp(log_sigma / 2) * epsilon
kl = 0.5 * tf.reduce_mean(
tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
free_bits = z_size // 4
kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
return z, kl_loss, mu, log_sigma | Simple variational autoencoder without discretization.
Args:
x: Input to the discretization bottleneck.
z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
name: Name for the bottleneck scope.
Returns:
The latent z, the KL loss, mu and log_sigma. |
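The KL term can be checked by hand: for a diagonal Gaussian versus N(0, I), the per-dimension KL is 0.5 * (exp(log_sigma) + mu^2 - 1 - log_sigma), and `tf.expm1` folds the `-1` into `exp(log_sigma) - 1`. A numpy sketch:

```python
# Numpy check of the KL expression used above (log_sigma is a log-variance).
import numpy as np

mu = np.array([0.5, -0.2])
log_sigma = np.array([0.1, -0.3])
kl = 0.5 * np.mean(np.expm1(log_sigma) + np.square(mu) - log_sigma)
print(kl)  # equals 0.5 * mean(exp(ls) + mu^2 - 1 - ls)
```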
def initialize(self):
autoArgs = dict((name, getattr(self, name))
for name in self._temporalArgNames)
if self._tfdr is None:
tpClass = _getTPClass(self.temporalImp)
if self.temporalImp in ['py', 'cpp', 'r',
'tm_py', 'tm_cpp',
'monitored_tm_py']:
self._tfdr = tpClass(
numberOfCols=self.columnCount,
cellsPerColumn=self.cellsPerColumn,
**autoArgs)
else:
raise RuntimeError("Invalid temporalImp") | Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`. |
def create_archive(
self,
archive_name,
authority_name,
archive_path,
versioned,
raise_on_err=True,
metadata=None,
user_config=None,
tags=None,
helper=False):
archive_metadata = self._create_archive_metadata(
archive_name=archive_name,
authority_name=authority_name,
archive_path=archive_path,
versioned=versioned,
raise_on_err=raise_on_err,
metadata=metadata,
user_config=user_config,
tags=tags,
helper=helper)
if raise_on_err:
self._create_archive(
archive_name,
archive_metadata)
else:
self._create_if_not_exists(
archive_name,
archive_metadata)
return self.get_archive(archive_name) | Create a new data archive
Returns
-------
archive : object
new :py:class:`~datafs.core.data_archive.DataArchive` object |
def agents_email_show(self, email_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/agents
api_path = "/api/v2/agents/email/{email_id}"
api_path = api_path.format(email_id=email_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id |
def update_matches(self, other):
for match in self.error_matches.all():
other_matches = TextLogErrorMatch.objects.filter(
classified_failure=other,
text_log_error=match.text_log_error,
)
if not other_matches:
match.classified_failure = other
match.save(update_fields=['classified_failure'])
continue
other_matches.filter(score__lt=match.score).update(score=match.score)
yield match.id | Update this instance's Matches to point to the given other's Matches.
Find Matches with the same TextLogError as our Matches, updating their
score if less than ours and mark our matches for deletion.
If there are no other matches, update ours to point to the other
ClassifiedFailure. |
def clear_measurements(self):
keys = list(self.measurements.keys())
for key in keys:
del(self.measurements[key])
self.meas_counter = -1 | Remove all measurements from self.measurements. Reset the
measurement counter. All ID are invalidated. |
def update_layers_geonode_wm(service, num_layers=None):
wm_api_url = urlparse.urljoin(service.url, 'api/layers/')
if num_layers:
total = num_layers
else:
response = requests.get(wm_api_url)
data = json.loads(response.content)
total = data['meta']['total_count']
for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']:
srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code)
service.srs.add(srs)
service.update_validity()
layer_n = 0
limit = 10
for i in range(0, total, limit):
try:
url = (
'%s?offset=%s&limit=%s'
% (wm_api_url, i, limit)
)
LOGGER.debug('Fetching %s' % url)
response = requests.get(url)
data = json.loads(response.content)
for row in data['objects']:
typename = row['typename']
name = typename
uuid = row['uuid']
LOGGER.debug('Updating layer %s' % name)
title = row['title']
abstract = row['abstract']
bbox = row['llbbox']
page_url = urlparse.urljoin(service.url, 'data/%s' % name)
category = ''
if 'category' in row:
category = row['category']
username = ''
if 'owner_username' in row:
username = row['owner_username']
temporal_extent_start = ''
if 'temporal_extent_start' in row:
temporal_extent_start = row['temporal_extent_start']
temporal_extent_end = ''
if 'temporal_extent_end' in row:
temporal_extent_end = row['temporal_extent_end']
endpoint = urlparse.urljoin(service.url, 'geoserver/wms')
endpoint = endpoint.replace(, )
print endpoint
if 'is_public' in row:
is_public = row['is_public']
layer, created = Layer.objects.get_or_create(
service=service, catalog=service.catalog, name=name, uuid=uuid)
if created:
LOGGER.debug('Created layer %s with uuid %s' % (name, uuid))
if layer.active:
links = [['OGC:WMS', endpoint]]
layer.type = 'Hypermap:WorldMap'
layer.title = title
layer.abstract = abstract
layer.is_public = is_public
layer.url = endpoint
layer.page_url = page_url
layer_wm, created = LayerWM.objects.get_or_create(layer=layer)
layer_wm.category = category
layer_wm.username = username
layer_wm.temporal_extent_start = temporal_extent_start
layer_wm.temporal_extent_end = temporal_extent_end
layer_wm.save()
x0 = format_float(bbox[0])
x1 = format_float(bbox[1])
y0 = format_float(bbox[2])
y1 = format_float(bbox[3])
x0, x1 = flip_coordinates(x0, x1)
y0, y1 = flip_coordinates(y0, y1)
layer.bbox_x0 = x0
layer.bbox_y0 = y0
layer.bbox_x1 = x1
layer.bbox_y1 = y1
keywords = []
for keyword in row['keywords']:
keywords.append(keyword['name'])
layer.keywords.all().delete()
for keyword in keywords:
layer.keywords.add(keyword)
layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1])
layer.xml = create_metadata_record(
identifier=str(layer.uuid),
source=endpoint,
links=links,
format='Hypermap:WorldMap',
type=layer.csw_type,
relation=service.id_string,
title=layer.title,
alternative=name,
abstract=layer.abstract,
keywords=keywords,
wkt_geometry=layer.wkt_geometry
)
layer.anytext = gen_anytext(layer.title, layer.abstract, keywords)
layer.save()
add_mined_dates(layer)
add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer)
layer_n = layer_n + 1
LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total))
if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
return
except Exception as err:
LOGGER.error('Error updating layer: %s' % err)
url = urlparse.urljoin(service.url, 'api/deleted_layers/')
LOGGER.debug('Fetching deleted layers from %s' % url)
try:
response = requests.get(url)
data = json.loads(response.content)
for deleted_layer in data['objects']:
if Layer.objects.filter(uuid=deleted_layer['uuid']).count() > 0:
layer = Layer.objects.get(uuid=deleted_layer['uuid'])
layer.was_deleted = True
layer.save()
LOGGER.debug('Layer %s marked as deleted' % layer.uuid)
except Exception as err:
LOGGER.error('Error retrieving deleted layers: %s' % err) | Update layers for a WorldMap instance.
Sample endpoint: http://localhost:8000/ |
def balance(self, account_id=None):
if not account_id:
if len(self.accounts()) == 1:
account_id = self.accounts()[0].id
else:
raise ValueError("You need to pass account ID")
endpoint = '/balance'
response = self._get_response(
method='get', endpoint=endpoint,
params={
'account_id': account_id,
},
)
return MonzoBalance(data=response.json()) | Returns balance information for a specific account.
Official docs:
https://monzo.com/docs/#read-balance
:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance |
def _to_p(self, mode):
if self.mode.endswith("A"):
chans = self.channels[:-1]
alpha = self.channels[-1]
self._secondary_mode = self.mode[:-1]
else:
chans = self.channels
alpha = None
self._secondary_mode = self.mode
palette = []
selfmask = chans[0].mask
for chn in chans[1:]:
selfmask = np.ma.mask_or(selfmask, chn.mask)
new_chn = np.ma.zeros(self.shape, dtype=int)
color_nb = 0
for i in range(self.height):
for j in range(self.width):
current_col = tuple([chn[i, j] for chn in chans])
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
new_chn[i, j] = idx
if self.fill_value is not None:
if self.mode.endswith("A"):
current_col = tuple(self.fill_value[:-1])
fill_alpha = [self.fill_value[-1]]
else:
current_col = tuple(self.fill_value)
fill_alpha = []
try:
next(idx
for idx in range(len(palette))
if palette[idx] == current_col)
except StopIteration:
idx = color_nb
palette.append(current_col)
color_nb = color_nb + 1
self.fill_value = [idx] + fill_alpha
new_chn.mask = selfmask
self.palette = palette
if alpha is None:
self.channels = [new_chn]
else:
self.channels = [new_chn, alpha]
self.mode = mode | Convert the image to P or PA mode. |
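The per-pixel palette search above is O(width * height * palette size); a vectorized sketch with `np.unique` produces the same index/palette decomposition, though the palette comes out in sorted rather than first-seen order:

```python
# Vectorized palette build; palette order differs (sorted, not first-seen).
import numpy as np

img = np.array([[[0, 0], [255, 0]],
                [[0, 0], [255, 255]]])      # H x W x channels
flat = img.reshape(-1, img.shape[-1])
palette, inverse = np.unique(flat, axis=0, return_inverse=True)
indexed = inverse.reshape(img.shape[:2])
print(palette.tolist())   # [[0, 0], [255, 0], [255, 255]]
print(indexed.tolist())   # [[0, 1], [0, 2]]
```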
def get_group(value):
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
elif value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value | group = display-name ":" [group-list] ";" [CFWS] |
def extract_from_commoncrawl(self, warc_download_url, callback_on_article_extracted, valid_hosts=None,
start_date=None, end_date=None,
strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None,
continue_after_error=True, show_download_progress=False,
log_level=logging.ERROR, delete_warc_after_extraction=True,
log_pathname_fully_extracted_warcs=None):
self.__warc_download_url = warc_download_url
self.__filter_valid_hosts = valid_hosts
self.__filter_start_date = start_date
self.__filter_end_date = end_date
self.__filter_strict_date = strict_date
if local_download_dir_warc:
self.__local_download_dir_warc = local_download_dir_warc
self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
self.__continue_after_error = continue_after_error
self.__callback_on_article_extracted = callback_on_article_extracted
self.__show_download_progress = show_download_progress
self.__log_level = log_level
self.__delete_warc_after_extraction = delete_warc_after_extraction
self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs
self.__run() | Crawl and extract articles form the news crawl provided by commoncrawl.org. For each article that was extracted
successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
article object.
:param log_pathname_fully_extracted_warcs:
:param delete_warc_after_extraction:
:param warc_download_url:
:param callback_on_article_extracted:
:param valid_hosts:
:param start_date:
:param end_date:
:param strict_date:
:param reuse_previously_downloaded_files:
:param local_download_dir_warc:
:param continue_after_error:
:param show_download_progress:
:param log_level:
:return: |
def search(self, field_name, field_value, record=None, **options):
records = []
from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
formula = from_name_and_value(field_name, field_value)
options['formula'] = formula
records = self.get_all(**options)
return records | Returns all matching records found in :any:`get_all`
>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]
Args:
field_name (``str``): Name of field to match (column name).
field_value (``str``): Value of field to match.
Keyword Args:
max_records (``int``, optional): The maximum total number of
records that will be returned. See :any:`MaxRecordsParam`
view (``str``, optional): The name or ID of a view.
See :any:`ViewParam`.
fields (``str``, ``list``, optional): Name of field or fields to
be retrieved. Default is all fields. See :any:`FieldsParam`.
sort (``list``, optional): List of fields to sort by.
Default order is ascending. See :any:`SortParam`.
Returns:
records (``list``): All records that matched ``field_value`` |
def enforce_versioning(force=False):
connect_str, repo_url = get_version_data()
LOG.warning("Your database uses an unversioned benchbuild schema.")
if not force and not ui.ask(
"Should I enforce version control on your schema?"):
LOG.error("User declined schema versioning.")
return None
repo_version = migrate.version(repo_url, url=connect_str)
migrate.version_control(connect_str, repo_url, version=repo_version)
return repo_version | Install versioning on the db. |
def set(cls, obj, keys, value, fill_list_value=None):
current = obj
keys_list = keys.split(".")
for idx, key in enumerate(keys_list, 1):
if type(current) == list:
try:
key = int(key)
except ValueError:
raise cls.Missing(key)
try:
if idx == len(keys_list):
if type(current) == list:
safe_list_set(
current,
key,
lambda: copy.copy(fill_list_value),
value
)
else:
current[key] = value
return
if type(key) == int:
try:
current[key]
except IndexError:
cnext = container_for_key(keys_list[idx])
if type(cnext) == list:
def fill_with():
return []
else:
def fill_with():
return {}
safe_list_set(
current,
key,
fill_with,
[] if type(cnext) == list else {}
)
else:
if key not in current:
current[key] = container_for_key(keys_list[idx])
current = current[key]
except (IndexError, KeyError, TypeError):
raise cls.Missing(key) | sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers. |
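The traversal idea, stripped to dicts only, looks like this; the real method additionally fills lists (via `safe_list_set` / `container_for_key`), which this sketch omits:

```python
# Minimal dict-only deep-setter along the same lines as cls.set().
def deep_set(obj, keys, value):
    parts = keys.split(".")
    cur = obj
    for key in parts[:-1]:
        cur = cur.setdefault(key, {})  # create intermediate containers
    cur[parts[-1]] = value

d = {}
deep_set(d, "a.b.c", 1)
print(d)  # {'a': {'b': {'c': 1}}}
```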
def proximal(self, sigma):
if sigma == 0:
return odl.IdentityOperator(self.domain)
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero()
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= (1 + sigma * self.strong_convexity)
z_ /= (1 + sigma * self.strong_convexity)
if opts['name'] == 'FGP_dual':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero()
p = opts['p']
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out
else:
raise NotImplementedError()
return tv_prox | Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10 |
def depends_on(self, dependency):
packages = self.package_info()
return [package for package in packages if dependency in package.get("requires", "")] | List of packages that depend on dependency
:param dependency: package name, e.g. 'vext' or 'Pillow' |
def next_retrieve_group_item(self, last_item=None, entry=None):
next_item = None
gerrit_version = self.version
if gerrit_version[0] == 2 and gerrit_version[1] > 9:
if last_item is None:
next_item = 0
else:
next_item = last_item
elif gerrit_version[0] == 2 and gerrit_version[1] == 9:
cause = "Gerrit 2.9.0 does not support pagination"
raise BackendError(cause=cause)
else:
if entry is not None:
next_item = entry['sortKey']
return next_item | Return the item to start from in next reviews group. |
def qualified_name(self):
idxstr = '' if self.index is None else str(self.index)
return "%s[%s]" % (self.qualified_package_name, idxstr) | Get the qualified name of the variant.
Returns:
str: Name of the variant with version and index, eg "maya-2016.1[1]". |
def diff(self, obj=None):
if self.no_resource:
return NOOP
if not self.present:
if self.existing:
return DEL
return NOOP
if not obj:
obj = self.obj()
is_diff = NOOP
if self.present and self.existing:
if isinstance(self.existing, dict):
current = dict(self.existing)
if 'id' in current:
del current['id']
if diff_dict(current, obj):
is_diff = CHANGED
elif is_unicode(self.existing):
if self.existing != obj:
is_diff = CHANGED
elif self.present and not self.existing:
is_diff = ADD
return is_diff | Determine if something has changed or not |
def __get_pricedb_session(self):
from pricedb import dal
if not self.pricedb_session:
self.pricedb_session = dal.get_default_session()
return self.pricedb_session | Provides initialization and access to module-level session |
def do_dep(self, args):
vals = args.split()
twin = None
if len(vals) > 1:
if len(vals) == 2:
var, plot = vals
elif len(vals) == 3:
var, plot, twin = vals
else:
var = vals[0]
plot = None
if not self._validate_var(var):
msg.err("Variable {} is not a valid file name and property combination.".format(var))
else:
if var in self.curargs["dependents"]:
self.curargs["dependents"].remove(var)
self.curargs["dependents"].append(var)
if plot is not None:
self.curargs["plottypes"][var] = plot
if twin is not None:
self.curargs["twinplots"][var] = twin | Adds the name and attribute of a dependent variable to the list
for plotting/tabulating functions. |
def is_blocked(self, ip):
blocked = True
if ip in self.allowed_admin_ips:
blocked = False
for allowed_range in self.allowed_admin_ip_ranges:
if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):
blocked = False
return blocked | Determine if an IP address should be considered blocked. |
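The range membership test relies on the stdlib `ipaddress` module; a quick check:

```python
# ip_address/ip_network membership, as used in is_blocked().
import ipaddress

print(ipaddress.ip_address('10.0.0.7') in ipaddress.ip_network('10.0.0.0/24'))  # True
print(ipaddress.ip_address('10.0.1.7') in ipaddress.ip_network('10.0.0.0/24'))  # False
```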
def addAction(self, action, checked=None, autoBuild=True):
actions = self._actionGroup.actions()
if actions and actions[0].objectName() == 'placeholder':
self._actionGroup.removeAction(actions[0])
actions[0].deleteLater()
if not isinstance(action, QAction):
action_name = nativestring(action)
action = QAction(action_name, self)
action.setObjectName(action_name)
action.setCheckable(self.isCheckable())
if checked or (not self._actionGroup.actions() and checked is None):
action.setChecked(True)
elif self.isCheckable():
action.setCheckable(True)
if not self.currentAction():
action.setChecked(True)
self._actionGroup.addAction(action)
if autoBuild:
self.rebuild()
return action | Adds the inputed action to this widget's action group. This will auto-\
create a new group if no group is already defined.
:param action | <QAction> || <str>
:return <QAction> |
def paint(self, painter, option, index):
body_rect = QtCore.QRectF(option.rect)
check_rect = QtCore.QRectF(body_rect)
check_rect.setWidth(check_rect.height())
check_rect.adjust(6, 6, -6, -6)
check_color = colors["idle"]
if index.data(model.IsProcessing) is True:
check_color = colors["active"]
elif index.data(model.HasFailed) is True:
check_color = colors["warning"]
elif index.data(model.HasSucceeded) is True:
check_color = colors["ok"]
elif index.data(model.HasProcessed) is True:
check_color = colors["ok"]
metrics = painter.fontMetrics()
label_rect = QtCore.QRectF(option.rect.adjusted(
check_rect.width() + 12, 2, 0, -2))
assert label_rect.width() > 0
label = index.data(model.Label)
label = metrics.elidedText(label,
QtCore.Qt.ElideRight,
label_rect.width() - 20)
font_color = colors["idle"]
if not index.data(model.IsChecked):
font_color = colors["inactive"]
painter.restore() | Paint checkbox and text
_
|_| My label |
def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE):
if source_rect is None:
source_rect_ptr = ffi.NULL
else:
source_rect_ptr = source_rect._ptr
if dest_rect is None:
dest_rect_ptr = ffi.NULL
else:
dest_rect_ptr = dest_rect._ptr
if center is None:
center_ptr = ffi.NULL
else:
center_ptr = center._ptr
check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip)) | Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center.
Args:
texture (Texture): The source texture.
source_rect (Rect): The source rectangle, or None for the entire texture.
dest_rect (Rect): The destination rectangle, or None for the entire rendering target.
rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect.
center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around
dest_rect.w/2, dest_rect.h/2).
flip (int): A value stating which flipping actions should be performed on the texture.
Raises:
SDLError: If an error is encountered. |
def get_author_label(self, urn):
author = self.get_resource_by_urn(urn)
names = author.get_names()
en_names = sorted([name[1] for name in names if name[0] == "en"], key=len)
try:
assert len(en_names) > 0
return en_names[0]
except Exception as e:
none_names = sorted([name[1] for name in names if name[0] is None], key=len)
try:
return none_names[0]
except Exception as e:
la_names = sorted([name[1] for name in names if name[0] == "la"], key=len)
try:
assert len(la_names) > 0
return la_names[0]
except Exception as e:
return None | Get the label corresponding to the author identified by the CTS URN.
try to get a lang=en label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found |
def _make_value(self, field_name, field_spec, value_spec, field_params, value):
if value is None and 'optional' in field_params:
return VOID
specs_different = field_spec != value_spec
is_any = issubclass(field_spec, Any)
if issubclass(value_spec, Choice):
is_asn1value = isinstance(value, Asn1Value)
is_tuple = isinstance(value, tuple) and len(value) == 2
is_dict = isinstance(value, dict) and len(value) == 1
if not is_asn1value and not is_tuple and not is_dict:
raise ValueError(unwrap(
'''
Can not set a native python value to %s, which has the
choice type of %s - value must be an instance of Asn1Value
''',
field_name,
type_name(value_spec)
))
if is_tuple or is_dict:
value = value_spec(value)
if not isinstance(value, value_spec):
wrapper = value_spec()
wrapper.validate(value.class_, value.tag, value.contents)
wrapper._parsed = value
new_value = wrapper
else:
new_value = value
elif isinstance(value, field_spec):
new_value = value
if specs_different:
new_value.parse(value_spec)
elif (not specs_different or is_any) and not isinstance(value, value_spec):
if (not is_any or specs_different) and isinstance(value, Asn1Value):
raise TypeError(unwrap(
'''
%s value must be %s, not %s
''',
field_name,
type_name(value_spec),
type_name(value)
))
new_value = value_spec(value, **field_params)
else:
if isinstance(value, value_spec):
new_value = value
else:
if isinstance(value, Asn1Value):
raise TypeError(unwrap(
'''
%s value must be %s, not %s
''',
field_name,
type_name(value_spec),
type_name(value)
))
new_value = value_spec(value)
if specs_different and not is_any:
wrapper = field_spec(value=new_value.dump(), **field_params)
wrapper._parsed = (new_value, new_value.__class__, None)
new_value = wrapper
new_value = _fix_tagging(new_value, field_params)
return new_value | Contructs an appropriate Asn1Value object for a field
:param field_name:
A unicode string of the field name
:param field_spec:
An Asn1Value class that is the field spec
:param value_spec:
An Asn1Value class that is the vaue spec
:param field_params:
None or a dict of params for the field spec
:param value:
The value to construct an Asn1Value object from
:return:
An instance of a child class of Asn1Value |
def kmeans(*args, **kwargs):
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
num = kwargs.get('num', 2)
bidirectional = kwargs.get('bidirectional', True)
tolerance = kwargs.get('tolerance', 1e-5)
points = lon, lat
dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional)
center_lon = np.random.choice(lon, num)
center_lat = np.random.choice(lat, num)
centers = np.column_stack([center_lon, center_lat])
while True:
dists = np.array([dist(item) for item in centers]).T
closest = dists.argmin(axis=1)
new_centers = []
for i in range(num):
mask = closest == i
_, vecs = cov_eig(lon[mask], lat[mask], bidirectional)
new_centers.append(stereonet_math.cart2sph(*vecs[:,-1]))
if np.allclose(centers, new_centers, atol=tolerance):
break
else:
centers = new_centers
return centers | Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster. |
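A hedged usage sketch (assuming mplstereonet exposes this `kmeans`; the strikes and dips are synthetic):

```python
# Two synthetic pole clusters; kmeans should recover roughly two centers.
import numpy as np
import mplstereonet

strikes = np.concatenate([np.random.normal(45, 5, 50),
                          np.random.normal(135, 5, 50)])
dips = np.random.normal(60, 5, 100)
centers = mplstereonet.kmeans(strikes, dips, num=2)  # lon/lat pairs in radians
```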
def triangle_center(tri, uv=False):
if uv:
data = [t.uv for t in tri]
mid = [0.0, 0.0]
else:
data = tri.vertices
mid = [0.0, 0.0, 0.0]
for vert in data:
mid = [m + v for m, v in zip(mid, vert)]
mid = [float(m) / 3.0 for m in mid]
return tuple(mid) | Computes the center of mass of the input triangle.
:param tri: triangle object
:type tri: elements.Triangle
:param uv: if True, then finds parametric position of the center of mass
:type uv: bool
:return: center of mass of the triangle
:rtype: tuple |
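The centroid arithmetic itself is just a per-coordinate average over the three vertices:

```python
# Standalone check of the centroid computation (plain vertex tuples).
verts = [(0.0, 0.0, 0.0), (3.0, 0.0, 0.0), (0.0, 3.0, 0.0)]
mid = tuple(sum(c) / 3.0 for c in zip(*verts))
print(mid)  # (1.0, 1.0, 0.0)
```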
def enable(self, size, block_size=None, store=None, store_sync_interval=None):
self._set('queue', size)
self._set('queue-blocksize', block_size)
self._set('queue-store', store)
self._set('queue-store-sync', store_sync_interval)
return self._section | Enables shared queue of the given size.
:param int size: Queue size.
:param int block_size: Block size in bytes. Default: 8 KiB.
:param str|unicode store: Persist the queue into file.
:param int store_sync_interval: Store sync interval in master cycles (usually seconds). |
def _pastore8(ins):
output = _paddr(ins.quad[1])
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
try:
value = int(ins.quad[2]) & 0xFFFF
if indirect:
output.append('ld a, (%i)' % value)
output.append('ld (hl), a')
else:
value &= 0xFF
output.append('ld (hl), %i' % value)
except ValueError:
output.append('pop af')
output.append('ld (hl), a')
return output | Stores 2º operand content into address of 1st operand.
1st operand is an array element. Dimensions are pushed into the
stack.
Use '*' for indirect store on 1st operand (A pointer to an array) |
def evaluate_binop_math(self, operation, left, right, **kwargs):
if not operation in self.binops_math:
raise ValueError("Invalid math binary operation ".format(operation))
if left is None or right is None:
return None
if not isinstance(left, (list, ListIP)):
left = [left]
if not isinstance(right, (list, ListIP)):
right = [right]
if not left or not right:
return None
try:
vect = self._calculate_vector(operation, left, right)
if len(vect) > 1:
return vect
return vect[0]
except:
return None | Evaluate given mathematical binary operation with given operands. |
def get_html_keywords(index_page):
keyword_lists = (
keyword_list.split(",")
for keyword_list in parse_meta(index_page, "keywords", "HTML")
)
return [
SourceString(keyword.strip(), source="HTML")
for keyword in sum(keyword_lists, [])
] | Return list of `keywords` parsed from HTML ``<meta>`` tags.
Args:
index_page (str): Content of the page as UTF-8 string
Returns:
list: List of :class:`.SourceString` objects. |
async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
text = await response.text()
logging.debug('Content returned by the server: {}'.format(text))
raise CloudStackClientException(message="Could not decode content. Server did not return json content!")
else:
data = self._transform_data(data)
if response.status != 200:
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode", response.status),
error_text=data.get("errortext"),
response=data)
while await_final_result and ('jobid' in data):
await asyncio.sleep(self.async_poll_latency)
data = await self.queryAsyncJobResult(jobid=data['jobid'])
if data['jobstatus']:  # a non-zero jobstatus means the job has finished
if not data['jobresultcode']:  # a jobresultcode of 0 indicates success
try:
return data['jobresult']
except KeyError:
pass
logging.debug("Async CloudStack call returned {}".format(str(data)))
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode"),
error_text=data.get("errortext"),
response=data)
return data | Handles the response returned from the CloudStack API. Some CloudStack API are implemented asynchronous, which
means that the API call returns just a job id. The actually expected API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict |
def edit_rrset(self, zone_name, rtype, owner_name, ttl, rdata, profile=None):
if type(rdata) is not list:
rdata = [rdata]
rrset = {"ttl": ttl, "rdata": rdata}
if profile:
rrset["profile"] = profile
uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
return self.rest_api_connection.put(uri, json.dumps(rrset)) | Updates an existing RRSet in the specified zone.
Arguments:
zone_name -- The zone that contains the RRSet. The trailing dot is optional.
rtype -- The type of the RRSet. This can be numeric (1) or
if a well-known name is defined for the type (A), you can use it instead.
owner_name -- The owner name for the RRSet.
If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
ttl -- The updated TTL value for the RRSet.
rdata -- The updated BIND data for the RRSet as a string.
If there is a single resource record in the RRSet, you can pass in the single string.
If there are multiple resource records in this RRSet, pass in a list of strings.
profile -- The profile info if this is updating a resource pool |
def stop(self, signum=None, frame=None):
BackgroundProcess.objects.filter(pk=self.process_id
).update(pid=0, last_update=now(), message='stopping')
self.cleanup()
BackgroundProcess.objects.filter(pk=self.process_id).update(pid=0,
last_update=now(),
message='stopped') | Handles a termination signal. |
def relations_dict(rel_lst):
dc = {}
for c in rel_lst:
for i in c:
for k in c:
dc.setdefault(i, [])
dc[i].append(k)
do = {}
for k in list(dc.keys()):
if dc[k]:
vl = list(set(dc[k]))
vl.remove(k)
do[k] = vl
return do | Constructs a relations dictionary from a list describing mutual (bidirectional) relations between objects.
:param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates
:returns: a dictionary
:Example:
>>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')]
>>> relations_dict(rl)
{'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'],
'z': ['y', 'x']} |
def clear(self):
self.blockSignals(True)
items = list(self.items())
for item in items:
item.close()
self.blockSignals(False)
self._currentIndex = -1
self.currentIndexChanged.emit(self._currentIndex) | Clears out all the items from this tab bar. |
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
'properties.cfg;local-properties.cfg')
prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in
prop_filenames.split(';')]
prop_filenames = ';'.join(prop_filenames)
if self.config_properties_filenames != prop_filenames:
self.config = ExtendedConfigParser.get_config_from_file(prop_filenames)
self.config_properties_filenames = prop_filenames
self.config.update_properties(os.environ)
if behave_properties:
self.config.update_properties(behave_properties) | Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties |
def split_strings(subtree: dict) -> List[str]:
strings = subtree["strings"]
lengths = subtree["lengths"]
if lengths.shape[0] == 0 and strings.shape[0] == 0:
return []
strings = strings[0]
if subtree.get("str", True):
strings = strings.decode("utf-8")
result = [None] * lengths.shape[0]
offset = 0
for i, l in enumerate(lengths):
result[i] = strings[offset:offset + l]
offset += l
return result | Produce the list of strings from the dictionary with concatenated chars \
and lengths. Opposite to :func:`merge_strings()`.
:param subtree: The dict with "strings" and "lengths".
:return: :class:`list` of :class:`str`-s or :class:`bytes`. |
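Given the function above, a small round-trip shows the expected input layout (per-item lengths alongside one concatenated byte string):

```python
# 'ab' + 'cde' merged into one byte string with per-item lengths.
import numpy as np

subtree = {"strings": np.array([b"abcde"]),
           "lengths": np.array([2, 3]),
           "str": True}
print(split_strings(subtree))  # ['ab', 'cde']
```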
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering) | Full-text search using selected `term`. |
def _get_neighbor_conf(neigh_ip_address):
neigh_conf = \
CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address)
if not neigh_conf:
raise RuntimeConfigError(desc='No neighbor configuration found for IP'
' address %s' % neigh_ip_address)
assert isinstance(neigh_conf, NeighborConf)
return neigh_conf | Returns neighbor configuration for given neighbor ip address.
Raises exception if no neighbor with `neigh_ip_address` exists. |
def read_tmy3(filename=None, coerce_year=None, recolumn=True):
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError(
'Interactive load failed: tkinter is not supported on this system')
head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude']
if filename.startswith('http'):
request = Request(filename, headers={'User-Agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36')})
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
csvdata = open(filename, 'r')
# read site metadata from the first line, leaving the buffer at the
# column-header row
firstline = csvdata.readline()
meta = dict(zip(head, firstline.rstrip('\n').split(',')))
# convert metadata strings to numeric types
meta['altitude'] = float(meta['altitude'])
meta['latitude'] = float(meta['latitude'])
meta['longitude'] = float(meta['longitude'])
meta['TZ'] = float(meta['TZ'])
meta['USAF'] = int(meta['USAF'])
data = pd.read_csv(
csvdata, header=0,
parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
date_parser=lambda *x: _parsedate(*x, year=coerce_year),
index_col='datetime')
if recolumn:
data = _recolumn(data)
data = data.tz_localize(int(meta['TZ'] * 3600))
return data, meta | Read a TMY3 file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
discrepancies between this documentation and the TMY3 User's Manual
[1], the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value.
recolumn : bool, default True
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
latitude Float site latitude
longitude Float site longitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1], Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DNISource See [1], Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1], Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1], Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1], Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1], Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1], Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10
TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUncertainty See [1], Table 1-6
TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1], Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1], Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1], Table 1-6
TMYData.RHum Relative humidity at the time indicated, percent
TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1], Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1], Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined, calm)
TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1], Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1], Table 1-6
TMYData.Hvis Distance to discernible remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1], Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1], Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1], Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1], Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1], Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
TMYData.Lprecipquantity The period of accumulation for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1], Table 1-6
TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2].
TMYData.PresWthUncertainty Present weather code uncertainty, see [2].
============================= ======================================================================================================================================================
References
----------
[1] Wilcox, S. and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
[2] Wilcox, S. (2007). "National Solar Radiation Database 1991-2005
Update: User's Manual". 472 pp.; NREL Report No. TP-581-41364. |
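A minimal usage sketch follows; the `read_tmy3` name under which the function is exposed and the station file name are assumptions, not part of this row.

```python
# Hypothetical usage; '723170TYA.CSV' is an illustrative TMY3 station file.
data, meta = read_tmy3('723170TYA.CSV', coerce_year=1990)

print(meta['Name'], meta['latitude'], meta['longitude'], meta['TZ'])
print(data[['GHI', 'DNI', 'DHI', 'DryBulb']].head())  # standard names via recolumn
```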
def get_metric_fns(metric_names, labels, outputs):
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split("/")[-1]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError("Metric {} is not implemented".format(metric_fn_name))
return metric_fns | Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name. |
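A small sketch of the lookup this performs, assuming `get_metric_fns` above is in scope and that `metrics` names a module-like object exposing an `accuracy(labels, outputs)` function (an illustrative stand-in, not the real metrics module):

```python
import types

# stand-in for the metrics module that get_metric_fns resolves names against
metrics = types.SimpleNamespace(
    accuracy=lambda labels, outputs: sum(
        int(l == o) for l, o in zip(labels, outputs)) / len(labels))

# "eval/accuracy" -> split on "/" -> looks up metrics.accuracy and calls it
fns = get_metric_fns(["eval/accuracy"], labels=[1, 0, 1], outputs=[1, 1, 1])
print(fns)  # {'eval/accuracy': 0.666...}
```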
def parse(schema):
    if not isinstance(schema, basestring):
        schema = schema.read()
    # NOTE: both regexes and the error-message strings below were stripped in
    # extraction; these are reconstructions from how the groups are used
    # ('name/version:' message headers, 'name type' field lines) and may
    # differ from the originals.
    message = re.compile(r'^([A-Za-z_][A-Za-z0-9_]*)/(\d+):$')
    field = re.compile(r'^([A-Za-z_][A-Za-z0-9_]*)\s+(\S+)$')
    registry = MessageRegistry({})
    messages = registry.messages
    curr = None
    names = None
    for lineno, line in enumerate(schema.split('\n')):
        line = line.strip()
        # strip trailing comments
        if '#' in line:
            line = line[:line.index('#')]
        if line == '':
            continue
        f = field.match(line)
        if f:
            if curr is None:
                raise ParseError(
                    'field definition outside of message at line %d' % lineno)
            name = f.group(1)
            type = f.group(2)
            if name not in names:
                f = Field(curr, name, type)
                curr.fields.append(f)
                names.add(name)
                continue
            else:
                raise ParseError(
                    'duplicate field name %r at line %d' % (name, lineno))
        m = message.match(line)
        if m:
            name, vers = m.group(1), int(m.group(2))
            if (name, vers) in messages:
                raise ParseError('duplicate message %s/%d' % (name, vers))
            curr = messages[(name, vers)] = Message(registry, name, vers, [])
            names = set()
            continue
    for message in registry.messages.values():
        message.fields = tuple(message.fields)
    return registry | Parse `schema`, either a string or a file-like object, and
return a :class:`MessageRegistry` with the loaded messages. |
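An illustrative round-trip under the reconstructed syntax above ('name/version:' headers, 'name type' field lines, '#' comments); only `.fields`, which `parse` itself populates, is accessed on the result:

```python
schema = """
point/1:            # a 2D point
    x int32
    y int32
"""
registry = parse(schema)
msg = registry.messages[('point', 1)]
print(len(msg.fields))  # 2
```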
def fetch(self, zookeeper_path, settings=None):
        # operation name reconstructed: this builds ClickHouse's
        # 'ALTER TABLE ... FETCH PARTITION ... FROM <zookeeper_path>' statement
        return self._partition_operation_sql('FETCH', settings=settings, from_part=zookeeper_path) | Download a partition from another server.
:param zookeeper_path: Path in zookeeper to fetch from
:param settings: Settings for executing request to ClickHouse over db.raw() method
:return: SQL Query |
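A hypothetical call (the `partition` object and ZooKeeper path are illustrative); the returned SQL string is executed elsewhere, per the docstring via the db.raw() method:

```python
sql = partition.fetch('/clickhouse/tables/01-01/visits')
```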
def measure(function, xs, ys, popt, weights):
    # weighted sum of squared residuals
    m = 0
    n = 0
    for x in xs:
        try:
            if len(popt) == 2:
                m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n]
            elif len(popt) == 3:
                m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n]
            else:
                raise NotImplementedError
            n += 1
        except IndexError:
            # message reconstructed; the original string was lost in extraction
            raise RuntimeError('ys/weights shorter than xs; failed at x =', x)
    return m | measure the quality of a fit
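A worked example of the weighted sum of squared residuals for a two-parameter linear model; all names here are local to the sketch:

```python
def linear(x, a, b):
    return a * x + b

xs = [0.0, 1.0, 2.0, 3.0]
ys = [0.1, 0.9, 2.2, 2.9]
popt = [1.0, 0.0]              # slope 1, intercept 0
weights = [1.0, 1.0, 1.0, 1.0]

print(measure(linear, xs, ys, popt, weights))
# (0.1)^2 + (-0.1)^2 + (0.2)^2 + (-0.1)^2 = 0.07
```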
def get_file(self, attr_name):
    return os.path.abspath(os.path.join(self.folder, "{}.log"
                                        .format(attr_name))) | Return absolute path to logging file for obj's attribute.
def _subspan(self, s, span, nextspan):
    # NOTE: many literal strings in this method were stripped during
    # extraction. The literals below are reconstructions from context and
    # from fragments left behind by the extraction; the ones marked
    # "placeholder" are illustrative only and may differ from the originals.
    text = s[span[0]:span[1]]
    lowertext = text.lower()
    # Skip if no subdivision is required
    if span[1] - span[0] < 2 or text in self.SPLIT or text in self.SPLIT_END_WORD or text in self.SPLIT_START_WORD or lowertext in self.NO_SPLIT:
        return [span]
    # Skip if it looks like a URL
    if text.startswith('http://') or text.startswith('ftp://') or text.startswith('www.'):
        return [span]
    # Split a full stop off the end of the final token (unless an ellipsis),
    # allowing closing quotes/brackets to follow it
    if self.split_last_stop and nextspan is None and text not in self.NO_SPLIT_STOP and not text[-3:] == '...':
        if text[-1] == '.':
            return self._split_span(span, -1)
        ind = text.rfind('.')
        if ind > -1 and all(t in '\'‘’"“”)]}' for t in text[ind + 1:]):
            return self._split_span(span, ind, 1)
    # Split a bracketed lowercase annotation off a number, e.g. '1.37(aq)'
    # (reconstructed from a regex fragment left by the extraction)
    m = re.match(r'^(\d+\.\d+|\d{3,})(\([a-z]+\))$', text)
    if m:
        return self._split_span(span, len(m.group(1)), 0)
    # Split an unmatched bracket off the start or end of the token
    for bpair in [('(', ')'), ('[', ']'), ('{', '}')]:
        if text.startswith(bpair[0]) and self._closing_bracket_index(text, bpair=bpair) is None:
            return self._split_span(span, 1, 0)
        if text.endswith(bpair[1]) and self._opening_bracket_index(text, bpair=bpair) is None:
            return self._split_span(span, -1, 0)
    # Characters to split on within a token
    for i, char in enumerate(text):
        before = text[:i]
        after = text[i+1:]
        if char in {':', ';'}:  # placeholder: always split on these
            return self._split_span(span, i, 1)
        elif char in {'x', '×', '÷'}:  # placeholder: signs between numbers
            if (i == 0 or self._is_number(before)) and self._is_number(after):
                return self._split_span(span, i, 1)
            if char == 'x' and before and before[-1].isalpha() and after and after[0].isalpha():
                return self._split_span(span, i, 1)
        elif char == '+':  # placeholder delimiter and guard characters
            if not (before and after and before[-1] == '+' and after[0] == '+'):
                return self._split_span(span, i, 1)
        elif char == '/':
            if not (before and after and before[-1] in self.NO_SPLIT_SLASH and after[0] in self.NO_SPLIT_SLASH):
                return self._split_span(span, i, 1)
        elif char == '>':
            if not (before and before[-1] == '-'):
                return self._split_span(span, i, 1)
            if before and before[-1] == '-':
                # '->' arrow: split it off unless it is the whole token or a
                # saccharide linkage such as '(1->4)'
                if not text == '->' and not self._is_saccharide_arrow(before[:-1], after):
                    return self._split_span(span, i - 1, 2)
        elif char == '→' and not self._is_saccharide_arrow(before, after):
            return self._split_span(span, i, 1)
        elif char == '–' and self._is_number(before) and '(' not in after and ')' not in after:
            # placeholder: en dash following a number (e.g. a range)
            return self._split_span(span, i, 1)
        elif char == '-':
            lowerbefore = lowertext[:i]
            lowerafter = lowertext[i+1:]
            # Multi-part hyphenated constructions, split into their parts.
            # The literal suffixes are placeholders with the right lengths,
            # in the spirit of 'state-of-the-art' and 'layer-by-layer'.
            if lowerafter[:7] == 'of-the-':
                return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 7), (span[0] + i + 7, span[0] + i + 8), (span[0] + i + 8, span[1])]
            if lowerafter[:5] in {'in-a-', 'on-a-'}:
                return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[0] + i + 6), (span[0] + i + 6, span[1])]
            if lowerafter[:3] in {'by-', 'to-', 'of-', 'in-', 'on-', 'at-'}:
                return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[1])]
            if lowerafter[:4] in {'and-', 'the-', 'for-'}:
                return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[1])]
            # Decide whether to split around this hyphen
            split = True
            if lowerafter == 'like':  # placeholder literal
                split = True
            elif bracket_level(text) == 0 and (not bracket_level(after) == 0 or not bracket_level(before) == 0):
                split = False
            elif lowerbefore in self.NO_SPLIT_PREFIX or lowerafter in self.NO_SPLIT_SUFFIX:
                split = False
            elif lowerafter in self.SPLIT_SUFFIX:
                split = True
            elif len(before) <= 1 or len(after) <= 2:
                split = False
            if split:
                return self._split_span(span, i, 1)
    # Split a unit suffix off the end of a numeric value
    quantity = self.QUANTITY_RE.search(text)
    if quantity:
        return self._split_span(span, len(quantity.group(6) or quantity.group(3) or quantity.group(2)), 0)
    # Split 'pH' off the start of a pH value
    if text.startswith('pH') and self._is_number(text[2:]):
        return self._split_span(span, 2, 0)
    for contraction in self.CONTRACTIONS:
        if lowertext == contraction[0]:
            return self._split_span(span, contraction[1])
    if nextspan:
        nexttext = s[nextspan[0]:nextspan[1]]
        # Split an isotope prefix off a token followed by 'NMR'
        if nexttext == 'NMR':
            ind = text.rfind('-')
            if ind > -1 and text[ind + 1:] in {'1H', '13C', '15N', '31P', '19F', '11B', '29Si', '17O', '73Ge', '195Pt', '33S', '13C{1H}'}:
                return self._split_span(span, ind, 1)
    return [span] | Recursively subdivide spans based on a series of rules.
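A standalone sketch of the span arithmetic this method relies on: splitting a (start, end) character span at index i, keeping a delimiter of the given length as its own token. This mirrors the apparent contract of `_split_span`, which is not shown in this row:

```python
def split_span(span, i, length=0):
    start, end = span
    parts = [(start, start + i),
             (start + i, start + i + length),
             (start + i + length, end)]
    return [(a, b) for a, b in parts if b > a]  # drop empty sub-spans

s = "pH7,buffer"
print([s[a:b] for a, b in split_span((0, len(s)), 3, 1)])  # ['pH7', ',', 'buffer']
```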
def _bsecurate_cli_component_file_refs(args):
    data = curate.component_file_refs(args.files)
    s = ''
    for cfile, cdata in data.items():
        s += cfile + '\n'
        rows = []
        for el, refs in cdata:
            rows.append(('    ' + el, ' '.join(refs)))
        s += '\n'.join(format_columns(rows)) + '\n\n'
    return s | Handles the component-file-refs subcommand
def available_backends():
    # header and format strings reconstructed; the originals were stripped
    print 'Available backends:'
    for name, backend in current_plugin.backend_classes.iteritems():
        print cformat('%{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description) | Lists the currently available backend types
def init(plugin_manager, course_factory, client, config):
    # defaults taken from the configuration example in the docstring
    courseid = config.get('courseid', 'external')
    course = course_factory.get_course(courseid)
    page_pattern = config.get('page_pattern', '/external')
    return_fields = re.compile(config.get('return_fields', '^(result|text|problems)$'))
    client_buffer = ClientBuffer(client)
    client_sync = ClientSync(client)
    class ExternalGrader(INGIniousPage):
        def GET(self):
            # the original static response string was stripped in extraction
            return ''
        def keep_only_config_return_values(self, job_return):
            return {key: value for key, value in job_return.items() if return_fields.match(key)}
        def POST(self):
            # allow cross-origin requests and return JSON
            web.header('Access-Control-Allow-Origin', '*')
            web.header('Content-Type', 'application/json')
post_input = web.input()
if "input" in post_input and "taskid" in post_input:
try:
task_input = json.loads(post_input.input)
except:
return json.dumps({"status": "error", "status_message": "Cannot decode input"})
try:
task = course.get_task(post_input.taskid)
except:
return json.dumps({"status": "error", "status_message": "Cannot open task"})
if not task.input_is_consistent(task_input, self.default_allowed_file_extensions, self.default_max_file_size):
return json.dumps({"status": "error", "status_message": "Input is not consistent with the task"})
if post_input.get("async") is None:
try:
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_sync.new_job(task, task_input, "Plugin - Simple Grader")
job_return = {"result":result, "grade": grade, "problems": problems, "tests": tests, "custom": custom, "state": state, "archive": archive, "stdout": stdout, "stderr": stderr}
except:
return json.dumps({"status": "error", "status_message": "An internal error occurred"})
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
jobid = client_buffer.new_job(task, task_input, "Plugin - Simple Grader")
return json.dumps({"status": "done", "jobid": str(jobid)})
elif "jobid" in post_input:
if client_buffer.is_waiting(post_input["jobid"]):
return json.dumps({"status": "waiting"})
elif client_buffer.is_done(post_input["jobid"]):
result, grade, problems, tests, custom, state, archive, stdout, stderr = client_buffer.get_result(post_input["jobid"])
job_return = {"result": result, "grade": grade, "problems": problems, "tests": tests,
"custom": custom, "archive": archive, "stdout": stdout, "stderr": stderr}
return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items())))
else:
return json.dumps({"status": "error", "status_message": "There is no job with jobid {}".format(post_input["jobid"])})
else:
return json.dumps({"status": "error", "status_message": "Unknown request type"})
plugin_manager.add_page(page_pattern, ExternalGrader) | Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database.
Available configuration:
::
plugins:
- plugin_module: inginious.frontend.plugins.simple_grader
courseid : "external"
page_pattern: "/external"
return_fields: "^(result|text|problems)$"
The grader will only return fields that are in the job return dict if their key match return_fields.
Different types of request are available : see documentation |
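A hypothetical client call against the endpoint above (URL, task id and problem id are illustrative; the form-field names follow the POST handler):

```python
import json
import requests

resp = requests.post('http://localhost:8080/external', data={
    'taskid': 'demo_task',
    'input': json.dumps({'question1': 'print("hello")'}),
})
print(resp.json())  # e.g. {'status': 'done', 'result': 'success', ...}
# Async variant: add 'async' to the form data, then poll with the returned jobid.
```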
def update_product(
self,
product,
location=None,
product_id=None,
update_mask=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
        self.log.info('Updating a Product: %s', product.name)
response = client.update_product(
product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata
)
        self.log.info('Product updated: %s', response.name if response else '')
        self.log.debug('Product updated:\n%s', response)
return MessageToDict(response) | For the documentation see:
:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator` |
def index_of_nearest(p, hot_points, distance_f=distance):
min_dist = None
nearest_hp_i = None
for i, hp in enumerate(hot_points):
dist = distance_f(p, hp)
if min_dist is None or dist < min_dist:
min_dist = dist
nearest_hp_i = i
    return nearest_hp_i | Given a point and a set of hot points, finds the hot point
nearest to the given point. An arbitrary distance function can
be specified.
:return: the index of the nearest hot point, or None if the list of hot
points is empty |
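A usage sketch with an explicit Euclidean distance (the module's default `distance` function is imported by the original module and not shown here):

```python
import math

def euclidean(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])

hot_points = [(0, 0), (5, 5), (9, 1)]
print(index_of_nearest((6, 4), hot_points, distance_f=euclidean))  # 1
```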
def markdown(self, text, mode='', context='', raw=False):
    data = None
    json = False
    headers = {}
    if raw:
        url = self._build_url('markdown', 'raw')
        data = text
        headers['content-type'] = 'text/plain'
    else:
        url = self._build_url('markdown')
        data = {}
        if text:
            data['text'] = text
        if mode in ('markdown', 'gfm'):
            data['mode'] = mode
        if context:
            data['context'] = context
        json = True
    if data:
        req = self._post(url, data=data, json=json, headers=headers)
        if req.ok:
            return req.content
    return '' | Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no
gfm, no context
:returns: str -- HTML formatted text |
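A usage sketch, assuming this is the `markdown` method of a github3.py GitHub client:

```python
import github3

gh = github3.GitHub()
html = gh.markdown('**Hello** #1', mode='gfm', context='octocat/Hello-World')
print(html)  # rendered HTML bytes
```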
def from_dict(data, ctx):
    data = data.copy()
    # field names follow the v20 InstrumentCommission definition
    if data.get('commission') is not None:
        data['commission'] = ctx.convert_decimal_number(
            data.get('commission')
        )
    if data.get('unitsTraded') is not None:
        data['unitsTraded'] = ctx.convert_decimal_number(
            data.get('unitsTraded')
        )
    if data.get('minimumCommission') is not None:
        data['minimumCommission'] = ctx.convert_decimal_number(
            data.get('minimumCommission')
        )
    return InstrumentCommission(**data) | Instantiate a new InstrumentCommission from a dict (generally from
loading a JSON response). The data used to instantiate the
InstrumentCommission is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately. |
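An illustrative call with a stand-in context; the real v20 Context provides `convert_decimal_number`, and the field names follow the reconstruction above:

```python
from decimal import Decimal

class _Ctx:
    # stand-in for the v20 context
    def convert_decimal_number(self, value):
        return Decimal(value)

raw = {'commission': '0.5', 'unitsTraded': '100000', 'minimumCommission': '1.25'}
commission = from_dict(raw, _Ctx())
```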
def validate_types(self, definition):
    if not self._strict_type_checks:
        return
    # field names follow the Firefox Telemetry scalar probe definitions
    REQUIRED_FIELDS = {
        'bug_numbers': list,
        'description': string_types,
        'expires': string_types,
        'kind': string_types,
        'notification_emails': list,
        'record_in_processes': list,
    }
    OPTIONAL_FIELDS = {
        'cpp_guard': string_types,
        'release_channel_collection': string_types,
        'keyed': bool,
    }
    LIST_FIELDS_CONTENT = {
        'bug_numbers': int,
        'notification_emails': string_types,
        'record_in_processes': string_types,
    }
    ALL_FIELDS = REQUIRED_FIELDS.copy()
    ALL_FIELDS.update(OPTIONAL_FIELDS)
    # Check that all the required fields are available.
    missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
    if len(missing_fields) > 0:
        raise ParserError(self._name + ' - missing required fields: ' +
                          ', '.join(missing_fields) +
                          '.\nSee: {}#required-fields'.format(BASE_DOC_URL))
    # Do we have any unknown field?
    unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
    if len(unknown_fields) > 0:
        raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
                          '.\nSee: {}#required-fields'.format(BASE_DOC_URL))
    # Check the type of each field.
    wrong_type_names = ['{} must be {}'.format(f, utils.nice_type_name(ALL_FIELDS[f]))
                        for f in definition.keys()
                        if not isinstance(definition[f], ALL_FIELDS[f])]
    if len(wrong_type_names) > 0:
        raise ParserError(self._name + ' - wrong types: ' + ', '.join(wrong_type_names) +
                          '.\nSee: {}#required-fields'.format(BASE_DOC_URL))
    # Check that lists are not empty and that their content has the right type.
    list_fields = [f for f in definition if isinstance(definition[f], list)]
    for field in list_fields:
        if len(definition[field]) == 0:
            raise ParserError(("Field '{}' for probe '{}' must not be empty" +
                               ".\nSee: {}#required-fields")
                              .format(field, self._name, BASE_DOC_URL))
        broken_types = \
            [not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
        if any(broken_types):
            raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}"
                               ".\nSee: {}#required-fields")
                              .format(field, self._name, utils.nice_type_name(LIST_FIELDS_CONTENT[field]),
                                      BASE_DOC_URL)) | This function performs some basic sanity checks on the scalar definition:
- Checks that all the required fields are available.
- Checks that all the fields have the expected types.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field is of the wrong type.
:raises ParserError: if a required field is missing or unknown fields are present. |
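A definition that passes the checks above (field names per the reconstruction; the values themselves are illustrative):

```python
definition = {
    'bug_numbers': [1430908],
    'description': 'Number of times the profile was loaded.',
    'expires': 'never',
    'kind': 'uint',
    'notification_emails': ['telemetry-client-dev@mozilla.com'],
    'record_in_processes': ['main'],
    'keyed': False,
}
# parser.validate_types(definition)  # raises ParserError on any violation
```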
def find_imports(self, pbds):
imports = list(set(self.uses).difference(set(self.defines)))
for imp in imports:
for p in pbds:
if imp in p.defines:
self.imports.append(p.name)
break
self.imports = list(set(self.imports))
for import_file in self.imports:
        self.lines.insert(2, 'import "{}";'.format(import_file)) | Find all missing imports in list of Pbd instances.
def load_entrypoint_plugins(entry_points, airflow_plugins):
for entry_point in entry_points:
        log.debug('Importing entry_point plugin %s', entry_point.name)
plugin_obj = entry_point.load()
if is_valid_plugin(plugin_obj, airflow_plugins):
            if callable(getattr(plugin_obj, 'on_load', None)):
plugin_obj.on_load()
airflow_plugins.append(plugin_obj)
return airflow_plugins | Load AirflowPlugin subclasses from the entrypoints
provided. The entry_point group should be 'airflow.plugins'.
:param entry_points: A collection of entrypoints to search for plugins
:type entry_points: Generator[setuptools.EntryPoint, None, None]
:param airflow_plugins: A collection of existing airflow plugins to
ensure we don't load duplicates
:type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
:rtype: list[airflow.plugins_manager.AirflowPlugin] |
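A usage sketch with setuptools entry points; the group name comes from the docstring above:

```python
import pkg_resources

plugins = load_entrypoint_plugins(
    pkg_resources.iter_entry_points('airflow.plugins'),
    airflow_plugins=[])
```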
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_interface_type(self, **kwargs):
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
trunk_list_nbr_interface_type = ET.SubElement(trunk_list_member, "trunk-list-nbr-interface-type")
    trunk_list_nbr_interface_type.text = kwargs.pop('trunk_list_nbr_interface_type')
    callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def get_pickled_sizes(obj):
sizes = []
    attrs = getattr(obj, '__dict__', {})
for name, value in attrs.items():
sizes.append((name, len(Pickled(value))))
return len(Pickled(obj)), sorted(
sizes, key=lambda pair: pair[1], reverse=True) | Return the pickled sizes of an object and its direct attributes,
ordered by decreasing size. Here is an example:
>> total_size, partial_sizes = get_pickled_sizes(Monitor(''))
>> total_size
345
>> partial_sizes
[('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4),
('_start_time', 4), ('duration', 4)]
Notice that the sizes depend on the operating system and the machine. |
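A self-contained illustration with a small object; as the docstring notes, the exact byte counts vary by platform:

```python
class Job:
    def __init__(self):
        self.payload = list(range(1000))
        self.name = 'job-1'

total, partial = get_pickled_sizes(Job())
print(total)    # total pickled size in bytes
print(partial)  # attribute sizes, largest first, e.g. [('payload', ...), ('name', ...)]
```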
def attributes(self):
attr_impl_nodes = self.adapter.get_node_attributes(self.impl_node)
return AttributeDict(attr_impl_nodes, self.impl_node, self.adapter) | Get or set this element's attributes as name/value pairs.
.. note::
Setting element attributes via this accessor will **remove**
any existing attributes, as opposed to the :meth:`set_attributes`
method which only updates and replaces them. |
def _from_binary_acl(cls, binary_stream):
rev_number, size, ace_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
aces = []
offset = cls._REPR.size
for i in range(ace_len):
ace = ACE.create_from_binary(binary_stream[offset:])
offset += len(ace)
aces.append(ace)
_MOD_LOGGER.debug("Next ACE offset = %d", offset)
nw_obj = cls((rev_number, size, aces))
_MOD_LOGGER.debug("Attempted to unpack SID from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj | See base class. |
def handleUpgradeTxn(self, txn) -> None:
FINALIZING_EVENT_TYPES = [UpgradeLog.Events.succeeded, UpgradeLog.Events.failed]
if get_type(txn) != POOL_UPGRADE:
return
logger.info("Node handles upgrade txn {}".format(self.nodeName, txn))
txn_data = get_payload_data(txn)
action = txn_data[ACTION]
version = txn_data[VERSION]
justification = txn_data.get(JUSTIFICATION)
pkg_name = txn_data.get(PACKAGE, self.config.UPGRADE_ENTRY)
upgrade_id = self.get_action_id(txn)
try:
version = src_version_cls(pkg_name)(version)
except InvalidVersionError as exc:
        # log message reconstructed; the original string was garbled in extraction
        logger.warning(
            "{} can't handle upgrade txn with version {}: {}".format(
                self.nodeName, version, exc))
        return
    # NOTE: the branches that schedule or cancel the upgrade based on ACTION
    # were lost in extraction; only the unsupported-action fallback remains.
logger.error(
"Got {} transaction with unsupported action {}".format(
POOL_UPGRADE, action)) | Handles transaction of type POOL_UPGRADE
Can schedule or cancel upgrade to a newer
version at specified time
:param txn: |
def generate(env):
SCons.Tool.createStaticLibBuilder(env)
SCons.Tool.createSharedLibBuilder(env)
SCons.Tool.createProgBuilder(env)
    # NOTE: the construction-variable names and values below were stripped in
    # extraction; these are typical settings for a link tool, shown only as
    # plausible placeholders.
    env['LINK'] = 'link'
    env['LINKFLAGS'] = ''
    env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBDIRPREFIX'] = '-L'
    env['LIBDIRSUFFIX'] = ''
    env['LIBLINKPREFIX'] = '-l'
    env['LIBLINKSUFFIX'] = ''
    env['SHLIBPREFIX'] = '$LIBPREFIX'
    env['SHLIBSUFFIX'] = '.so'
    env['SHLINKFLAGS'] = '$LINKFLAGS -shared'
    env['SHLINKCOM'] = shlib_action
    env['SHLIBEMITTER'] = shlib_emitter
    env['LDMODULEEMITTER'] = shlib_emitter | Add Builders and construction variables for lib to an Environment.
def show_storage_container_metadata(kwargs=None, storage_conn=None, call=None):
    if call != 'function':
        raise SaltCloudSystemExit(
            'The show_storage_container_metadata function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A container name must be specified as "name"')
    if not storage_conn:
        storage_conn = get_storage_conn(conn_kwargs=kwargs)
    data = storage_conn.get_container_metadata(
        container_name=kwargs['name'],
        x_ms_lease_id=kwargs.get('lease_id', None),
)
return data | .. versionadded:: 2015.8.0
Show a storage container's metadata
CLI Example:
.. code-block:: bash
salt-cloud -f show_storage_container_metadata my-azure name=myservice
name:
Name of container to show.
lease_id:
If specified, show_storage_container_metadata only succeeds if the
container's lease is active and matches this ID. |
def contains_vasp_input(dir_name):
for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
if not os.path.exists(os.path.join(dir_name, f)) and \
not os.path.exists(os.path.join(dir_name, f + ".orig")):
return False
return True | Checks if a directory contains valid VASP input.
Args:
dir_name:
Directory name to check.
Returns:
True if directory contains all four VASP input files (INCAR, POSCAR,
KPOINTS and POTCAR). |
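A usage sketch that skips directories which are not complete VASP runs; the path is illustrative:

```python
import os

run_dir = os.path.join('runs', 'Si-relax')
if contains_vasp_input(run_dir):
    print('found INCAR, POSCAR, KPOINTS and POTCAR in', run_dir)
```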
def update(self, deltat=1.0):
DNFZ.update(self, deltat)
self.dist_flown += self.speed * deltat
if self.dist_flown > self.circuit_width:
self.desired_heading = self.heading + 90
self.dist_flown = 0
if self.getalt() < self.ground_height() or self.getalt() > self.ground_height() + 2000:
self.randpos()
self.randalt() | fly a square circuit |
def set_initial(self, C_in, scale_in, scale_high):
self.C_in = C_in
self.scale_in = scale_in
    self.scale_high = scale_high | Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`. |
def _parse_plan(self, match):
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | Parse a matching plan line. |
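A hypothetical plan regex in the spirit of this parser (the real pattern lives on the parser class and is not shown in this row): a TAP plan line is '1..N', optionally followed by a directive such as '# SKIP ...':

```python
import re

plan_re = re.compile(r'^1\.\.(?P<expected>\d+)\s*(?:#\s*(?P<directive>.*))?$')

match = plan_re.match('1..0 # SKIP no database available')
print(match.group('expected'), match.group('directive'))
# 0 SKIP no database available
```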