| code | docstring |
|---|---|
def _set_show_mpls_ldp(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # the extensions dict, namespace and defining-module literals were
        # elided in the source; the values below follow the usual pyangbind
        # pattern for this module and are reconstructions
        t = YANGDynClass(v, base=show_mpls_ldp.show_mpls_ldp, is_leaf=True, yang_name="show-mpls-ldp", rest_name="show-mpls-ldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """show_mpls_ldp must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=show_mpls_ldp.show_mpls_ldp, is_leaf=True, yang_name="show-mpls-ldp", rest_name="show-mpls-ldp", yang_type='rpc', is_config=True)""",
        })
    self.__show_mpls_ldp = t
    if hasattr(self, '_set'):
        self._set() | Setter method for show_mpls_ldp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_ldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_ldp() directly. |
def escape_windows_cmd_string(s):
    # the escaped character set and replacement strings were elided in the
    # source; reconstructed from the xonsh implementation this comes from
    for c in '()%!^<>&|"':
        s = s.replace(c, '^' + c)
    s = s.replace('/?', '/.')
return s | Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php |
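A quick check of the escaping behaviour, assuming the character set reconstructed above:

```python
print(escape_windows_cmd_string('echo "hi" & del *'))
# -> echo ^"hi^" ^& del *
```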
def save_var(name, value):
connection = _State.connection()
_State.reflect_metadata()
vars_table = sqlalchemy.Table(
_State.vars_table_name, _State.metadata,
        sqlalchemy.Column('name', sqlalchemy.types.Text, primary_key=True),
        sqlalchemy.Column('value_blob', sqlalchemy.types.LargeBinary),
        sqlalchemy.Column('type', sqlalchemy.types.Text),
keep_existing=True
)
vars_table.create(bind=connection, checkfirst=True)
column_type = get_column_type(value)
if column_type == sqlalchemy.types.LargeBinary:
value_blob = value
else:
        value_blob = unicode(value).encode('utf-8')
values = dict(name=name,
value_blob=value_blob,
type=column_type.__visit_name__.lower())
    # the insert prefix string was elided in the source; scraperwiki uses
    # SQLite's 'OR REPLACE' here for upsert behaviour
    vars_table.insert(prefixes=['OR REPLACE']).values(**values).execute() | Save a variable to the table specified by _State.vars_table_name. Key is
the name of the variable, and value is the value. |
def set_pixel(self, x, y, value):
if x < 0 or x > 7 or y < 0 or y > 7:
return
self.set_led(y * 16 + x, 1 if value & GREEN > 0 else 0)
self.set_led(y * 16 + x + 8, 1 if value & RED > 0 else 0) | Set pixel at position x, y to the given value. X and Y should be values
of 0 to 7. Value should be OFF, GREEN, RED, or YELLOW. |
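The index arithmetic follows the bicolor-matrix layout: green and red LEDs for the same pixel sit 8 positions apart in each 16-LED row. A small illustration, with hypothetical constants mirroring the Adafruit convention assumed by `set_pixel`:

```python
OFF, GREEN, RED = 0, 1, 2
YELLOW = GREEN | RED  # both LEDs lit

x, y = 3, 2
print(y * 16 + x)      # 35 -> green LED index for pixel (3, 2)
print(y * 16 + x + 8)  # 43 -> red LED index sits 8 positions later
```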
def convert_string_to_number(value):
if value is None:
return 1
if isinstance(value, int):
return value
if value.isdigit():
return int(value)
    # a regex suffix appended to `numbers` was elided in the source
    num_list = map(lambda s: NUMBERS[s], re.findall(numbers, value.lower()))
return sum(num_list) | Convert strings to numbers |
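A minimal sketch of the module-level lookup the function relies on; the real `NUMBERS` table and `numbers` pattern are larger and are assumptions here:

```python
import re

NUMBERS = {'one': 1, 'two': 2, 'ten': 10}
numbers = r'(one|two|ten)'

value = 'two and ten'
num_list = [NUMBERS[s] for s in re.findall(numbers, value.lower())]
print(sum(num_list))  # 12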
def get_allow_repeat_items_metadata(self):
metadata = dict(self._allow_repeat_items_metadata)
    # both the metadata key and the _my_map key were elided in the source;
    # the names below follow the usual dlkit pattern
    metadata.update({'existing_boolean_values': self.my_osid_object_form._my_map['allowRepeatItems']})
return Metadata(**metadata) | get the metadata for allow repeat items |
def describe(table_name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.describe() | Describe a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.describe table_name region=us-east-1 |
def is_valid_delta_name(file):
filename = basename(file)
pattern = re.compile(Delta.FILENAME_PATTERN)
if re.match(pattern, filename):
return True
    return False | Return whether a file has a valid name.
A delta file name can be:
- pre-all.py
- pre-all.sql
- delta_x.x.x_ddmmyyyy.pre.py
- delta_x.x.x_ddmmyyyy.pre.sql
- delta_x.x.x_ddmmyyyy.py
- delta_x.x.x_ddmmyyyy.sql
- delta_x.x.x_ddmmyyyy.post.py
- delta_x.x.x_ddmmyyyy.post.sql
- post-all.py
- post-all.sql
where x.x.x is the version number and _ddmmyyyy is an optional
description, usually representing the date of the delta file |
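A hypothetical `Delta.FILENAME_PATTERN` matching the file names listed above; the library's actual pattern may differ:

```python
import re

FILENAME_PATTERN = (
    r'^(pre-all\.(py|sql)'
    r'|post-all\.(py|sql)'
    r'|delta_\d+\.\d+\.\d+(_[0-9a-z]+)?(\.(pre|post))?\.(py|sql))$'
)
print(bool(re.match(FILENAME_PATTERN, 'delta_1.2.3_01012020.pre.sql')))  # True
print(bool(re.match(FILENAME_PATTERN, 'delta_1.2.sql')))                 # False
```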
def stream_directory(directory,
recursive=False,
                     patterns='**',
chunk_size=default_chunk_size):
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers | Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk |
def filelist(jottapath, JFS):
log.debug("filelist %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return set()
if not isinstance(jf, JFSFolder):
return False
return set([f.name for f in jf.files() if not f.is_deleted()]) | Get a set() of files from a jottapath (a folder) |
def render_to_message(self, extra_context=None, *args, **kwargs):
message = super(TemplatedHTMLEmailMessageView, self)\
.render_to_message(extra_context, *args, **kwargs)
if extra_context is None:
extra_context = {}
context = self.get_context_data(**extra_context)
content = self.render_html_body(context)
        message.attach_alternative(content, mimetype='text/html')
return message | Renders and returns an unsent message with the given context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class` |
def update_from_file(yaml_dict, filepaths):
yaml_dict.update(registry.load(filepaths, list(yaml_dict))) | Override YAML settings with loaded values from filepaths.
- File paths in the list get priority according to their order in the list. |
def csv(self, jql, limit=1000):
        url = 'sr/jira.issueviews:searchrequest-csv-all-fields/temp/SearchRequest.csv?tempMax={limit}&jqlQuery={jql}'.format(
            limit=limit, jql=jql)
        return self.get(url, not_json_response=True, headers={'Accept': 'application/csv'}) | Get issues from jql search result with all related fields
:param jql: JQL query
:param limit: max results in the output file
:return: CSV file |
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind) | Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None. |
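A short usage sketch of the standard PyYAML call this wraps; the `!version` tag and `['version']` path are illustrative:

```python
import yaml

# Resolve every string scalar found under the top-level "version" key
# to a custom tag.
yaml.add_path_resolver('!version', ['version'], str)
# After this, loading 'version: 1.2' hands '1.2' to the '!version'
# constructor (once one is registered with yaml.add_constructor).
```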
def get_display_dataframe(self):
X = self.corpus_.get_term_doc_mat()
categories = pd.Series(self.corpus_.get_category_ids())
cat_ar = np.array(self.corpus_.get_categories())
cat_idx_sort = np.argsort(cat_ar)
if self.seasonality_column_:
        print(self.seasonality_column_)  # the printed argument was elided in the source; reconstructed guess
seasonality_ar = np.array(self.corpus_.get_df()[self.seasonality_column_])
terms = self.corpus_.get_terms()
category_idx_store = self.corpus_.get_category_index_store()
data = {}
seasondata = {}
for i, cati in enumerate(cat_idx_sort):
cat = cat_ar[cati]
if cat >= self.start_category_ and i > self.timesteps_to_lag_:
neg_cats = self.sorted_categores_[i - self.timesteps_to_lag_:i]
neg_mask = categories.isin(category_idx_store.getidxstrictbatch(neg_cats)).values
scores = self._regress_terms(X, cat, categories, category_idx_store, neg_mask, terms)
data[cat] = scores
if self.seasonality_column_:
neg_cats = set(categories[(seasonality_ar == seasonality_ar[cati]) & (categories != categories[cati])])
neg_mask = categories.isin(neg_cats).values
scores = self._regress_terms(X, cat, categories, category_idx_store, neg_mask, terms)
seasondata[cat] = scores
coefs = pd.DataFrame(data)
pos_coefs = (coefs.apply(lambda x: (x > 0) * x, axis=1)
.sum(axis=1)
.sort_values(ascending=False))
term_cat_counts = self.corpus_.get_term_freq_df()[coefs.columns]
def dense_percentile(x):
return pd.Series(x / x.max(), index=x.index)
    # the column-name strings were elided in the source; 'coefr' and 'freqr'
    # are recoverable from the F-beta lambda below, the rest are inferred
    rank_df = pd.DataFrame({'coefr': dense_percentile(pos_coefs),
                            'freqr': dense_percentile(term_cat_counts.max(axis=1)),
                            'coef': pos_coefs,
                            'freq': term_cat_counts.max(axis=1)})
if self.seasonality_column_:
seasoncoefs = (pd.DataFrame(seasondata).sum(axis=1))
        rank_df['seasonr'] = dense_percentile(seasoncoefs.sort_values(ascending=False) + np.abs(seasoncoefs.min()))  # key elided in the source; 'seasonr' inferred
weights = [2, 1, 1]
        vals = ['coefr', 'freqr', 'seasonr']  # names elided in the source; inferred from the rank_df columns above
def gethmean(x):
if min(x[vals]) == 0:
return 0
return sum(weights) * 1. / sum([weights[i] / x[val] for i, val in enumerate(vals)])
        rank_df['score'] = rank_df.apply(gethmean, axis=1)  # score column name elided in the source
    else:
        beta = 0.5
        rank_df['score'] = (rank_df
                            .apply(lambda x: 0 if min(x) == 0 else
                                   (1 + beta ** 2) * (x.coefr * x.freqr) / ((beta ** 2 * x.coefr) + x.freqr),
                                   axis=1))
    rank_df = rank_df.sort_values(by='score', ascending=False)
    # the melt/rename key strings were elided in the source; 'term',
    # 'frequency' and 'trending' are inferred from the docstring example
    display_df = pd.merge((term_cat_counts
                           .loc[rank_df.iloc[:self.num_terms_].index]
                           .reset_index()
                           .melt(id_vars=['index'])
                           .rename(columns={'index': 'term', 'value': 'frequency'})),
                          (coefs.loc[rank_df.iloc[:self.num_terms_].index]
                           .reset_index()
                           .melt(id_vars=['index'])
                           .rename(columns={'index': 'term', 'value': 'trending'})),
                          on=['term', 'variable'])
    display_df[display_df['frequency'] == 0] = np.nan
display_df = display_df.dropna()
return display_df[display_df.term.isin(rank_df.index)] | Gets list of terms to display that have some interesting diachronic variation.
Returns
-------
pd.DataFrame
e.g.,
term variable frequency trending
2 in 200310 1.0 0.000000
19 for 200310 1.0 0.000000
20 to 200311 1.0 0.000000 |
def copy(self):
copy_cpd = LinearGaussianCPD(self.variable, self.beta, self.variance,
list(self.evidence))
return copy_cpd | Returns a copy of the distribution.
Returns
-------
LinearGaussianCPD: copy of the distribution
Examples
--------
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
>>> copy_cpd = cpd.copy()
>>> copy_cpd.variable
'Y'
>>> copy_cpd.evidence
['X1', 'X2', 'X3'] |
def get_loc(self, key, method=None, tolerance=None):
if isinstance(key, str):
return self._get_string_slice(key)
else:
return pd.Index.get_loc(self, key, method=method,
tolerance=tolerance) | Adapted from pandas.tseries.index.DatetimeIndex.get_loc |
def _buildPointList(self):
try:
self.properties.pss.value = self.properties.network.read(
"{} device {} protocolServicesSupported".format(
self.properties.address, self.properties.device_id
)
)
except NoResponseFromController as error:
self._log.error("Controller not found, aborting. ({})".format(error))
return ("Not Found", "", [], [])
except SegmentationNotSupported as error:
self._log.warning("Segmentation not supported")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
self.properties.name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
self._log.info(
"Device {}:[{}] found... building points list".format(
self.properties.device_id, self.properties.name
)
)
try:
self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
self.custom_object_list
)
if self.properties.pollDelay > 0:
self.poll(delay=self.properties.pollDelay)
except NoResponseFromController as error:
self._log.error("Cannot retrieve object list, disconnecting...")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
except IndexError as error:
self._log.error("Device creation failed... disconnecting")
self.new_state(DeviceDisconnected) | Upon connection to build the device point list and properties. |
def _get_database(self, database_name):
try:
return self._client[database_name]
except InvalidName as ex:
raise DataSourceError("Cannot connect to database %s!"
% self._database) from ex | Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError |
def __upload_title(self, kibiter_major):
if kibiter_major == "6":
resource = ".kibana/doc/projectname"
data = {"projectname": {"name": self.project_name}}
mapping_resource = ".kibana/_mapping/doc"
mapping = {"dynamic": "true"}
        # the config keys were elided in the source; reconstructed from the
        # usual sirmordred configuration layout
        url = urijoin(self.conf['panels']['kibiter_url'], resource)
        mapping_url = urijoin(self.conf['panels']['kibiter_url'],
                              mapping_resource)
logger.debug("Adding mapping for dashboard title")
res = self.grimoire_con.put(mapping_url, data=json.dumps(mapping),
headers=ES6_HEADER)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Couldnt create dashboard title.")
logger.error(res.json()) | Upload to Kibiter the title for the dashboard.
The title is shown on top of the dashboard menu, and is Usually
the name of the project being dashboarded.
This is done only for Kibiter 6.x.
:param kibiter_major: major version of kibiter |
def split_lines(source, maxline=79):
result = []
extend = result.extend
append = result.append
line = []
multiline = False
count = 0
find = str.find
for item in source:
        index = find(item, '\n')
if index:
line.append(item)
multiline = index > 0
count += len(item)
else:
if line:
if count <= maxline or multiline:
extend(line)
else:
wrap_line(line, maxline, result)
count = 0
multiline = False
line = []
append(item)
return result | Split inputs according to lines.
If a line is short enough, just yield it.
Otherwise, fix it. |
def root_urns_for_deletion(self):
roots = set()
for urn in self._urns_for_deletion:
new_root = True
str_urn = utils.SmartUnicode(urn)
fake_roots = []
for root in roots:
str_root = utils.SmartUnicode(root)
if str_urn.startswith(str_root):
new_root = False
break
elif str_root.startswith(str_urn):
fake_roots.append(root)
if new_root:
roots -= set(fake_roots)
roots.add(urn)
return roots | Roots of the graph of urns marked for deletion. |
def wait_for(self, timeout=None):
from broqer.op import OnEmitFuture
return self | OnEmitFuture(timeout=timeout) | When a timeout should be applied for awaiting use this method.
:param timeout: optional timeout in seconds.
:returns: a future returning the emitted value |
def due(self):
invoice_charges = Charge.objects.filter(invoice=self)
invoice_transactions = Transaction.successful.filter(invoice=self)
return total_amount(invoice_charges) - total_amount(invoice_transactions) | The amount due for this invoice. Takes into account all entities in the invoice.
Can be < 0 if the invoice was overpaid. |
def connect(self, retry=0, delay=0):
if self.connected:
self._LOG.debug("Connect called, but we are connected?")
return
if self._connecting:
self._LOG.debug("Connect called, but we are already connecting.")
return
self._connecting = True
if delay:
self._LOG.debug("Delayed connect: %d seconds" % delay)
self.emit(self.EVENT_RECONNECT, delay)
self.sleep(delay)
self._LOG.debug("Connect initiated.")
for i, server_addr in enumerate(self.cm_servers):
if retry and i > retry:
return False
start = time()
if self.connection.connect(server_addr):
break
diff = time() - start
self._LOG.debug("Failed to connect. Retrying...")
if diff < 5:
self.sleep(5 - diff)
self.current_server_addr = server_addr
self.connected = True
self.emit(self.EVENT_CONNECTED)
self._recv_loop = gevent.spawn(self._recv_messages)
self._connecting = False
return True | Initiate connection to CM. Blocks until connected unless ``retry`` is specified.
:param retry: number of retries before returning. Unlimited when set to ``None``
:type retry: :class:`int`
:param delay: delay in seconds before connection attempt
:type delay: :class:`int`
:return: successful connection
:rtype: :class:`bool` |
def exif_name(self):
mapillary_description = json.loads(self.extract_image_description())
lat = None
lon = None
ca = None
date_time = None
if "MAPLatitude" in mapillary_description:
lat = mapillary_description["MAPLatitude"]
if "MAPLongitude" in mapillary_description:
lon = mapillary_description["MAPLongitude"]
if "MAPCompassHeading" in mapillary_description:
        if "TrueHeading" in mapillary_description["MAPCompassHeading"]:
            ca = mapillary_description["MAPCompassHeading"]["TrueHeading"]
if "MAPCaptureTime" in mapillary_description:
date_time = datetime.datetime.strptime(
mapillary_description["MAPCaptureTime"], "%Y_%m_%d_%H_%M_%S_%f").strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]
    # the format string was elided in the source; reconstructed from the
    # docstring's '{lat}_{lon}_{ca}_{datetime}_{hash}' description
    filename = '{}_{}_{}_{}_{}'.format(
        lat, lon, ca, date_time, uuid.uuid4())
return filename | Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash} |
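An illustration of the reconstructed template above:

```python
import datetime, uuid

lat, lon, ca = 52.52, 13.405, 270.0
date_time = datetime.datetime(2020, 1, 1, 12, 0, 0).strftime(
    "%Y-%m-%d-%H-%M-%S-%f")[:-3]
print('{}_{}_{}_{}_{}'.format(lat, lon, ca, date_time, uuid.uuid4()))
# e.g. 52.52_13.405_270.0_2020-01-01-12-00-00-000_<uuid>
```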
def doDelete(self, WHAT={}):
    if hasattr(WHAT, 'RECORDID'):
        self._addDBParam('RECORDID', WHAT.RECORDID)
        self._addDBParam('MODID', WHAT.MODID)
    elif type(WHAT) == dict and WHAT.has_key('RECORDID'):
        self._addDBParam('RECORDID', WHAT['RECORDID'])
    else:
        # the error strings were elided in the source; reconstructed
        raise FMError, 'Invalid argument WHAT (%s) for doDelete' % type(WHAT)
    if self._layout == '':
        raise FMError, 'No layout was selected'
    if self._checkRecordID() == 0:
        raise FMError, 'RecordID is missing'
    return self._doAction('-delete') | This function will perform the command -delete. |
def get_column_flat(self, field, components=None, computed_type=None):
    # the computed_type default string and the field name tested to set
    # `offset` were elided in the source
    offset = False  # originally: field == <elided column name>
    return self.pack_column_flat(self.get_column(field, components, computed_type),
                                 components,
                                 offset=offset) | TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components: |
def get_anchor_href(markup):
    soup = BeautifulSoup(markup, 'html.parser')
    return ['%s' % link.get('href') for link in soup.find_all('a')] | Given HTML markup, return a list of hrefs for each anchor tag. |
def grabImage(self, index):
rect = self.visualRect(index)
pixmap = QtGui.QPixmap()
pixmap = pixmap.grabWidget(self, rect)
return pixmap | Gets an image of the item at *index*
:param index: index of an item in the view
:type index: :qtdoc:`QModelIndex`
:returns: :qtdoc:`QPixmap` |
def _shutdown_proc(p, timeout):
freq = 10
for _ in range(1 + timeout * freq):
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait() | Wait for a proc to shut down, then terminate or kill it after `timeout`. |
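A minimal driver for `_shutdown_proc`, assuming a POSIX system with a `sleep` binary: start a short-lived process and give it up to 3 seconds to exit on its own.

```python
import subprocess

p = subprocess.Popen(['sleep', '1'])
print(_shutdown_proc(p, timeout=3))  # 0 once the sleep finishes gracefully
```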
def collect_filtered_models(discard, *input_values):
ids = set([])
collected = []
queued = []
def queue_one(obj):
if obj.id not in ids and not (callable(discard) and discard(obj)):
queued.append(obj)
for value in input_values:
_visit_value_and_its_immediate_references(value, queue_one)
while queued:
obj = queued.pop(0)
if obj.id not in ids:
ids.add(obj.id)
collected.append(obj)
_visit_immediate_value_references(obj, queue_one)
return collected | Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
*discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
list[Model] : the collected models |
async def set_volume(self, vol: int):
if self._lavalink._server_version <= 2:
self.volume = max(min(vol, 150), 0)
else:
self.volume = max(min(vol, 1000), 0)
        await self._lavalink.ws.send(op='volume', guildId=self.guild_id, volume=self.volume) | Sets the player's volume (150% or 1000% limit imposed by lavalink depending on the version). |
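The `max(min(...))` pattern above is a plain clamp; for example, with the 0-150 bounds used for older server versions:

```python
def clamp(vol, lo=0, hi=150):
    return max(min(vol, hi), lo)

print(clamp(200))  # 150
print(clamp(-5))   # 0
```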
def fit_transform(self, X, y=None, **params):
return self.fit(X, y).transform(X, y) | Learn vocabulary and return document id matrix.
This is equivalent to fit followed by transform.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
Returns:
list : document id matrix.
list: label id matrix. |
def wide(self):
        if not opcode_table[self.opcode].get('can_be_wide'):
return False
if self.operands[0].value >= 255:
return True
if self.opcode == 0x84:
if self.operands[1].value >= 255:
return True
return False | ``True`` if this instruction needs to be prefixed by the WIDE
opcode. |
def load_html(self, mode, html):
self._html_loaded_flag = False
if mode == HTML_FILE_MODE:
self.setUrl(QtCore.QUrl.fromLocalFile(html))
elif mode == HTML_STR_MODE:
self.setHtml(html)
else:
raise InvalidParameterError()
counter = 0
sleep_period = 0.1
timeout = 20
while not self._html_loaded_flag and counter < timeout:
counter += sleep_period
time.sleep(sleep_period)
QgsApplication.processEvents() | Load HTML to this class with the mode specified.
There are two modes that can be used:
* HTML_FILE_MODE: Directly from a local HTML file.
* HTML_STR_MODE: From a valid HTML string.
:param mode: The mode.
:type mode: int
:param html: The html that will be loaded. If the mode is a file,
then it should be a path to the html file. If the mode is a string,
then it should be a valid HTML string.
:type html: str |
def create_char(self, location, bitmap):
assert 0 <= location <= 7,
assert len(bitmap) == 8,
pos = self.cursor_pos
self.command(c.LCD_SETCGRAMADDR | location << 3)
for row in bitmap:
self._send_data(row)
self.cursor_pos = pos | Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
:param location: The place in memory where the character is stored.
Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
when bitmap has an incorrect size.
Example:
.. sourcecode:: python
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley) |
def _parse_canonical_regex(doc):
    regex = doc['$regularExpression']
    if len(doc) != 1:
        raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
    if len(regex) != 2:
        raise TypeError('Bad $regularExpression must include only "pattern" '
                        'and "options" components: %s' % (doc,))
    return Regex(regex['pattern'], regex['options']) | Decode a JSON regex to bson.regex.Regex. |
def _list_po_to_dict(tokens) -> ListAbundance:
func = tokens[FUNCTION]
dsl = FUNC_TO_LIST_DSL[func]
members = [parse_result_to_dsl(token) for token in tokens[MEMBERS]]
return dsl(members) | Convert a list parse object to a node.
:type tokens: ParseResult |
def calc_model(cortex, model_argument, model_hemi=Ellipsis, radius=np.pi/3):
if pimms.is_str(model_argument):
h = cortex.chirality if model_hemi is Ellipsis else \
None if model_hemi is None else \
model_hemi
model = retinotopy_model(model_argument, hemi=h, radius=radius)
else:
model = model_argument
if not isinstance(model, RegisteredRetinotopyModel):
        raise ValueError('model must be a RegisteredRetinotopyModel')
return model | calc_model loads the appropriate model object given the model argument, which may given the name
of the model or a model object itself.
Required afferent parameters:
@ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that
can be loaded.
Optional afferent parameters:
@ model_hemi May be used to specify the hemisphere of the model; this is usually only used
when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if
left at the default value (Ellipsis), then it will use the hemisphere of the cortex param.
Provided efferent values:
@ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered. |
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
starttime = time.time()
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "enabled"))
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "enabled"))
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_CREATE % nve_int_num))
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf)))
self.capture_and_print_timeshot(
starttime, "enable_vxlan",
switch=nexus_host) | Enable VXLAN on the switch. |
def source(self):
if len(self.sources) == 0:
raise ValueError("No source associated with %s" % self.__class__.__name__)
elif len(self.sources) > 1:
raise ValueError("Multiple sources for %s" % self.__class__.__name__)
return list(self.sources)[0] | Returns the single source name for a variant collection if it is unique,
otherwise raises an error. |
def validate_location_instance_valid_for_arc(sender, instance, action, reverse, pk_set, *args, **kwargs):
    if action == 'pre_add':
        if reverse:
            for apk in pk_set:
                arc_node = ArcElementNode.objects.get(pk=apk)
                if arc_node.parent_outline != instance.outline:
                    # message elided in the source; reconstructed
                    raise IntegrityError(_('Arc and location must be from the same outline!'))
        else:
            for lpk in pk_set:
                loc_instance = LocationInstance.objects.get(pk=lpk)
                if loc_instance.outline != instance.parent_outline:
                    # message elided in the source; reconstructed
                    raise IntegrityError(_('Arc and location must be from the same outline!')) | Evaluates attempts to add location instances to arc, ensuring they are from same outline. |
def find_one(cls, *args, **kw):
if len(args) == 1 and not isinstance(args[0], Filter):
args = (getattr(cls, cls.__pk__) == args[0], )
Doc, collection, query, options = cls._prepare_find(*args, **kw)
result = Doc.from_mongo(collection.find_one(query, **options))
return result | Get a single document from the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
Automatically calls `to_mongo` with the retrieved data.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one |
def onCancelButton(self, event):
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
self.main_frame.Raise() | Quit grid with warning if unsaved changes present |
def find_node_type(ast, node_type):
if type(ast) is node_type:
return [ast]
elif type(ast) is list:
return reduce(operator.add, list(map(lambda a: find_node_type(a, node_type), ast)), [])
elif ast is None:
return []
else:
return reduce(operator.add,
[find_node_type(o[1], node_type) for o in ast.children()], []) | Return list of array references in AST. |
def zoom2D(xi, yi, zi, xi_zoom=3., yi_zoom=3., order=3, mode="nearest", cval=0.):
xi = ndimage.interpolation.zoom(xi, xi_zoom, order=order, mode="nearest")
yi = ndimage.interpolation.zoom(yi, yi_zoom, order=order, mode="nearest")
zi = ndimage.interpolation.zoom(zi, (xi_zoom, yi_zoom), order=order, mode=mode, cval=cval)
return xi, yi, zi | Zoom a 2D array, with axes.
Parameters
----------
xi : 1D array
x axis points.
yi : 1D array
y axis points.
zi : 2D array
array values. Shape of (x, y).
xi_zoom : float (optional)
Zoom factor along x axis. Default is 3.
yi_zoom : float (optional)
Zoom factor along y axis. Default is 3.
order : int (optional)
The order of the spline interpolation, between 0 and 5. Default is 3.
mode : {'constant', 'nearest', 'reflect', or 'wrap'}
Points outside the boundaries of the input are filled according to the
given mode. Default is nearest.
cval : scalar (optional)
Value used for constant mode. Default is 0.0. |
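A minimal use of `zoom2D` on a small grid (this assumes `ndimage` is the `scipy.ndimage` import the function relies on):

```python
import numpy as np
from scipy import ndimage

xi = np.linspace(0, 1, 4)
yi = np.linspace(0, 1, 5)
zi = np.outer(xi, yi)
xi2, yi2, zi2 = zoom2D(xi, yi, zi, xi_zoom=2, yi_zoom=2)
print(zi2.shape)  # (8, 10) -> each axis doubled
```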
def V_vertical_torispherical_concave(D, f, k, h):
alpha = asin((1-2*k)/(2.*(f-k)))
a1 = f*D*(1-cos(alpha))
a2 = k*D*cos(alpha)
D1 = 2*f*D*sin(alpha)
s = (k*D*sin(alpha))**2
t = 2*a2
def V1(h):
u = h-f*D*(1-cos(alpha))
v1 = pi/4*(2*a1**3/3. + a1*D1**2/2.) + pi*u*((D/2.-k*D)**2 +s)
v1 += pi*t*u**2/2. - pi*u**3/3.
v1 += pi*D*(1-2*k)*((2*u-t)/4.*(s+t*u-u**2)**0.5 + t*s**0.5/4.
+ k**2*D**2/2.*(acos((t-2*u)/(2*k*D)) -alpha))
return v1
def V2(h):
v2 = pi*h**2/4.*(2*a1 + D1**2/(2.*a1) - 4*h/3.)
return v2
if 0 <= h < a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V1(a1+a2-h)
elif a2 <= h < a1 + a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V2(a1+a2-h)
else:
Vf = pi*D**2*h/4 - V1(a1+a2)
return Vf | r'''Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/ |
def fly(self):
dst_dir = Path(self.conf_file).parent.abspath
package_dir = Path(dst_dir, self.package.shortname)
try:
if package_dir.exists():
shutil.rmtree(package_dir.abspath)
except Exception as e:
print(" can't be removed! Error: %s" % (package_dir, e))
for pkg, parent, sub_packages, sub_modules in self.package.walk():
if not is_ignored(pkg, self.ignored_package):
dir_path = Path(*([dst_dir, ] + pkg.fullname.split(".")))
init_path = Path(dir_path, "__init__.rst")
make_dir(dir_path.abspath)
make_file(
init_path.abspath,
self.generate_package_content(pkg),
)
for mod in sub_modules:
if not is_ignored(mod, self.ignored_package):
module_path = Path(dir_path, mod.shortname + ".rst")
make_file(
module_path.abspath,
self.generate_module_content(mod),
) | Generate doc tree. |
def render(self, renderer=None, **kwargs):
return Markup(get_renderer(current_app, renderer)(**kwargs).visit(
self)) | Render the navigational item using a renderer.
:param renderer: An object implementing the :class:`~.Renderer`
interface.
:return: A markupsafe string with the rendered result. |
def main():
    # note: elided string literals below (CLI flags, dict keys, plot titles,
    # output format) are reconstructed from the pmagpy fishqq conventions
    fmt,plot='svg',0
    outfile=""
    if '-h' in sys.argv: # check if help is needed
        print(main.__doc__)
        sys.exit() # graceful quit
    elif '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        f=open(file,'r')
        data=f.readlines()
    if '-F' in sys.argv:
        ind=sys.argv.index('-F')
        outfile=open(sys.argv[ind+1],'w') # open output file
    if '-sav' in sys.argv: plot=1
    if '-fmt' in sys.argv:
        ind=sys.argv.index('-fmt')
        fmt=sys.argv[ind+1]
    DIs,nDIs,rDIs= [],[],[] # set up lists for data
    for line in data: # read in the data
        if '\t' in line:
            rec=line.split('\t') # split on tabs
        else:
            rec=line.split() # split on whitespace
        DIs.append([float(rec[0]),float(rec[1])])
    ppars=pmag.doprinc(DIs) # get principal directions
    for rec in DIs: # sort directions into normal and reverse modes
        angle=pmag.angle([rec[0],rec[1]],[ppars['dec'],ppars['inc']])
        if angle>90.:
            rDIs.append(rec)
        else:
            nDIs.append(rec)
    if len(rDIs) >=10 or len(nDIs) >=10:
        D1,I1=[],[]
        QQ={'unf1':1,'exp1':2}
        pmagplotlib.plot_init(QQ['unf1'],5,5)
        pmagplotlib.plot_init(QQ['exp1'],5,5)
        if len(nDIs) < 10: # only the reverse mode has enough directions
            ppars=pmag.doprinc(rDIs) # get principal directions
            Drbar,Irbar=ppars['dec']-180.,-ppars['inc']
            Nr=len(rDIs)
            for di in rDIs:
                d,irot=pmag.dotilt(di[0],di[1],Drbar-180.,90.-Irbar) # rotate to mean
                drot=d-180.
                if drot<0:drot=drot+360.
                D1.append(drot)
                I1.append(irot)
            Dtit='Mode 2 Declinations'
            Itit='Mode 2 Inclinations'
        else:
            ppars=pmag.doprinc(nDIs) # get principal directions
            Dnbar,Inbar=ppars['dec'],ppars['inc']
            Nn=len(nDIs)
            for di in nDIs:
                d,irot=pmag.dotilt(di[0],di[1],Dnbar-180.,90.-Inbar) # rotate to mean
                drot=d-180.
                if drot<0:drot=drot+360.
                D1.append(drot)
                I1.append(irot)
            Dtit='Mode 1 Declinations'
            Itit='Mode 1 Inclinations'
        Mu_n,Mu_ncr=pmagplotlib.plot_qq_unf(QQ['unf1'],D1,Dtit) # make plot
        Me_n,Me_ncr=pmagplotlib.plot_qq_exp(QQ['exp1'],I1,Itit) # make plot
        if outfile!="":
            if Mu_n<=Mu_ncr and Me_n<=Me_ncr: # both tests pass
                F='Y'
            else:
                F='N'
            outstring='%7.1f %7.1f %i %5.3f %5.3f %5.3f %5.3f %s\n'%(Dnbar,Inbar,Nn,Mu_n,Mu_ncr,Me_n,Me_ncr,F)
            outfile.write(outstring)
    else:
        print('you need N>=10 for at least one mode')
        sys.exit()
    if len(rDIs)>10 and len(nDIs)>10:
        D2,I2=[],[]
        QQ['unf2']=3
        QQ['exp2']=4
        pmagplotlib.plot_init(QQ['unf2'],5,5)
        pmagplotlib.plot_init(QQ['exp2'],5,5)
        ppars=pmag.doprinc(rDIs) # get principal directions
        Drbar,Irbar=ppars['dec']-180.,-ppars['inc']
        Nr=len(rDIs)
        for di in rDIs:
            d,irot=pmag.dotilt(di[0],di[1],Drbar-180.,90.-Irbar) # rotate to mean
            drot=d-180.
            if drot<0:drot=drot+360.
            D2.append(drot)
            I2.append(irot)
        Dtit='Mode 2 Declinations'
        Itit='Mode 2 Inclinations'
        Mu_r,Mu_rcr=pmagplotlib.plot_qq_unf(QQ['unf2'],D2,Dtit) # make plot
        Me_r,Me_rcr=pmagplotlib.plot_qq_exp(QQ['exp2'],I2,Itit) # make plot
        if outfile!="":
            if Mu_r<=Mu_rcr and Me_r<=Me_rcr: # both tests pass
                F='Y'
            else:
                F='N'
            outstring='%7.1f %7.1f %i %5.3f %5.3f %5.3f %5.3f %s\n'%(Drbar,Irbar,Nr,Mu_r,Mu_rcr,Me_r,Me_rcr,F)
            outfile.write(outstring)
    files={}
    for key in list(QQ.keys()):
        files[key]=file+'_'+key+'.'+fmt
    if pmagplotlib.isServer:
        black = '#000000'
        purple = '#800080'
        titles={}
        titles['eq']='Q-Q Plot'
        EQ = pmagplotlib.add_borders(EQ,titles,black,purple)
        pmagplotlib.save_plots(QQ,files)
    elif plot==1:
        pmagplotlib.save_plots(QQ,files)
    else:
        pmagplotlib.draw_figs(QQ)
        ans=input(" S[a]ve to save plot, [q]uit without saving: ")
        if ans=="a": pmagplotlib.save_plots(QQ,files) | NAME
fishqq.py
DESCRIPTION
makes qq plot from dec,inc input data
INPUT FORMAT
takes dec/inc pairs in space delimited file
SYNTAX
fishqq.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
-F FILE, specify output file for statistics
-sav save and quit [saves as input file name plus fmt extension]
-fmt specify format for output [png, eps, svg, pdf]
OUTPUT:
Dec Inc N Mu Mu_crit Me Me_crit Y/N
where direction is the principal component and Y/N is Fisherian or not
separate lines for each mode with N >=10 (N and R) |
def show_vcs_output_vcs_nodes_vcs_node_info_switch_fcf_mac(self, **kwargs):
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
switch_fcf_mac = ET.SubElement(vcs_node_info, "switch-fcf-mac")
        switch_fcf_mac.text = kwargs.pop('switch_fcf_mac')
        callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def seoify_hyperlink(hyperlink):
    last_slash = hyperlink.rfind('/')
    # the regex strings were elided in the source; reconstructed from the
    # docstring (replace hyphens with spaces, collapse multiple spaces)
    return re.sub(r'[-\s]+', ' ', hyperlink[last_slash + 1:]) | Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from |
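With the reconstructed pattern above:

```python
print(seoify_hyperlink('https://example.com/my-seo--friendly_page'))
# -> 'my seo friendly_page'
```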
def update(self, scope, at=0):
    if hasattr(scope, '_mixins') and not at:
        self._mixins.update(scope._mixins)
    self[at]['__variables__'].update(scope[at]['__variables__'])
    self[at]['__blocks__'].extend(scope[at]['__blocks__'])
    self[at]['__names__'].extend(scope[at]['__names__']) | Update scope. Add another scope to this one.
self[at][].extend(scope[at][]) | Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update |
def transaction_status(transaction):
    if not transaction or not transaction.get('transactionStatus'):
        return blank()
    return FormattedItem(
        transaction['transactionStatus'].get('name'),
        transaction['transactionStatus'].get('friendlyName')) | Returns a FormattedItem describing the given transaction.
:param item: An object capable of having an active transaction |
def listify_string(something):
if isinstance(something, (str, six.text_type)):
return [something]
elif something is not None:
return list(something)
else:
return list() | Takes *something* and make it a list.
*something* is either a list of strings or a string, in which case the
function returns a list containing the string.
If *something* is None, an empty list is returned. |
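The three branches in action:

```python
print(listify_string('abc'))       # ['abc']
print(listify_string(('a', 'b')))  # ['a', 'b']
print(listify_string(None))        # []
```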
def get_gradebook_column_gradebook_assignment_session(self, proxy):
if not self.supports_gradebook_column_gradebook_assignment():
raise errors.Unimplemented()
return sessions.GradebookColumnGradebookAssignmentSession(proxy=proxy, runtime=self._runtime) | Gets the session for assigning gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookAssignmentSession)
- a ``GradebookColumnGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.* |
def profile_config_set(name, config_key, config_value,
remote_addr=None,
cert=None, key=None, verify_cert=True):
profile = profile_get(
name,
remote_addr,
cert,
key,
verify_cert,
_raw=True
)
    return _set_property_dict_item(
        profile, 'config', config_key, config_value
    ) | Set a profile config item.
name :
The name of the profile to set the config item to.
config_key :
The items key.
config_value :
Its items value.
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_set autostart boot.autostart 0 |
def community_post_votes(self, post_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/votes
api_path = "/api/v2/community/posts/{post_id}/votes.json"
api_path = api_path.format(post_id=post_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/help_center/votes#list-votes |
def get_content_charset(self, failobj=None):
missing = object()
    charset = self.get_param('charset', missing)
    if charset is missing:
        return failobj
    if isinstance(charset, tuple):
        # RFC 2231-encoded, so decode it; it had better end up as ascii
        pcharset = charset[0] or 'us-ascii'
        try:
            charset = str(charset[2].encode('raw-unicode-escape'), pcharset)
        except UnicodeError:
            return failobj
return charset.lower() | Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned. |
def permission_to_pyramid_acls(permissions):
acls = []
for perm in permissions:
if perm.type == "user":
acls.append((Allow, perm.user.id, perm.perm_name))
elif perm.type == "group":
acls.append((Allow, "group:%s" % perm.group.id, perm.perm_name))
return acls | Returns a list of permissions in a format understood by pyramid
:param permissions:
:return: |
def run_rsync_project():
    excludes = ""
    for exclude in settings.RSYNC_EXCLUDES:
        excludes += " --exclude '{0}'".format(exclude)
    # the two FAB_SETTING names (source and destination paths) were elided
    # in the source; the names below are hypothetical
    command = "rsync -avz --stats --delete {0} {1} {2}".format(
        excludes, settings.FAB_SETTING('PROJECT_ROOT'),
        settings.FAB_SETTING('SERVER_PROJECT_ROOT'))
    run(command) | Copies the project from the git repository to its destination folder.
This has the nice side effect of rsync deleting all ``.pyc`` files and
removing other files that might have been left behind by sys admins messing
around on the server.
Usage::
fab <server> run_rsync_project |
def refresh_modules(self, module_string=None, exact=True):
if not module_string:
if time.time() > (self.last_refresh_ts + 0.1):
self.last_refresh_ts = time.time()
else:
return
update_i3status = False
for name, module in self.output_modules.items():
if (
module_string is None
or (exact and name == module_string)
or (not exact and name.startswith(module_string))
):
if module["type"] == "py3status":
if self.config["debug"]:
self.log("refresh py3status module {}".format(name))
module["module"].force_update()
else:
if self.config["debug"]:
self.log("refresh i3status module {}".format(name))
update_i3status = True
if update_i3status:
self.i3status_thread.refresh_i3status() | Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes. |
def update_milestone(id, **kwargs):
data = update_milestone_raw(id, **kwargs)
if data:
return utils.format_json(data) | Update a ProductMilestone |
def space_list(args):
r = fapi.list_workspaces()
fapi._check_response_code(r, 200)
spaces = []
project = args.project
if project:
        project = re.compile('^' + project)
    for space in r.json():
        ns = space['workspace']['namespace']
        if project and not project.match(ns):
            continue
        ws = space['workspace']['name']
        spaces.append(ns + '\t' + ws)
return sorted(spaces, key=lambda s: s.lower()) | List accessible workspaces, in TSV form: <namespace><TAB>workspace |
async def processClaims(self, allClaims: Dict[ID, Claims]):
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res | Processes and saves received Claims.
:param allClaims: claims to be processed and saved for each claim
definition. |
def add_done_callback(self, future, callback):
if future is None:
raise bridge.BridgeReferenceError(
"Tried to add a callback to a nonexistent Future. "
"Make sure you pass the `returns` argument to your JavaMethod")
return self.loop.add_done_callback(future, callback) | Add a callback on a future object put here so it can be
implemented with different event loops.
Parameters
-----------
future: Future or Deferred
Future implementation for the current EventLoop
callback: callable
Callback to invoke when the future is done |
def _IfdEntryFactory(stream_rdr, offset):
ifd_entry_classes = {
TIFF_FLD.ASCII: _AsciiIfdEntry,
TIFF_FLD.SHORT: _ShortIfdEntry,
TIFF_FLD.LONG: _LongIfdEntry,
TIFF_FLD.RATIONAL: _RationalIfdEntry,
}
field_type = stream_rdr.read_short(offset, 2)
if field_type in ifd_entry_classes:
entry_cls = ifd_entry_classes[field_type]
else:
entry_cls = _IfdEntry
return entry_cls.from_stream(stream_rdr, offset) | Return an |_IfdEntry| subclass instance containing the value of the
directory entry at *offset* in *stream_rdr*. |
def sample(self, data, sample_size=15000,
blocked_proportion=0.5, original_length=None):
self._checkData(data)
self.active_learner = self.ActiveLearner(self.data_model)
self.active_learner.sample_combo(data, blocked_proportion,
sample_size, original_length) | Draw a sample of record pairs from the dataset
(a mix of random pairs & pairs of similar records)
and initialize active learning with this sample
Arguments: data -- Dictionary of records, where the keys are
record_ids and the values are dictionaries with the keys being
field names
sample_size -- Size of the sample to draw
blocked_proportion -- Proportion of the sample that will be blocked
original_length -- Length of original data, should be set if `data` is
a sample of full data |
def find_disulfide_bridges(self, threshold=3.0):
if self.structure:
parsed = self.structure
else:
parsed = self.parse_structure()
if not parsed:
            # log messages with elided or garbled strings below are reconstructed
            log.error('{}: unable to search for disulfide bridges, structure could not be parsed'.format(self.id))
            return
        disulfide_bridges = ssbio.protein.structure.properties.residues.search_ss_bonds(parsed.first_model,
                                                                                        threshold=threshold)
        if not disulfide_bridges:
            log.debug('{}: no disulfide bridges found'.format(self.id))
        for chain, bridges in disulfide_bridges.items():
            self.chains.get_by_id(chain).seq_record.annotations['SSBOND-biopython'] = disulfide_bridges[chain]
            log.debug('{}: found {} disulfide bridges'.format(chain, len(bridges)))
            log.debug("Stored disulfide bridges in chain {}'s seq_record letter_annotations".format(chain)) | Run Biopython's search_ss_bonds to find potential disulfide bridges for each chain and store in ChainProp.
Will add a list of tuple pairs into the annotations field, looks like this::
[ ((' ', 79, ' '), (' ', 110, ' ')),
((' ', 174, ' '), (' ', 180, ' ')),
((' ', 369, ' '), (' ', 377, ' '))]
Where each pair is a pair of cysteine residues close together in space. |
def load_file(self, filename):
self._say("Loading file: " + filename)
        fh = codecs.open(filename, 'r', 'utf-8')
lines = fh.readlines()
fh.close()
self._say("Parsing " + str(len(lines)) + " lines of code from " + filename)
self._parse(filename, lines) | Load and parse a RiveScript document.
:param str filename: The path to a RiveScript file. |
def get_uri(source):
    import gst
    src_info = source_info(source)
    if src_info['is_file']:  # is this a file?
        return get_uri(src_info['uri'])
    elif gst.uri_is_valid(source):
        uri_protocol = gst.uri_get_protocol(source)
        if gst.uri_protocol_is_supported(gst.URI_SRC, uri_protocol):
            return source
        else:
            raise IOError('Invalid URI source for Gstreamer')
    else:
        raise IOError('Failed getting uri for path %s: no such file' % source) | Check a media source as a valid file or uri and return the proper uri |
def subscribe(self, requested_timeout=None, auto_renew=False):
class AutoRenewThread(threading.Thread):
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super(AutoRenewThread, self).__init__(*args, **kwargs)
self.interval = interval
self.sub = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
sub = self.sub
stop_flag = self.stop_flag
interval = self.interval
while not stop_flag.wait(interval):
log.info("Autorenewing subscription %s", sub.sid)
sub.renew()
self.requested_timeout = requested_timeout
if self._has_been_unsubscribed:
            raise SoCoException(
                'Cannot resubscribe instance once unsubscribed')
service = self.service
if not event_listener.is_running:
event_listener.start(service.soco)
ip_address, port = event_listener.address
if config.EVENT_ADVERTISE_IP:
ip_address = config.EVENT_ADVERTISE_IP
        headers = {
            'Callback': '<http://{}:{}>'.format(ip_address, port),
            'NT': 'upnp:event'
        }
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
with _subscriptions_lock:
            response = requests.request(
                'SUBSCRIBE', service.base_url + service.event_subscription_url,
                headers=headers)
headers=headers)
response.raise_for_status()
            self.sid = response.headers['sid']
            timeout = response.headers['timeout']
            # per the UPnP spec, timeout is 'infinite' or e.g. 'Second-123'
            if timeout.lower() == 'infinite':
                self.timeout = None
            else:
                self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Subscribed to %s, sid: %s",
service.base_url + service.event_subscription_url, self.sid)
_subscriptions[self.sid] = self
atexit.register(self.unsubscribe)
if not auto_renew:
return
interval = self.timeout * 85 / 100
auto_renew_thread = AutoRenewThread(
interval, self._auto_renew_thread_flag, self)
auto_renew_thread.start() | Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`. |
def getDatabaseFileSize(self):
if DISABLE_PERSISTENT_CACHING:
return "?"
size = os.path.getsize(self.__db_filepath)
if size > 1000000000:
size = "%0.3fGB" % (size / 1000000000)
elif size > 1000000:
size = "%0.2fMB" % (size / 1000000)
elif size > 1000:
size = "%uKB" % (size // 1000)
else:
size = "%uB" % (size)
return size | Return the file size of the database as a pretty string. |
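The same pretty-printing logic, stand-alone (note it uses decimal 1000-based units, as in the original):

```python
def pretty_size(size):
    if size > 1000000000:
        return "%0.3fGB" % (size / 1000000000)
    elif size > 1000000:
        return "%0.2fMB" % (size / 1000000)
    elif size > 1000:
        return "%uKB" % (size // 1000)
    return "%uB" % size

print(pretty_size(2048))     # 2KB
print(pretty_size(3500000))  # 3.50MB
```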
def extractFromURL(url,
                   cache=False,
                   cacheDir='_cache',  # default directory string elided in the source
                   verbose=False,
                   encoding=None,
                   filters=None,
                   userAgent=None,
                   timeout=5,
                   blur=5,
                   ignore_robotstxt=False,
                   only_mime_types=None,
                   raw=False):
    blur = int(blur)
    try:
        import chardet
    except ImportError as e:
        raise ImportError(("%s\nYou need to install chardet.\n" + \
            "e.g. sudo pip install chardet") % e)
    if only_mime_types and isinstance(only_mime_types, six.text_type):
        only_mime_types = only_mime_types.split(',')
    if cache:
        if not os.path.isdir(cacheDir):
            cache_perms = 488  # i.e. 0o750
            os.makedirs(cacheDir, cache_perms)
        cache_key = generate_key(url)
        cached_content = cache_get(cacheDir, cache_key)
        if cached_content:
            return cached_content
    if not ignore_robotstxt:
        if not check_robotstxt(url, cache, cacheDir, userAgent=userAgent):
            if verbose: print("Request denied by robots.txt")
            return
    # several print/format strings below were elided in the source and are
    # reconstructed
    if verbose: print('Retrieving %s' % url)
    html = fetch(
        url,
        timeout=timeout,
        userAgent=userAgent,
        only_mime_types=only_mime_types)
    if not html:
        return
    if not encoding:
        if isinstance(html, unicode):
            html = html.encode('utf8', 'replace')
        encoding_opinion = chardet.detect(html)
        encoding = encoding_opinion['encoding']
        if verbose: print('Using encoding %s' % encoding)
    if verbose: print('Original HTML length: %i' % len(html))
    if cache:
        raw_key = generate_key(url, "%s.raw")
        cache_set(cacheDir, raw_key, html)
    if filters:
        filter_names = map(str.strip, filters.split(','))
        for filter_name in filter_names:
            fltr = get_filter(filter_name)
            html = fltr(html)
    html = tidyHTML(html)
    if verbose: print('Tidied HTML length: %i' % len(html))
    if not html:
        return
    html = unicode(html, encoding=encoding, errors='replace')
    if raw:
        return html
    res = extractFromHTML(html, blur=blur)
    assert isinstance(res, unicode)
    res = res.encode(encoding, 'replace')
    if cache:
        cache_set(cacheDir, cache_key, res)
    return res | Extracts text from a URL.
return res | Extracts text from a URL.
Parameters:
url := string
Remote URL or local filename where HTML will be read.
cache := bool
True=store and retrieve url from cache
False=always retrieve url from the web
cacheDir := str
Directory where cached url contents will be stored.
verbose := bool
True=print logging messages
False=print no output
encoding := string
The encoding of the page contents.
If none given, it will attempt to guess the encoding.
See http://docs.python.org/howto/unicode.html for further info
on Python Unicode and encoding support.
filters := string
Comma-delimited list of filters to apply before parsing.
only_mime_types := list of strings
A list of mime-types to limit parsing to.
If the mime-type of the raw-content retrieved does not match
one of these, a value of None will be returned. |
def successors(self):
if not self.children:
return
for part in self.children:
yield part
for subpart in part.successors():
yield subpart | Yield Compounds below self in the hierarchy.
Yields
-------
mb.Compound
The next Particle below self in the hierarchy |
def _CreateComplexTypeFromData(
self, elem_type, type_is_override, data, set_type_attrs):
elem_arguments = dict(elem_type.elements)
instantiated_arguments = {
k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)
        for k, v in data if k != 'xsi_type'}
    if set_type_attrs:
        found_type_attr = next((e_name for e_name, _ in elem_type.elements
                                if e_name.endswith('.Type')), None)
if found_type_attr and type_is_override:
instantiated_arguments[found_type_attr] = elem_type.qname.localname
return elem_type(**instantiated_arguments) | Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
An fully initialized SOAP element. |
def stylize(self, images, style=None, verbose=True, max_size=800, batch_size = 4):
if(batch_size < 1):
            raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from mxnet import gluon as _gluon
from .._mxnet import _mxnet_utils
set_of_all_idx = self._style_indices()
style, single_style = self._style_input_check(style)
if isinstance(max_size, _six.integer_types):
input_shape = (max_size, max_size)
else:
input_shape = max_size[::-1]
images, unpack = self._canonize_content_input(images, single_style=single_style)
dataset_size = len(images)
output_size = dataset_size * len(style)
batch_size_each = min(batch_size, output_size)
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=batch_size_each)
if num_mxnet_gpus == 0:
batch_size_each = 1
            loader_type = 'favor-native-size'  # loader-type strings elided in the source; reconstructed
        else:
            loader_type = 'pad'
self._model.batch_size = batch_size_each
self._model.hybridize()
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size_each)
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
last_time = 0
if dataset_size == 0:
raise _ToolkitError("SFrame cannot be empty")
content_feature = _tkutl._find_only_image_column(images)
_raise_error_if_not_training_sframe(images, content_feature)
max_h = 0
max_w = 0
oversized_count = 0
for img in images[content_feature]:
if img.height > input_shape[0] or img.width > input_shape[1]:
oversized_count += 1
max_h = max(img.height, max_h)
max_w = max(img.width, max_w)
if input_shape[0] > max_h:
input_shape = (max_h, input_shape[1])
if input_shape[1] > max_w:
input_shape = (input_shape[0], max_w)
        # The batch-processing loop that consumed the data iterator, populated
        # the output SFrame builder (`sb`) and printed progress was lost in
        # extraction; only its final statement survives below.
        return unpack(sb.close()) | Stylize an SFrame of Images given a style index or a list of
return unpack(sb.close()) | Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns] |
def pick_flat_z(data):
zmes = []
    for i in data['zmes']:  # dict key elided in the source; 'zmes' inferred from the accumulator name
zmes.append(i[:, 0])
return np.asarray(zmes) | Generate a 2D array of the quasiparticle weight by only selecting the
first particle data |
def get_covalent_bonds(self, tol=0.2):
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds | Determines the covalent bonds in a molecule.
Args:
tol (float): The tol to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds |
def clear_preview(self):
    # the provider-manager key and _my_map keys were elided in the source;
    # the dlkit-style names below are reconstructions
    try:
        rm = self.my_osid_object._get_provider_manager('REPOSITORY')
    except AttributeError:
        rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
    try:
        aas = rm.get_asset_admin_session_for_repository(
            Id(self.my_osid_object._my_map['assignedRepositoryIds'][0]))
    except AttributeError:
        aas = rm.get_asset_admin_session_for_repository(
            Id(self.my_osid_object_form._my_map['assignedRepositoryIds'][0]))
    if 'previewId' not in self.my_osid_object_form._my_map['texts']:
        raise NotFound()
    aas.delete_asset(
        Id(self.my_osid_object_form._my_map['texts']['previewId']))
    del self.my_osid_object_form._my_map['texts']['previewId'] | stub |
def _contextualise(self):
deja_vues = []
for rank in reversed(self.taxonomy):
clades = [e for e in self.hierarchy[rank] if e[1]]
uniques = [e for e in clades if len(e[0]) == 1]
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
for e in uniques:
ident = e[0][0].ident
            self[ident]['cident'] = e[1]  # key elided in the source; 'cident' inferred from the docstring
deja_vues.append(ident) | Determine contextual idents (cidents) |
def close_hover(self, element, use_js=False):
try:
if use_js:
                self._js_hover('mouseleave', element)  # event name elided in the source; reconstructed
else:
actions = ActionChains(self.driver)
actions.move_to_element_with_offset(element, -100, -100)
actions.reset_actions()
except (StaleElementReferenceException, MoveTargetOutOfBoundsException):
return True | Close hover by moving to a set offset "away" from the element being hovered.
:param element: element that triggered the hover to open
:param use_js: use javascript to close hover
:return: None |
def get_entity_propnames(entity):
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() +
ins.mapper.relationships.keys()
) | Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set |
def CreateGroup(self, GroupName):
groups = self.CustomGroups
        self._DoCommand('CREATE GROUP %s' % tounicode(GroupName))
for g in self.CustomGroups:
if g not in groups and g.DisplayName == GroupName:
return g
        raise SkypeError(0, 'Group creating failed')
:Parameters:
GroupName : unicode
Group name.
:return: A group object.
:rtype: `Group`
:see: `DeleteGroup` |
def add_sink(self, sink):
if sink is not None:
self.sinks.add(sink)
            if hasattr(sink, 'start'):
sink.start() | Add a vehicle data sink to the instance. ``sink`` should be a
sub-class of ``DataSink`` or at least have a ``receive(message,
**kwargs)`` method.
The sink will be started if it is startable. (i.e. it has a ``start()``
method). |
def managed(name,
venv_bin=None,
requirements=None,
system_site_packages=False,
distribute=False,
use_wheel=False,
clear=False,
python=None,
extra_search_dir=None,
never_download=None,
prompt=None,
user=None,
cwd=None,
index_url=None,
extra_index_url=None,
pre_releases=False,
no_deps=False,
pip_download=None,
pip_download_cache=None,
pip_exists_action=None,
pip_ignore_installed=False,
proxy=None,
use_vt=False,
env_vars=None,
no_use_wheel=False,
pip_upgrade=False,
pip_pkgs=None,
pip_no_cache_dir=False,
pip_cache_dir=None,
process_dependency_links=False,
no_binary=None,
**kwargs):
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'virtualenv.create' not in __salt__:
ret['result'] = False
ret['comment'] = 'Virtualenv was not detected on this system'
return ret
if salt.utils.platform.is_windows():
venv_py = os.path.join(name, 'Scripts', 'python.exe')
else:
venv_py = os.path.join(name, 'bin', 'python')
venv_exists = os.path.exists(venv_py)
return ret | Create a virtualenv and optionally manage it with pip
name
Path to the virtualenv.
venv_bin: virtualenv
The name (and optionally path) of the virtualenv command. This can also
be set globally in the minion config file as ``virtualenv.venv_bin``.
requirements: None
Path to a pip requirements file. If the path begins with ``salt://``
the file will be transferred from the master file server.
use_wheel: False
Prefer wheel archives (requires pip >= 1.4).
python : None
Python executable used to build the virtualenv
user: None
The user under which to run virtualenv and pip.
cwd: None
Path to the working directory where `pip install` is executed.
no_deps: False
Pass `--no-deps` to `pip install`.
pip_exists_action: None
Default action of pip when a path already exists: (s)witch, (i)gnore,
(w)ipe, (b)ackup.
proxy: None
Proxy address which is passed to `pip install`.
env_vars: None
Set environment variables that some builds will depend on. For example,
a Python C-module may have a Makefile that needs INCLUDE_PATH set to
pick up a header file while compiling.
no_use_wheel: False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
pip_upgrade: False
Pass `--upgrade` to `pip install`.
pip_pkgs: None
As an alternative to `requirements`, pass a list of pip packages that
should be installed.
process_dependency_links: False
Run `pip install` with the `--process-dependency-links` flag.
.. versionadded:: 2017.7.0
Also accepts any kwargs that the virtualenv module will. However, some
kwargs, such as the ``pip`` option, require ``- distribute: True``.
.. code-block:: yaml
/var/www/myvirtualenv.com:
virtualenv.managed:
- system_site_packages: False
- requirements: salt://REQUIREMENTS.txt
- env_vars:
PATH_VAR: '/usr/local/bin/' |
def _crps_cdf_single(x, cdf_or_dist, xmin=None, xmax=None, tol=1e-6):
cdf = getattr(cdf_or_dist, 'cdf', cdf_or_dist)
assert callable(cdf)
if (tol is not None) and ((cdf(xmin) >= tol) or (cdf(xmax) <= (1. - tol))):
raise ValueError(
'CDF does not meet tolerance requirements at %s extreme(s); '
'consider using the function defaults or infinite bounds.'
% ('lower' if cdf(xmin) >= tol else 'upper'))
def lhs(y):
return np.square(cdf(y))
lhs_int, lhs_tol = integrate.quad(lhs, xmin, x)
if (tol is not None) and (lhs_tol >= 0.5 * tol):
raise ValueError(
'Lower integral did not evaluate to within tolerance! '
'Tolerance achieved: %f, integral value: %f. Consider '
'setting a lower value for xmin.' %
(lhs_tol, lhs_int))
def rhs(y):
return np.square(1. - cdf(y))
rhs_int, rhs_tol = integrate.quad(rhs, x, xmax)
if (tol is not None) and (rhs_tol >= 0.5 * tol):
raise ValueError(
'Upper integral did not evaluate to within tolerance! '
'Tolerance achieved: %f, integral value: %f. Consider '
'setting a higher value for xmax.' %
(rhs_tol, rhs_int))
return lhs_int + rhs_int | See crps_cdf for docs. |
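The routine evaluates CRPS(F, x) = integral of F(y)^2 over y < x plus integral of (1 - F(y))^2 over y > x by quadrature. A sanity-check sketch against the closed form for a standard normal, CRPS = x*(2*Phi(x) - 1) + 2*phi(x) - 1/sqrt(pi); the wide finite bounds stand in for infinities:
import numpy as np
from scipy import stats

x = 0.3
numeric = _crps_cdf_single(x, stats.norm, xmin=-10.0, xmax=10.0)
closed = x * (2 * stats.norm.cdf(x) - 1) + 2 * stats.norm.pdf(x) - 1 / np.sqrt(np.pi)
print(numeric, closed)  # both ~0.2693; they should agree to roughly 1e-6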
def mean(data, n=3, **kwargs):
if len(data[-n:]) < n:
forecast = np.nan
else:
forecast = np.mean(data[-n:])
return forecast | The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series. |
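A worked example:
import numpy as np

data = np.array([2.0, 4.0, 6.0, 8.0])
print(mean(data, n=3))      # (4 + 6 + 8) / 3 = 6.0
print(mean(data[:2], n=3))  # only 2 observations for n=3 -> nan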
def create_core(self, thing_name, config_file, region=None,
cert_dir=None, account_id=None,
policy_name='ggc-default-policy', profile_name=None):
config = GroupConfigFile(config_file=config_file)
if config.is_fresh() is False:
raise ValueError(
"Config file already tracking previously created core or group"
)
if region is None:
region = self._region
if account_id is None:
account_id = self._account_id
keys_cert, thing = self.create_thing(thing_name, region, cert_dir)
cert_arn = keys_cert['certificateArn']
config['core'] = {
'thing_arn': thing['thingArn'],
'cert_arn': cert_arn,
'cert_id': keys_cert['certificateId'],
'thing_name': thing_name
}
logging.debug("create_core cfg:{0}".format(config))
logging.info("Thing: '{0}' associated with cert: '{1}'".format(
thing_name, cert_arn))
core_policy = self.get_core_policy(
core_name=thing_name, account_id=account_id, region=region)
iot_client = _get_iot_session(region=region, profile_name=profile_name)
self._create_attach_thing_policy(
cert_arn, core_policy,
iot_client=iot_client, policy_name=policy_name
)
misc = config['misc']
misc['policy_name'] = policy_name
config['misc'] = misc | Using the `thing_name` value, creates a Thing in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as a Greengrass Core.
:param thing_name: the name of the thing to create and use as a
Greengrass Core
:param config_file: config file used to track the Greengrass Core in the
group
:param region: the region in which to create the new core.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param account_id: the account_id in which to create the new core.
[default: None]
:param policy_name: the name of the policy to associate with the device.
[default: 'ggc-default-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None] |
def showLayer(self, title='', debugText=''):
img = PIL.Image.fromarray(self.data, 'RGB')
if debugText != '':
draw = PIL.ImageDraw.Draw(img)
font = PIL.ImageFont.truetype("DejaVuSansMono.ttf", 24)
draw.text((0, 0), debugText, (255, 255, 255), font=font)
img.show(title=title) | Shows the single layer.
:param title: A string with the title of the window where to render the image.
:param debugText: A string with some text to render over the image.
:rtype: Nothing. |
def solveConsPrefShock(solution_next,IncomeDstn,PrefShkDstn,
LivPrb,DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,
aXtraGrid,vFuncBool,CubicBool):
solver = ConsPrefShockSolver(solution_next,IncomeDstn,PrefShkDstn,LivPrb,
DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,aXtraGrid,
vFuncBool,CubicBool)
solver.prepareToSolve()
solution = solver.solve()
return solution | Solves a single period of a consumption-saving model with preference shocks
to marginal utility. Problem is solved using the method of endogenous gridpoints.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
PrefShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter. Order:
probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution: ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using linear splines), a marginal value
function vPfunc, a minimum acceptable level of normalized market re-
sources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc. The consumption
function is defined over normalized market resources and the preference
shock, c = cFunc(m,PrefShk), but the (marginal) value function is defined
unconditionally on the shock, just before it is revealed. |
def _pyshark_read_frame(self):
from pcapkit.toolkit.pyshark import packet2dict, tcp_traceflow
packet = next(self._extmp)
self._frnum = int(packet.number)
self._proto = packet.frame_info.protocols
if self._flag_v:
print(f' - Frame {self._frnum:>3d}: {self._proto}')
frnum = f'Frame {self._frnum}'
if not self._flag_q:
info = packet2dict(packet)
if self._flag_f:
ofile = self._ofile(f'{self._ofnm}/{frnum}.{self._fext}')
ofile(info, name=frnum)
else:
self._ofile(info, name=frnum)
if self._flag_d:
setattr(packet, 'packet2dict', packet2dict)
self._frame.append(packet)
if self._flag_t:
flag, data = tcp_traceflow(packet)
if flag:
self._trace(data)
return packet | Read frames. |
def to_bedtool(iterator):
def gen():
for i in iterator:
yield helpers.asinterval(i)
return pybedtools.BedTool(gen()) | Convert any iterator into a pybedtools.BedTool object.
Note that the supplied iterator is not consumed by this function. To save
to a temp file or to a known location, use the `.saveas()` method of the
returned BedTool object. |
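A usage sketch; the gffutils database is an assumption suggested by helpers.asinterval, and the file names are illustrative:
import gffutils

db = gffutils.FeatureDB('annotation.db')         # hypothetical database file
genes = to_bedtool(db.features_of_type('gene'))  # lazily wraps the iterator
genes.saveas('genes.bed')                        # materialize to a known path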
def get_next_step(self):
layer_purpose = self.parent.step_kw_purpose.selected_purpose()
if layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()
else:
subcategory = {'key': None}
field_groups = get_field_groups(
layer_purpose['key'], subcategory['key'])
compulsory_field = get_compulsory_fields(
layer_purpose['key'], subcategory['key'])
# `get_fields` is an assumption inferred from the keyword arguments and
# the use of `default_inasafe_fields` below
default_inasafe_fields = get_fields(
layer_purpose['key'], subcategory['key'],
replace_null=True,
in_group=False)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields
return self.parent.step_kw_source | Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep |
def nth(series, n, order_by=None):
if order_by is not None:
series = order_series_by(series, order_by)
try:
return series.iloc[n]
except IndexError:
return np.nan | Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization. |
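A quick example:
import pandas as pd

s = pd.Series([10, 20, 30])
print(nth(s, 1))   # 20
print(nth(s, 5))   # nan -- position out of range
print(nth(s, -1))  # 30 -- negative positions count from the end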
def list_customer_users(self, customer_id):
content = self._fetch("/customer/users/%s" % customer_id)
return map(lambda x: FastlyUser(self, x), content) | List all users from a specified customer id. |
def create_control(self, pid, callback, callback_parsed=None):
logger.info("create_control(pid=\"%s\", control_cb=%s) [lid=%s]", pid, callback, self.__lid)
if callback_parsed:
callback = self._client._get_parsed_control_callback(callback_parsed, callback)
return self.__create_point(R_CONTROL, pid, control_cb=callback) | Create a control for this Thing with a local point id (pid) and a control request feedback
Returns a new [Control](Point.m.html#IoticAgent.IOT.Point.Control) object
or the existing one if the Control already exists
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local id of your Control
`callback` (required) (function reference) callback function to invoke on receipt of a control request.
The callback receives a single dict argument, with keys of:
#!python
'data' # (decoded or raw bytes)
'mime' # (None, unless payload was not decoded and has a mime type)
'subId' # (the global id of the associated subscription)
'entityLid' # (local id of the Thing to which the control belongs)
'lid' # (local id of control)
'confirm' # (whether a confirmation is expected)
'requestId' # (required for sending confirmation)
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of control data. This
is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both
`callback_parsed` and `callback` have been specified, the former takes precedence and `callback` is only called
if the point data could not be parsed according to its current value description.
`NOTE`: `callback_parsed` can only be used if `auto_encode_decode` is enabled for the client instance. |
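A callback sketch following the documented request dict; `thing` stands in for an existing Thing instance and the pid 'led' is illustrative:
def control_cb(args):
    print('control %s received %r' % (args['lid'], args['data']))
    if args['confirm']:
        # a real handler would send a confirmation using args['requestId']
        pass

control = thing.create_control('led', control_cb)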
def discard_member(self, member, pipe=None):
pipe = self.redis if pipe is None else pipe
pipe.zrem(self.key, self._pickle(member)) | Remove *member* from the collection, unconditionally. |
def HA2(credentials, request, algorithm):
if credentials.get("qop") == "auth" or credentials.get() is None:
return H(b":".join([request[].encode(), request[].encode()]), algorithm)
elif credentials.get("qop") == "auth-int":
for k in 'method', 'uri', 'body':
if k not in request:
raise ValueError("%s required" % k)
A2 = b":".join([request[].encode(),
request[].encode(),
H(request[], algorithm).encode()])
return H(A2, algorithm)
raise ValueError | Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody)) |
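A worked example of the "auth" branch; the H helper here is an assumption that hex-digests with the named algorithm, matching the docstring's MD5(method:digestURI):
import hashlib

def H(data, algorithm):
    # assumed digest helper; hashlib accepts 'MD5' as an algorithm name
    return hashlib.new(algorithm, data).hexdigest()

credentials = {'qop': 'auth'}
request = {'method': 'GET', 'uri': '/dir/index.html'}
print(HA2(credentials, request, 'MD5'))
# same as hashlib.md5(b'GET:/dir/index.html').hexdigest()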