| code | docstring |
|---|---|
def xray_heartbeat_batch_handler(self, unused_channel, data):
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
heartbeat_data = gcs_entries.Entries(0)
message = (ray.gcs_utils.HeartbeatBatchTableData.
GetRootAsHeartbeatBatchTableData(heartbeat_data, 0))
for j in range(message.BatchLength()):
heartbeat_message = message.Batch(j)
num_resources = heartbeat_message.ResourcesAvailableLabelLength()
static_resources = {}
dynamic_resources = {}
for i in range(num_resources):
dyn = heartbeat_message.ResourcesAvailableLabel(i)
static = heartbeat_message.ResourcesTotalLabel(i)
dynamic_resources[dyn] = (
heartbeat_message.ResourcesAvailableCapacity(i))
static_resources[static] = (
heartbeat_message.ResourcesTotalCapacity(i))
client_id = ray.utils.binary_to_hex(heartbeat_message.ClientId())
ip = self.raylet_id_to_ip_map.get(client_id)
if ip:
self.load_metrics.update(ip, static_resources,
dynamic_resources)
else:
logger.warning(
"Monitor: "
"could not find ip for client {}".format(client_id)) | Handle an xray heartbeat batch message from Redis. |
def _match_maximum_decimal(self, match_key, decimal_value, match):
if decimal_value is None:
raise NullArgument()
if match is None:
match = True
if match:
ltegt = '$lte'
else:
ltegt = '$gt'
if match_key in self._query_terms:
self._query_terms[match_key][ltegt] = decimal_value
else:
self._query_terms[match_key] = {ltegt: decimal_value} | Matches a maximum decimal value |
def variables(self, name):
if isinstance(name, tuple):
name = name[0]
if name.startswith('@{'):
name = '@' + name[2:-1]
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__variables__']:
return self[i]['__variables__'][name]
return False | Search for variable by name. Searches scope top down
Args:
name (string): Search term
Returns:
Variable object OR False |
def reg_incomplete_beta(a, b, x):
if (x == 0):
return 0
elif (x == 1):
return 1
else:
lbeta = (math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) +
a * math.log(x) + b * math.log(1 - x))
if (x < (a + 1) / (a + b + 2)):
return math.exp(lbeta) * __contfractbeta(a, b, x) / a
else:
return 1 - math.exp(lbeta) * __contfractbeta(b, a, 1 - x) / b | Incomplete beta function; code translated from: Numerical Recipes in C.
:param a: a > 0
:param b: b > 0
:param x: 0 <= x <= 1. |
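For reference, the standard identities behind this row, stated to make the two branches legible: the function returns the regularized incomplete beta function

$$I_x(a,b) = \frac{1}{B(a,b)} \int_0^x t^{a-1}(1-t)^{b-1}\,dt,$$

where `lbeta` in the code is $\ln\frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} + a\ln x + b\ln(1-x)$, and the `else` branch uses the symmetry $I_x(a,b) = 1 - I_{1-x}(b,a)$ so that the continued fraction `__contfractbeta` is always evaluated on the side where it converges quickly (the $x < (a+1)/(a+b+2)$ test).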
def add_input(cmd, immediate=False):
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd) | add some command input to be processed |
def explode(self):
points = np.column_stack((
self.points,
self.points)).ravel()[1:-1].reshape((-1, 2))
exploded = [Line(i) for i in points]
return exploded | If the current Line entity consists of multiple lines,
break it up into n Line entities.
Returns
----------
exploded: (n,) Line entities |
def ingest(event):
set_service_status(Service.INGEST, ServiceStatus.BUSY)
notify.notify()
recording_state(event.uid, 'uploading')
update_event_status(event, Status.UPLOADING)
service = config()['service-ingest']
service = service[randrange(0, len(service))]
logger.info('Using ingest service: ' + service)
logger.info()
mediapackage = http_request(service + '/createMediaPackage')
prop = 'org.opencastproject.capture.agent.properties'
dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
for attachment in event.get_data().get('attach'):
data = attachment.get('data')
if attachment.get('x-apple-filename') == prop:
workflow_def, workflow_config = get_config_params(data)
elif attachment.get('fmttype') == 'application/xml' and dcns in data:
name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
logger.info('Adding DC catalog %s' % name)
fields = [('mediaPackage', mediapackage),
('flavor', 'dublincore/%s' % name),
('dublinCore', data.encode())]
mediapackage = http_request(service + '/addDCCatalog', fields)
for (flavor, track) in event.get_tracks():
logger.info('Adding track ({} -> {})'.format(flavor, track))
track = track.encode('ascii', 'ignore')
fields = [('mediaPackage', mediapackage), ('flavor', flavor),
('BODY1', (pycurl.FORM_FILE, track))]
mediapackage = http_request(service + '/addTrack', fields)
logger.info()
fields = [('mediaPackage', mediapackage)]
if workflow_def:
fields.append(('workflowDefinitionId', workflow_def))
if event.uid:
fields.append(('workflowInstanceId',
event.uid.encode('ascii', 'ignore')))
fields += workflow_config
mediapackage = http_request(service + '/ingest', fields)
recording_state(event.uid, 'upload_finished')
update_event_status(event, Status.FINISHED_UPLOADING)
notify.notify()
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
logger.info() | Ingest a finished recording to the Opencast server. |
def set_stderrthreshold(s):
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
", , , , and . "
.format(s, type(s))) | Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value. |
def sortkey(x):
k = str(x[1]).zfill(3) + str(x[2]).zfill(3) + str(x[3]).zfill(3)
return k | Return '001002003' for (colorname, 1, 2, 3) |
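A minimal usage sketch, assuming `sortkey` from the row above is in scope; the zero-padding is what makes numeric fields compare correctly as strings:

assert sortkey(('colorname', 1, 2, 3)) == '001002003'
# Without padding, '10' would sort before '2' lexicographically.
items = [('b', 10, 0, 0), ('a', 2, 0, 0)]
assert sorted(items, key=sortkey) == [('a', 2, 0, 0), ('b', 10, 0, 0)]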
def ServiceWorker_deliverPushMessage(self, origin, registrationId, data):
assert isinstance(origin, (str,)
), "Argument 'origin' must be of type str. Received type: %s" % type(
origin)
assert isinstance(registrationId, (str,)
), "Argument 'registrationId' must be of type str. Received type: %s" % type(
registrationId)
assert isinstance(data, (str,)
), "Argument 'data' must be of type str. Received type: %s" % type(
data)
subdom_funcs = self.synchronous_command('ServiceWorker.deliverPushMessage',
origin=origin, registrationId=registrationId, data=data)
return subdom_funcs | Function path: ServiceWorker.deliverPushMessage
Domain: ServiceWorker
Method name: deliverPushMessage
Parameters:
Required arguments:
'origin' (type: string) -> No description
'registrationId' (type: string) -> No description
'data' (type: string) -> No description
No return value. |
def sample_mgrid(self, mgrid: np.array) -> np.array:
mgrid = np.ascontiguousarray(mgrid, np.float32)
if mgrid.shape[0] != self.dimensions:
raise ValueError(
"mgrid.shape[0] must equal self.dimensions, "
"%r[0] != %r" % (mgrid.shape, self.dimensions)
)
out = np.ndarray(mgrid.shape[1:], np.float32)
if mgrid.shape[1:] != out.shape:
raise ValueError(
"mgrid.shape[1:] must equal out.shape, "
"%r[1:] != %r" % (mgrid.shape, out.shape)
)
lib.NoiseSampleMeshGrid(
self._tdl_noise_c,
out.size,
ffi.cast("float*", mgrid.ctypes.data),
ffi.cast("float*", out.ctypes.data),
)
return out | Sample a mesh-grid array and return the result.
The :any:`sample_ogrid` method performs better as there is a lot of
overhead when working with large mesh-grids.
Args:
mgrid (numpy.ndarray): A mesh-grid array of points to sample.
A contiguous array of type `numpy.float32` is preferred.
Returns:
numpy.ndarray: An array of sampled points.
This array has the shape: ``mgrid.shape[1:]``.
The ``dtype`` is `numpy.float32`. |
def _compute_acq_withGradients(self, x):
means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
fmins = self.model.get_fmin()
f_acqu = None
df_acqu = None
for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
f = Phi
df = -(phi/s)* (dmdx + dsdx * u)
if f_acqu is None:
f_acqu = f
df_acqu = df
else:
f_acqu += f
df_acqu += df
return f_acqu/(len(means)), df_acqu/(len(means)) | Integrated Probability of Improvement and its derivative |
def _decode_v2(value):
if re.search(r, value):
raise ValueError("Unescaped in the encoded string")
decode_colons = value.replace(, )
if re.search(r, decode_colons):
raise ValueError("Unescaped in encoded string")
return decode_colons.replace(, ) | Decode ':' and '$' characters encoded by `_encode`. |
def print_usage(actions):
actions = actions.items()
actions.sort()
print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
print('       %s --help' % basename(sys.argv[0]))
print()
print('actions:')
for name, (func, doc, arguments) in actions:
print('  %s:' % name)
for line in doc.splitlines():
print('    %s' % line)
if arguments:
print()
for arg, shortcut, default, argtype in arguments:
if isinstance(default, bool):
print('    %s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg
))
else:
print('    %-30s%-10s%s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg,
argtype, default
))
print() | Print the usage information. (Help screen) |
def push(self, buf):
self._src.emit('push-buffer', Gst.Buffer.new_wrapped(buf)) | Push a buffer into the source. |
def get_watermark_for_topic(
kafka_client,
topic,
):
try:
kafka_client.load_metadata_for_topics()
except KafkaUnavailableError:
kafka_client.load_metadata_for_topics()
watermarks = get_topics_watermarks(
kafka_client, [topic]
)
return watermarks | This method:
* refreshes metadata for the kafka client
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param topic: the topic
:returns: dict <topic>: [ConsumerPartitionOffsets] |
def save_task(self):
task = self.request.activation.task
task.status = STATUS.ASSIGNED
task.save() | Transition to save the task and return to ``ASSIGNED`` state. |
def replace_print(fileobj=sys.stderr):
printer = _Printer(fileobj)
previous_stdout = sys.stdout
sys.stdout = printer
try:
yield printer
finally:
sys.stdout = previous_stdout | Sys.out replacer, by default with stderr.
Use it like this:
with replace_print(fileobj):
print "hello" # writes to the file
print "done" # prints to stdout
Args:
fileobj: a file object to replace stdout.
Yields:
The printer. |
def run(self,
ipyclient=None,
quiet=False,
force=False,
block=False,
):
if force:
for key, oldfile in self.trees:
if os.path.exists(oldfile):
os.remove(oldfile)
if os.path.exists(self.trees.info):
print("Error: set a new name for this job or use Force flag.\nFile exists: {}"\
.format(self.trees.info))
return
if not ipyclient:
proc = _call_raxml(self._command_list)
self.stdout = proc[0]
self.stderr = proc[1]
else:
lbview = ipyclient.load_balanced_view()
self.async = lbview.apply(_call_raxml, self._command_list)
if not quiet:
if not ipyclient:
if "Overall execution time" not in self.stdout:
print("Error in raxml run\n" + self.stdout)
else:
print("job {} finished successfully".format(self.params.n))
else:
print("job {} submitted to cluster".format(self.params.n)) | Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an ipyclient
is provided then the job is sent to a remote engine and an asynchronous
result object is returned which can be queried or awaited until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient. |
def in6_getLocalUniquePrefix():
tod = time.time()
i = int(tod)
j = int((tod - i)*(2**32))
tod = struct.pack("!II", i,j)
rawmac = get_if_raw_hwaddr(conf.iface6)
mac = b":".join(map(lambda x: b"%.02x" % ord(x), list(rawmac)))
eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
import sha
globalid = sha.new(tod+eui64).digest()[:5]
return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00'*10) | Returns a pseudo-randomly generated Local Unique prefix. Function
follows the recommendation of Section 3.2.2 of RFC 4193 for prefix
generation. |
def collect_results(rule, max_results=500, result_stream_args=None):
if result_stream_args is None:
logger.error("This function requires a configuration dict for the "
"inner ResultStream object.")
raise KeyError
rs = ResultStream(rule_payload=rule,
max_results=max_results,
**result_stream_args)
return list(rs.stream()) | Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args) |
def logical_lines(lines):
if isinstance(lines, string_types):
lines = StringIO(lines)
buf = []
for line in lines:
if buf and not line.startswith(' '):
chunk = ''.join(buf).strip()
if chunk:
yield chunk
buf[:] = []
buf.append(line)
chunk = ''.join(buf).strip()
if chunk:
yield chunk | Merge lines into chunks according to q rules |
def _mine_send(self, tag, data):
channel = salt.transport.client.ReqChannel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master')
return None
finally:
channel.close() | Send mine data to the master |
def dump(self, force=False):
if self._parsed is None:
self.parse()
return self._parsed[0].dump(force=force) | Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value |
def _get_style_id_from_style(self, style, style_type):
if style.type != style_type:
raise ValueError(
"assigned style is type %s, need type %s" %
(style.type, style_type)
)
if style == self.default(style_type):
return None
return style.style_id | Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*. |
def alerts(self):
if self._alerts is None:
self._alerts = AlertList(self)
return self._alerts | :rtype: twilio.rest.monitor.v1.alert.AlertList |
def delete(self):
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
shutil.rmtree(self._file_path, ignore_errors=False)
self.remove_all_properties(True)
self.remove_all_locks(True) | Remove this resource or collection (recursive).
See DAVResource.delete() |
def eq(self, r1, r2):
if not is_register(r1) or not is_register(r2):
return False
if self.regs[r1] is None or self.regs[r2] is None:
return False
return self.regs[r1] == self.regs[r2] | True if values of r1 and r2 registers are equal |
def _filter_with_hooks(self, svc_event, listeners):
svc_ref = svc_event.get_service_reference()
hook_refs = self._registry.find_service_references(
SERVICE_EVENT_LISTENER_HOOK
)
if hook_refs:
ctx_listeners = {}
for listener in listeners:
context = listener.bundle_context
ctx_listeners.setdefault(context, []).append(listener)
shrinkable_ctx_listeners = ShrinkableMap(
{
context: ShrinkableList(value)
for context, value in ctx_listeners.items()
}
)
for hook_ref in hook_refs:
if not svc_ref == hook_ref:
hook_bundle = hook_ref.get_bundle()
hook_svc = self._registry.get_service(hook_bundle, hook_ref)
if hook_svc is not None:
try:
hook_svc.event(svc_event, shrinkable_ctx_listeners)
except:
self._logger.exception(
"Error calling EventListenerHook"
)
finally:
self._registry.unget_service(hook_bundle, hook_ref)
ret_listeners = set()
for bnd_listeners in shrinkable_ctx_listeners.values():
ret_listeners.update(bnd_listeners)
return ret_listeners
return listeners | Filters listeners with EventListenerHooks
:param svc_event: ServiceEvent being triggered
:param listeners: Listeners to filter
:return: A list of listeners with hook references |
def _prepend_row_index(rows, index):
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index:', index)
print('rows:', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v] + list(row) for v, row in zip(index, rows)]
return rows | Add a left-most index column. |
def _unpublish(self):
obj = self.content_object
actioned = False
if obj.current_version is not None:
obj.current_version = None
obj.save(update_fields=['current_version'])
actioned = True
return actioned | Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated. |
def validatePopElement(self, doc, elem, qname):
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidatePopElement(self._o, doc__o, elem__o, qname)
return ret | Pop the element end from the validation stack. |
def save_target_classes(self, filename):
with open(filename, 'w') as f:
for k, v in self._target_classes.items():
f.write('{0},{1}\n'.format(k, v)) | Saves target classes for all dataset images into the given file. |
def insertBulkBlock(self):
try:
body = request.body.read()
indata = cjson.decode(body)
if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])):
dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time",
self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.")
indata = validateJSONInputNoCopy("blockBulk", indata)
self.dbsBlockInsert.putBlock(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
if str(ex).find("ORA-01036") != -1:
dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex))
else:
sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict |
def create(name, **params):
log.debug('Server Density params: %s', params)
params = _clean_salt_variables(params)
params['name'] = name
api_response = requests.post(
'https://api.serverdensity.io/inventory/devices/',
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API response: %s', api_response)
log.debug('Server Density API response content: %s', api_response.content)
if api_response.status_code == 200:
try:
return salt.utils.json.loads(api_response.content)
except ValueError:
log.error('Could not parse API response content: %s', api_response.content)
raise CommandExecutionError(
'Failed to create device, API response: {0}'.format(api_response)
)
else:
return None | Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768 |
def _evaluate(self,R,phi=0.,t=0.):
return 0.5*R*R*(1.+2./3.*R*numpy.sin(3.*phi)) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2017-10-16 - Written - Bovy (UofT) |
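For reference, the potential this row evaluates, read directly off the return expression:

$$\Phi(R,\phi) = \frac{R^2}{2}\left(1 + \frac{2}{3}\,R\sin(3\phi)\right)$$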
def draw_commands(self, surf):
past_abilities = {act.ability for act in self._past_actions if act.ability}
for y, cmd in enumerate(sorted(self._abilities(
lambda c: c.name != "Smart"), key=lambda c: c.name), start=2):
if self._queued_action and cmd == self._queued_action:
color = colors.green
elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
color = colors.green * 0.75
elif cmd.ability_id in past_abilities:
color = colors.red
else:
color = colors.yellow
hotkey = cmd.hotkey[0:3]
surf.write_screen(self._font_large, color, (0.2, y), hotkey)
surf.write_screen(self._font_large, color, (3, y), cmd.name) | Draw the list of available commands. |
def flip(f):
ensure_callable(f)
result = lambda *args, **kwargs: f(*reversed(args), **kwargs)
functools.update_wrapper(result, f, ('__name__', '__module__'))
return result | Flip the order of positional arguments of given function. |
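A minimal usage sketch, assuming `flip` from the row above is in scope (`divmod` is from the standard library):

flipped = flip(divmod)
# flip reverses positional arguments: flipped(3, 10) calls divmod(10, 3).
assert flipped(3, 10) == divmod(10, 3) == (3, 1)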
def delete_secret(namespace, name, apiserver_url=None, force=True):
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
if not _get_namespaces(apiserver_url, namespace):
return {'name': name, 'result': False,
'comment': "Namespace doesn't exist, can't delete anything there",
'changes': {}}
url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url,
namespace, name)
res = http.query(url, method='DELETE')
if res.get('body'):
ret['comment'] = "Removed secret {0} in {1} namespace".format(name,
namespace)
return ret | .. versionadded:: 2016.3.0
Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name.
CLI Example:
.. code-block:: bash
salt '*' k8s.delete_secret namespace_name secret_name
salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local |
def fcsp_sa_fcsp_auth_proto_auth_type(self, **kwargs):
config = ET.Element("config")
fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
fcsp = ET.SubElement(fcsp_sa, "fcsp")
auth = ET.SubElement(fcsp, "auth")
proto = ET.SubElement(auth, "proto")
auth_type = ET.SubElement(proto, "auth-type")
auth_type.text = kwargs.pop('auth_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def getInstalledThemes(self, store):
if not store in self._getInstalledThemesCache:
self._getInstalledThemesCache[store] = (self.
_realGetInstalledThemes(store))
return self._getInstalledThemesCache[store] | Collect themes from all offerings installed on this store, or (if called
multiple times) return the previously collected list. |
async def insert(self, **kwargs):
if kwargs:
pk = await self.autoincrement()
kwargs.update({"id": pk})
await self.collection.insert_one(kwargs)
row = await self.collection.find_one({"id": pk})
else:
row = None
if row:
return {i:row[i] for i in row if i != "_id"}
else:
return {"error":500,
"reason":"Not created"} | Accepts request object, retrieves data from the one`s body
and creates new account. |
def in_op(self, other):
if not is_object(other):
raise MakeError(
'TypeError',
"You can't use 'in' operator to search in non-objects")
return other.has_property(to_string(self)) | Checks if self is in other |
def element_count(self):
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result | Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises. |
def calc_adc_params(self):
adc_gains = []
baselines = []
if np.where(np.isinf(self.p_signal))[0].size:
raise ValueError('Signal contains inf. Cannot perform adc.')
minvals = np.nanmin(self.p_signal, axis=0)
maxvals = np.nanmax(self.p_signal, axis=0)
for ch in range(np.shape(self.p_signal)[1]):
dmin, dmax = _digi_bounds(self.fmt[ch])
dmin = dmin + 1
pmin = minvals[ch]
pmax = maxvals[ch]
if np.isnan(pmin):
adc_gain = 1
baseline = 1
elif pmin == pmax:
if pmin == 0:
adc_gain = 1
baseline = 1
else:
adc_gain = abs(1 / pmin)
baseline = 0
else:
adc_gain = (dmax-dmin) / (pmax-pmin)
baseline = dmin - adc_gain*pmin
if pmin > 0:
baseline = int(np.ceil(baseline))
else:
baseline = int(np.floor(baseline))
if dmin != baseline:
adc_gain = (dmin - baseline) / pmin
if baseline > MAX_I32:
adc_gain = (MAX_I32 - dmin) / abs(pmin)
baseline = MAX_I32
elif baseline < MIN_I32:
adc_gain = (dmax - MIN_I32) / pmax
baseline = MIN_I32
adc_gains.append(adc_gain)
baselines.append(baseline)
return (adc_gains, baselines) | Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values. |
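A worked check of the docstring's mapping equation with made-up numbers (illustrative only; the function above additionally nudges `dmin`, rounds `baseline`, and clamps to int32):

pmin, pmax = -1.0, 1.0   # physical range (hypothetical)
dmin, dmax = -128, 127   # digital range of a hypothetical 8-bit format
adc_gain = (dmax - dmin) / (pmax - pmin)  # 127.5
baseline = dmin - adc_gain * pmin         # -0.5
digital = 0.5 * adc_gain + baseline       # physical 0.5 -> 63.25 before rounding
assert abs((digital - baseline) / adc_gain - 0.5) < 1e-12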
def get_related_flat(self, content_id, min_strength=None):
rel_id_to_idents = self.get_related_coref_relationships(
content_id, min_strength=min_strength)
flat_list = []
for val in rel_id_to_idents.values():
flat_list.extend(val)
return flat_list | Follow coreference relationships to get full related graph.
This differs from ``get_related_coref_relationships`` in that
it returns a flat list of all identifiers found through the
coreference layer of indirection.
:rtype: list of identifiers |
def create_http_monitor(self, topics, transport_url, transport_token=None, transport_method='PUT', connect_timeout=0,
response_timeout=0, batch_size=1, batch_duration=0, compression='none', format_type='json'):
monitor_xml = """\
<Monitor>
<monTopic>{topics}</monTopic>
<monBatchSize>{batch_size}</monBatchSize>
<monFormatType>{format_type}</monFormatType>
<monTransportType>http</monTransportType>
<monTransportUrl>{transport_url}</monTransportUrl>
<monTransportToken>{transport_token}</monTransportToken>
<monTransportMethod>{transport_method}</monTransportMethod>
<monConnectTimeout>{connect_timeout}</monConnectTimeout>
<monResponseTimeout>{response_timeout}</monResponseTimeout>
<monBatchDuration>{batch_duration}</monBatchDuration>
<monCompression>{compression}</monCompression>
</Monitor>
""".format(
topics=','.join(topics),
transport_url=transport_url,
transport_token=transport_token,
transport_method=transport_method,
connect_timeout=connect_timeout,
response_timeout=response_timeout,
batch_size=batch_size,
batch_duration=batch_duration,
format_type=format_type,
compression=compression,
)
monitor_xml = textwrap.dedent(monitor_xml)
response = self._conn.post("/ws/Monitor", monitor_xml)
location = ET.fromstring(response.text).find('.//location').text
monitor_id = int(location.split('/')[-1])
return HTTPDeviceCloudMonitor(self._conn, monitor_id) | Creates a HTTP Monitor instance in Device Cloud for a given list of topics
:param topics: a string list of topics (e.g. ['DeviceCore[U]',
'FileDataCore']).
:param transport_url: URL of the customer web server.
:param transport_token: Credentials for basic authentication in the following format: username:password
:param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT.
:param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param response_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param batch_size: How many Msgs received before sending data.
:param batch_duration: How long to wait before sending batch if it
does not exceed batch_size.
:param compression: Compression value (i.e. 'gzip').
:param format_type: What format server should send data in (i.e. 'xml' or 'json').
Returns an object of the created Monitor |
def tile(self, bbox, z=0, format=None, clip=True):
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
sql = field.name
try:
tilew = self.tilewidths[z]
except IndexError:
tilew = self.tilewidths[-1]
if bbox.srid != srid:
bbox = bbox.transform(srid, clone=True)
if bbox.srs.geographic:
p = geos.Point(tilew, tilew, srid=tile_srid)
p.transform(srid)
tilew = p.x
if clip:
bufbox = bbox.buffer(tilew)
sql = geofn.Intersection(sql, bufbox.envelope)
sql = SimplifyPreserveTopology(sql, tilew)
if format == 'pbf':
return clone.pbf(bbox, geo_col=sql)
sql = geofn.Transform(sql, 4326)
return clone.annotate(**{format: sql}) | Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean |
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
if self.is_requestable():
self._check_required_fields('dataset-requestable', ignore_fields)
else:
self._check_required_fields('dataset', ignore_fields)
if len(self.resources) == 0 and not allow_no_resources:
raise HDXError('There is no resource item!')
for resource in self.resources:
ignore_fields = ['package_id']
resource.check_required_fields(ignore_fields=ignore_fields) | Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None |
def make_session(self):
with connect_lock:
if self._sf_session is None:
sf_session = requests.Session()
sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias,
settings_dict=self.settings_dict)
sf_instance_url = sf_session.auth.instance_url
sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries())
sf_session.mount(sf_instance_url, sf_requests_adapter)
self._sf_session = sf_session | Authenticate and get the name of assigned SFDC data server |
def _load_params_of(self, effect):
for param in effect.params:
if param.value != param.default:
self._set_param_value(param) | Called only when an effect has been created.
Param changes call :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()` |
def get_object(self, name, obj):
return self[name](obj, **self.opts(name)) | :param name: string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object |
def run_loop(leds=all_leds):
print()
while 1:
try:
if switch():
[led.on() for led in leds]
else:
[led.off() for led in leds]
except OSError:
break | Start the loop.
:param `leds`: Which LEDs to light up upon switch press.
:type `leds`: sequence of LED objects |
def translate(script):
tree = ast.parse(script)
ZiplineImportVisitor().visit(tree)
return astor.to_source(tree) | translate zipline script into pylivetrader script. |
def SetupDisplayDevice(self, type, state, percentage, energy, energy_full,
energy_rate, time_to_empty, time_to_full, is_present,
icon_name, warning_level):
if not self.api1:
raise dbus.exceptions.DBusException(
'SetupDisplayDevice() can only be used with the 1.0 API',
name=MOCK_IFACE + '.APIVersion')
display_props = mockobject.objects[self.p_display_dev]
display_props.Set(DEVICE_IFACE, 'Type',
dbus.UInt32(type))
display_props.Set(DEVICE_IFACE, 'State',
dbus.UInt32(state))
display_props.Set(DEVICE_IFACE, 'Percentage',
percentage)
display_props.Set(DEVICE_IFACE, 'Energy', energy)
display_props.Set(DEVICE_IFACE, 'EnergyFull',
energy_full)
display_props.Set(DEVICE_IFACE, 'EnergyRate',
energy_rate)
display_props.Set(DEVICE_IFACE, 'TimeToEmpty',
dbus.Int64(time_to_empty))
display_props.Set(DEVICE_IFACE, 'TimeToFull',
dbus.Int64(time_to_full))
display_props.Set(DEVICE_IFACE, 'IsPresent',
is_present)
display_props.Set(DEVICE_IFACE, 'IconName',
icon_name)
display_props.Set(DEVICE_IFACE, 'WarningLevel',
dbus.UInt32(warning_level)) | Convenience method to configure DisplayDevice properties
This calls Set() for all properties that the DisplayDevice is defined to
have, and is shorter if you have to completely set it up instead of
changing just one or two properties.
This is only available when mocking the 1.0 API. |
def orient_averaged_adaptive(tm):
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
def Sfunc(beta, alpha, i, j, real):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
s = S_ang[i,j].real if real else S_ang[i,j].imag
return s * tm.or_pdf(beta)
ind = range(2)
for i in ind:
for j in ind:
S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0
def Zfunc(beta, alpha, i, j):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
return Z_ang[i,j] * tm.or_pdf(beta)
ind = range(4)
for i in ind:
for j in ind:
Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0
return (S, Z) | Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices. |
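In formulas, what the two `dblquad` calls above compute (read off the code; $p(\beta)$ is `tm.or_pdf`):

$$\langle S_{ij} \rangle = \frac{1}{360}\int_0^{360}\!d\alpha \int_0^{180}\!d\beta\; S_{ij}(\alpha,\beta)\,p(\beta), \qquad \langle Z_{ij} \rangle = \frac{1}{360}\int_0^{360}\!d\alpha \int_0^{180}\!d\beta\; Z_{ij}(\alpha,\beta)\,p(\beta),$$

with the real and imaginary parts of $S$ integrated separately because `dblquad` only handles real integrands.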
def create_knowledge_base(project_id, display_name):
import dialogflow_v2beta1 as dialogflow
client = dialogflow.KnowledgeBasesClient()
project_path = client.project_path(project_id)
knowledge_base = dialogflow.types.KnowledgeBase(
display_name=display_name)
response = client.create_knowledge_base(project_path, knowledge_base)
print()
print('Display Name: {}'.format(response.display_name))
print('Knowledge ID: {}'.format(response.name)) | Creates a Knowledge base.
Args:
project_id: The GCP project linked with the agent.
display_name: The display name of the Knowledge base. |
def disable_servicegroup_passive_host_checks(self, servicegroup):
for service_id in servicegroup.get_services():
if service_id in self.daemon.services:
host_id = self.daemon.services[service_id].host
self.disable_passive_host_checks(self.daemon.hosts[host_id]) | Disable passive host checks for a servicegroup
Format of the line that triggers function call::
DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
:param servicegroup: servicegroup to disable
:type servicegroup: alignak.objects.servicegroup.Servicegroup
:return: None |
def prepare_environment(work_dir):
package_directory = os.path.dirname(os.path.abspath(beeswarm.__file__))
logger.info()
shutil.copytree(os.path.join(package_directory, 'data'), os.path.join(work_dir, 'data'),
ignore=Honeypot._ignore_copy_files) | Performs a few maintenance tasks before the Honeypot is run. Copies the data directory,
and the config file to the cwd. The config file copied here is overwritten if
the __init__ method is called with a configuration URL.
:param work_dir: The directory to copy files to. |
def get_schedules_for_season(self, season, season_type="REG"):
try:
season = int(season)
if season_type not in ["REG", "PRE", "POST"]:
raise ValueError
except (ValueError, TypeError):
raise FantasyDataError('Error: Invalid method parameters')
season_param = "{0}{1}".format(season, season_type)
result = self._method_call("Schedules/{season}", "stats", season=season_param)
return result | Game schedule for a specified season. |
def _pwl1_to_poly(self, generators):
for g in generators:
if (g.pcost_model == PW_LINEAR) and (len(g.p_cost) == 2):
g.pwl_to_poly()
return generators | Converts single-block piecewise-linear costs into linear
polynomial. |
def ensure_final_value(packageName, arsc, value):
if value:
returnValue = value
if value[0] == '@':
try:
res_id = int('0x' + value[1:], 16)
res_id = arsc.get_id(packageName, res_id)[1]
returnValue = arsc.get_string(packageName, res_id)[1]
except (ValueError, TypeError):
pass
return returnValue
return | Ensure incoming value is always the value, not the resid
androguard will sometimes return the Android "resId" aka
Resource ID instead of the actual value. This checks whether
the value is actually a resId, then performs the Android
Resource lookup as needed. |
def skip_whitespace(self):
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1 | Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int |
def output(self, message, color=None):
output_to = stderr if color == "red" else stdout
secho(self.indent(message), fg=color, file=output_to) | A helper to used like print() or click's secho() tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr |
def scalarmult_B(e):
e %= L
P = IDENT
for i in range(253):
if e & 1:
P = edwards_add(P=P, Q=Bpow[i])
e //= 2
assert e == 0, e
return P | Implements scalarmult(B, e) more efficiently. |
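A self-contained sketch of the same precomputed double-and-add idea, using modular integer multiplication in place of Edwards point addition (all names here are illustrative, not from the source; the real code also reduces `e` modulo the group order `L` first):

def scalarmult_precomputed(e, base, modulus, bits=253):
    # pow_table[i] plays the role of Bpow[i] == 2**i * B.
    pow_table = [pow(base, 2 ** i, modulus) for i in range(bits)]
    acc = 1  # plays the role of IDENT
    for i in range(bits):
        if e & 1:
            acc = (acc * pow_table[i]) % modulus  # plays the role of edwards_add
        e //= 2
    return acc

assert scalarmult_precomputed(10, 3, 101) == pow(3, 10, 101)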
def setup_dotcloud_account(cli):
client = RESTClient(endpoint=cli.client.endpoint)
client.authenticator = NullAuth()
urlmap = client.get().item
username = cli.prompt()
password = cli.prompt('Password', noecho=True)
credential = {'token_url': urlmap.get('token'),
'key': CLIENT_KEY, 'secret': CLIENT_SECRET}
try:
token = cli.authorize_client(urlmap.get('token'), credential, username, password)
except Exception as e:
cli.die()
token['url'] = credential['token_url']
config = GlobalConfig()
config.data = {'token': token}
config.save()
cli.global_config = GlobalConfig()
cli.setup_auth()
cli.get_keys() | Gets user/pass for dotcloud, performs auth, and stores keys |
def rst_to_obj(cls, file_path=None, text=, columns=None,
remove_empty_rows=True, key_on=None,
deliminator=' ', eval_cells=True):
text = cls._get_lines(file_path, text)
if len(text) == 1:
text = text[0].split('\n')
for i in [-1, 2, 0]:
if not text[i].replace('=', '').strip():
text.pop(i)
lines = [row.split() for row in text]
list_of_list = cls._merge_quoted_cells(lines, deliminator,
remove_empty_rows, eval_cells,
excel_boolean=False)
return cls.list_to_obj(list_of_list, key_on=key_on, columns=columns) | This will convert a rst file or text to a seaborn table
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable |
def end_task(self):
self.progress(self.task_stack[-1].size)
self.task_stack.pop() | Remove the current task from the stack. |
def prepare_adiabatic_limit(slh, k=None):
if k is None:
k = symbols('k', positive=True)
Ld = slh.L.dag()
LdL = (Ld * slh.L)[0, 0]
K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
N = slh.S.dag()
B, A, Y = K.series_expand(k, 0, 2)
G, F = Ld.series_expand(k, 0, 1)
return Y, A, B, F, G, N | Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system. |
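Read off the code above, the expansions being prepared are (a restatement, not new math):

$$K \equiv -\tfrac{1}{2}L^\dagger L + iH = B + A\,k + Y\,k^2, \qquad L^\dagger = G + F\,k, \qquad N = S^\dagger,$$

which is exactly what `series_expand(k, 0, 2)` and `series_expand(k, 0, 1)` return as coefficient tuples.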
def eigenvectors_right_samples(self):
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].eigenvectors_right
return res | r""" Samples of the right eigenvectors of the hidden transition matrix |
def read_simulation_temps(pathname,NumTemps):
print("--Reading temperatures from %s/..." % pathname)
temps_from_file = numpy.zeros(NumTemps, numpy.float64)
for k in range(NumTemps):
infile = open(os.path.join(pathname, 'TEMP' + str(k), 'simul' + str(k) + '.output'), 'r')
lines = infile.readlines()
infile.close()
for line in lines:
if (line[0:11] == 'Temperature'):
vals = line.split()
break
temps_from_file[k] = float(vals[1])
return temps_from_file | Reads in the various temperatures from each TEMP#/simul.output file by knowing
beforehand the total number of temperatures (parameter at top) |
def _example_from_allof(self, prop_spec):
example_dict = {}
for definition in prop_spec['allOf']:
update = self.get_example_from_prop_spec(definition, True)
example_dict.update(update)
return example_dict | Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict |
def draw_polygon(
self,
*pts,
close_path=True,
stroke=None,
stroke_width=1,
stroke_dash=None,
fill=None
) -> None:
c = self.c
c.saveState()
if stroke is not None:
c.setStrokeColorRGB(*stroke)
c.setLineWidth(stroke_width)
c.setDash(stroke_dash)
if fill is not None:
c.setFillColorRGB(*fill)
p = c.beginPath()
fn = p.moveTo
for x,y in zip(*[iter(pts)]*2):
fn(x, y)
fn = p.lineTo
if close_path:
p.close()
c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None))
c.restoreState() | Draws the given polygon. |
def server_shutdown(server_state):
set_running( False )
rpc_stop(server_state)
api_stop(server_state)
server_atlas_shutdown(server_state)
gc_stop()
try:
if os.path.exists(server_state['pid_file']):
os.unlink(server_state['pid_file'])
except:
pass
return True | Shut down server subsystems.
Remove PID file. |
def start(self):
super().start()
self.__poller = zmq.asyncio.Poller()
self.__poller.register(self.zap_socket, zmq.POLLIN)
self.__task = asyncio.ensure_future(self.__handle_zap()) | Start ZAP authentication |
def OnTool(self, event):
msgtype = self.ids_msgs[event.GetId()]
post_command_event(self, msgtype) | Toolbar event handler |
def _replace_scalar(self, scalar):
if not is_arg_scalar(scalar):
return scalar
name = scalar[1:]
return self.get_scalar_value(name) | Replace scalar name with scalar value |
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
refresh_token = refresh_token or self.refresh_token
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs) | Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner. |
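A hedged usage sketch of the helper this method delegates to (`prepare_token_request`, as found in oauthlib; the token value is illustrative):

from oauthlib.oauth2.rfc6749.parameters import prepare_token_request

body = prepare_token_request('refresh_token', body='',
                             refresh_token='abc123', scope='profile')
# body is a urlencoded string such as
# 'grant_type=refresh_token&scope=profile&refresh_token=abc123' (order may vary)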
def _md5_file(fn, block_size=1048576):
h = hashlib.md5()
with open(fn, 'rb') as fp:
d = 1
while d:
d = fp.read(block_size)
h.update(d)
return h.hexdigest() | Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5 |
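A minimal usage sketch, assuming `_md5_file` from the row above is in scope; the block-by-block digest must agree with hashing the whole file at once:

import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world' * 1000)
    path = tmp.name
with open(path, 'rb') as fp:
    expected = hashlib.md5(fp.read()).hexdigest()
assert _md5_file(path) == expected
os.unlink(path)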
def is_filterbank(filename):
with open(filename, 'rb') as fh:
is_fil = True
try:
keyword, value, idx = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START'
except AssertionError:
is_fil = False
except KeyError:
is_fil = False
return is_fil | Open file and confirm if it is a filterbank file or not. |
def item(self, key):
return _item.Item(self._name, key, context=self._context) | Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key. |
def get_all():
services = _get_services()
ret = set()
for service in services:
ret.add(service['ServiceName'])
return sorted(ret) | Return all installed services
Returns:
list: Returns a list of all services on the system.
CLI Example:
.. code-block:: bash
salt '*' service.get_all |
def format(self, tokensource, outfile):
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper()) | Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items. |
def Trebble_Bishnoi(self, T, full=True, quick=True):
c1 = self.alpha_function_coeffs
T, Tc, a = self.T, self.Tc, self.a
a_alpha = a*exp(c1*(-T/Tc + 1))
if not full:
return a_alpha
else:
da_alpha_dT = a*-c1*exp(c1*(-T/Tc + 1))/Tc
d2a_alpha_dT2 = a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2
return a_alpha, da_alpha_dT, d2a_alpha_dT2 | r'''Method to calculate `a_alpha` and its first and second
derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. One coefficient needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)}
References
----------
.. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four-
Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1
(September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8. |
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None,
training_data=None, validation_data=None,
inline_plotting=False, session_config=None, log_suffix=None,
continue_training=False, continue_with_specific_checkpointpath=None):
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
if log_suffix is not None:
chkpt_path = chkpt_path + "_" + log_suffix
if session_config is None:
session_config = get_default_config()
if continue_with_specific_checkpointpath:
chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
print("Continue with checkpoint: {}".format(chkpt_path))
elif continue_training:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(chkpt_path))
if not os.path.exists(chkpt_path):
os.makedirs(chkpt_path)
if Model is None:
model_backup = os.path.join(chkpt_path, "model.py")
copyfile(hyper_params["arch"]["model"].replace(".", os.sep), model_backup)
arch_model = __import__(hyper_params["arch"]["model"], fromlist=["Model"])
Model = arch_model.Model
if create_loss is None:
loss_backup = os.path.join(chkpt_path, "loss.py")
copyfile(hyper_params["arch"]["loss"].replace(".", os.sep), loss_backup)
arch_loss = __import__(hyper_params["arch"]["loss"], fromlist=["create_loss"])
create_loss = arch_loss.create_loss
print("Load data")
if training_data is None:
training_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
hyper_params.train.batch_size)
if validation_data is None:
validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
hyper_params.train.batch_size)
with open(chkpt_path + "/hyperparameters.json", "w") as json_file:
json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True))
estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting)
config = None
if hyper_params.train.get("distributed", False):
distribution = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
train_distribute=distribution,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
else:
config = tf.estimator.RunConfig(session_config=session_config,
model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
estimator = None
if hyper_params.train.get("warm_start_checkpoint", None) is not None:
warm_start_dir = hyper_params.train.warm_start_checkpoint
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
warm_start_from=warm_start_dir,
params=hyper_params)
else:
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
params=hyper_params)
throttle_secs = hyper_params.train.get("throttle_secs", 120)
train_spec = tf.estimator.TrainSpec(input_fn=training_data,
max_steps=hyper_params.train.steps)
eval_spec = tf.estimator.EvalSpec(input_fn=validation_data,
throttle_secs=throttle_secs)
print("Start training")
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return estimator | Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters obejct loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return: |
def parse(cls, buff, offset):
values = {}
for name, part in cls.parts:
value, new_offset = part.parse(buff, offset)
values[name] = value
offset = new_offset
return cls(**values), offset | Given a buffer and offset, returns the parsed value and new offset.
Calls `parse()` on the given buffer for each sub-part in order and
creates a new instance with the results. |
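A self-contained illustration of the parse pattern described above: each sub-part consumes bytes at an offset and returns `(value, new_offset)`; the class names and `>I` layout here are hypothetical, not from the source:

import struct

class UInt32:
    @staticmethod
    def parse(buff, offset):
        (value,) = struct.unpack_from('>I', buff, offset)
        return value, offset + 4

class Header:
    parts = [('length', UInt32), ('flags', UInt32)]
    def __init__(self, **values):
        self.__dict__.update(values)
    @classmethod
    def parse(cls, buff, offset):
        values = {}
        for name, part in cls.parts:
            value, offset = part.parse(buff, offset)
            values[name] = value
        return cls(**values), offset

hdr, end = Header.parse(struct.pack('>II', 10, 3), 0)
assert (hdr.length, hdr.flags, end) == (10, 3, 8)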
def speakerDiarizationEvaluateScript(folder_name, ldas):
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folder_name, files)))
wavFilesList = sorted(wavFilesList)
N = []
for wav_file in wavFilesList:
gt_file = wav_file.replace('.wav', '.segments')
if os.path.isfile(gt_file):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
N.append(len(list(set(seg_labs))))
else:
N.append(-1)
for l in ldas:
print("LDA = {0:d}".format(l))
for i, wav_file in enumerate(wavFilesList):
speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False)
print | This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA) |
def _is_collinear(self, x, y):
pts = np.column_stack([x[:3], y[:3], np.ones(3)])
return np.linalg.det(pts) == 0.0 | Checks if first three points are collinear |
def _unzip(self, src, dst, scene, force_unzip=False):
self.output("Unzipping %s - It might take some time" % scene, normal=True, arrow=True)
try:
if isdir(dst) and not force_unzip:
self.output('%s is already unzipped' % scene, normal=True, color='green', indent=1)
return
else:
tar = tarfile.open(src, 'r')
tar.extractall(path=dst)
tar.close()
except tarfile.ReadError:
check_create_folder(dst)
subprocess.check_call(['tar', '-xf', src, '-C', dst]) | Unzip tar files |
def process_transform(self, tag_value, resource_set):
self.log.info("Transforming tag value on %s instances" % (
len(resource_set)))
key = self.data.get('key', None)
c = utils.local_session(self.manager.session_factory).client('ec2')
self.create_tag(
c,
[r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < 50],
key, tag_value) | Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key |
def read_from_buffer(cls, buf, identifier_str=None):
try:
return cls._read_from_buffer(buf, identifier_str)
except Exception as e:
cls._load_error(e, identifier_str) | Load the context from a buffer. |
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
has_node = graph.has_node
n = counter()
node_id_format = _format.format(initial_guess)
node_id = initial_guess
while has_node(node_id):
node_id = node_id_format % n()
return node_id | Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str |
def updateWPText(self):
self.wpText.set_position((self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0)))
self.wpText.set_size(self.fontSize)
if type(self.nextWPTime) is str:
self.wpText.set_text('%d/%d (%.2f m)' % (self.currentWP, self.finalWP, self.wpDist))
else:
self.wpText.set_text('%d/%d (%.2f m, %.2f s)' % (self.currentWP, self.finalWP, self.wpDist, self.nextWPTime)) | Updates the current waypoint and distance to it. |
def queue_files(dirpath, queue):
for root, _, files in os.walk(os.path.abspath(dirpath)):
if not files:
continue
for filename in files:
queue.put(os.path.join(root, filename)) | Add files in a directory to a queue |
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
sp_template_info_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
vlan_name = self.make_vlan_name(vlan_id)
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
try:
for sp_template_info in sp_template_info_list:
sp_template_path = sp_template_info.path
sp_template = sp_template_info.name
sp_template_full_path = (sp_template_path +
const.SP_TEMPLATE_PREFIX + sp_template)
obj = handle.query_dn(sp_template_full_path)
if not obj:
LOG.error(
'Service Profile template %s does not exist',
sp_template_full_path)
continue
eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
for ep in virtio_port_list]
for eth_port_path in eth_port_paths:
eth = handle.query_dn(eth_port_path)
if eth:
vlan_path = (eth_port_path +
const.VLAN_PATH_PREFIX + vlan_name)
vlan = handle.query_dn(vlan_path)
if vlan:
handle.remove_mo(vlan)
else:
LOG.debug(
'VLAN %s not found at %s', vlan_name, eth_port_path)
else:
LOG.debug(
'Ethernet port not found at %s', eth_port_path)
handle.commit()
return True
except Exception as e:
raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
ucsm_ip=ucsm_ip,
exc=e) | Deletes VLAN config from all SP Templates that have it. |
def get_notices(self):
result = []
for date, title in self.get_notice().items():
content = self.get_notice_content(date)
result.append([date, title, content])
return result | [deprecated] Use `get_notice()` and `get_notice_content()` instead. |
def listRoleIds(self, *args, **kwargs):
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs) | List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable`` |
def push_blob(self,
filename=None,
progress=None,
data=None, digest=None,
check_exists=True):
if filename is None:
dgst = digest
else:
dgst = hash_file(filename)
if check_exists:
try:
self._request('head', 'blobs/' + dgst)
return dgst
except requests.exceptions.HTTPError as ex:
if ex.response.status_code != requests.codes.not_found:
raise
r = self._request('post', 'blobs/uploads/')
upload_url = r.headers['Location']
url_parts = list(urlparse.urlparse(upload_url))
query = urlparse.parse_qs(url_parts[4])
query.update({'digest': dgst})
url_parts[4] = urlencode(query, True)
url_parts[0] = 'http' if self._insecure else 'https'
upload_url = urlparse.urlunparse(url_parts)
if filename is None:
data = _ReportingChunks(dgst, data, progress) if progress else data
self._base_request('put', upload_url, data=data)
else:
with open(filename, 'rb') as f:
data = _ReportingFile(dgst, f, progress) if progress else f
self._base_request('put', upload_url, data=data)
return dgst | Upload a file to the registry and return its (SHA-256) hash.
The registry is content-addressable so the file's content (aka blob)
can be retrieved later by passing the hash to :meth:`pull_blob`.
:param filename: File to upload.
:type filename: str
:param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.
:type data: Generator or iterator
:param digest: Hash of the data to be uploaded in ``data``, if specified.
:type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)
:param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.
:type progress: function(dgst, chunk, size)
:param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.
:type check_exists: bool
:rtype: str
:returns: Hash of file's content. |
def get_notification_commands(self, notifways, n_type, command_name=False):
res = []
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
res.extend(notifway.get_notification_commands(n_type))
if command_name:
setattr(self, n_type + '_notification_commands', [c.get_name() for c in res])
else:
setattr(self, n_type + '_notification_commands', res)
return res | Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command] |
def check(self):
if self.__next_start is not None:
utc_now = utc_datetime()
if utc_now >= self.__next_start:
result = []
for task_source in self.__next_sources:
records = task_source.has_records()
if records is not None:
result.extend(records)
self.__update_all()
if len(result) > 0:
return tuple(result) | Check if there are records that are ready to start and return them if there are any
:return: tuple of WScheduleRecord or None (if there are no tasks to start) |