def find_key_by_email(self, email, secret=False):
    for key in self.list_keys(secret=secret):
        for uid in key['uids']:
            if re.search(email, uid):
                return key
    raise LookupError("GnuPG public key for email %s not found!" % email)
Find user's key based on their email address. :param str email: The email address to search for. :param bool secret: If True, search through secret keyring.
def _fire_ipopo_event(self, kind, factory_name, instance_name=None):
    with self.__listeners_lock:
        listeners = self.__listeners[:]
    for listener in listeners:
        try:
            listener.handle_ipopo_event(
                constants.IPopoEvent(kind, factory_name, instance_name)
            )
        except:
            _logger.exception("Error calling an iPOPO event handler")
Triggers an iPOPO event :param kind: Kind of event :param factory_name: Name of the factory associated to the event :param instance_name: Name of the component instance associated to the event
def load(theTask, canExecute=True, strict=True, defaults=False):
    return teal(theTask, parent=None, loadOnly=True, returnAs="dict",
                canExecute=canExecute, strict=strict, errorsToTerm=True,
                defaults=defaults)
Shortcut to load TEAL .cfg files for non-GUI access where loadOnly=True.
def OSXEnumerateRunningServicesFromClient(args):
    del args  # Unused.
    osx_version = client_utils_osx.OSXVersion()
    version_array = osx_version.VersionAsMajorMinor()
    if version_array[:2] < [10, 6]:
        raise UnsupportedOSVersionError(
            "ServiceManagement API unsupported on < 10.6. This client is %s"
            % osx_version.VersionString())
    launchd_list = GetRunningLaunchDaemons()
    parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
    for job in parser.Parse():
        response = CreateServiceProto(job)
        yield response
Get running launchd jobs.

Args:
  args: Unused.

Yields:
  `rdf_client.OSXServiceInformation` instances.

Raises:
  UnsupportedOSVersionError: for OS X earlier than 10.6.
def listdir(self, name, **kwargs):
    assert self._is_s3(name), "name must be in form s3://bucket/prefix/"
    if not name.endswith('/'):
        name += "/"
    return self.list(name, delimiter='/', **kwargs)
Returns a list of the files under the specified path.

This is different from list as it will only give you files under the
current directory, much like ls.

name must be in the form of `s3://bucket/prefix/`

Parameters
----------
keys: optional
    if True then this will return the actual boto keys for files
    that are encountered
objects: optional
    if True then this will return the actual boto objects for files
    or prefixes that are encountered
def load(self):
    self._validate()
    self._logger.logging_load()
    self.encoding = get_file_encoding(self.source, self.encoding)
    with io.open(self.source, "r", encoding=self.encoding) as fp:
        formatter = MediaWikiTableFormatter(fp.read())
    formatter.accept(self)
    return formatter.to_table_data()
Extract tabular data as |TableData| instances from a MediaWiki file.
|load_source_desc_file|

:return:
    Loaded table data iterator.
    |load_table_name_desc|

    ===================  ==============================================
    Format specifier     Value after the replacement
    ===================  ==============================================
    ``%(filename)s``     |filename_desc|
    ``%(key)s``          | This is replaced by:
                         | **(1)** ``caption`` mark of the table
                         | **(2)** ``%(format_name)s%(format_id)s``
                         | if ``caption`` mark not included in the table.
    ``%(format_name)s``  ``"mediawiki"``
    ``%(format_id)s``    |format_id_desc|
    ``%(global_id)s``    |global_id|
    ===================  ==============================================

:rtype: |TableData| iterator
:raises pytablereader.DataError:
    If the MediaWiki data is invalid or empty.
def request(self, source="candidate"):
    node = new_ele("validate")
    if type(source) is str:
        src = util.datastore_or_url("source", source, self._assert)
    else:
        validated_element(source, ("config", qualify("config")))
        src = new_ele("source")
        src.append(source)
    node.append(src)
    return self._request(node)
Validate the contents of the specified configuration. *source* is the name of the configuration datastore being validated or `config` element containing the configuration subtree to be validated :seealso: :ref:`srctarget_params`
def random_val(index, tune_params):
    key = list(tune_params.keys())[index]
    return random.choice(tune_params[key])
return a random value for a parameter
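A quick usage sketch: the tune_params dict below is purely illustrative, and it assumes random_val as defined above is in scope.

import random

# Hypothetical search space: each key maps to its allowed values.
tune_params = {"block_size_x": [32, 64, 128], "tile_size": [1, 2, 4]}
print(random_val(0, tune_params))  # one of 32, 64, 128
print(random_val(1, tune_params))  # one of 1, 2, 4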
def lower_coerce_type_block_type_data(ir_blocks, type_equivalence_hints):
    allowed_key_type_spec = (GraphQLInterfaceType, GraphQLObjectType)
    allowed_value_type_spec = GraphQLUnionType

    for key, value in six.iteritems(type_equivalence_hints):
        if (not isinstance(key, allowed_key_type_spec) or
                not isinstance(value, allowed_value_type_spec)):
            msg = (u'Invalid type equivalence hints received! Hint {} ({}) -> {} ({}) '
                   u'was unexpected, expected a hint in the form '
                   u'GraphQLInterfaceType -> GraphQLUnionType or '
                   u'GraphQLObjectType -> GraphQLUnionType'.format(
                       key.name, str(type(key)), value.name, str(type(value))))
            raise GraphQLCompilationError(msg)

    equivalent_type_names = {
        key.name: {x.name for x in value.types}
        for key, value in six.iteritems(type_equivalence_hints)
    }

    new_ir_blocks = []
    for block in ir_blocks:
        new_block = block
        if isinstance(block, CoerceType):
            target_class = get_only_element_from_collection(block.target_class)
            if target_class in equivalent_type_names:
                new_block = CoerceType(equivalent_type_names[target_class])
        new_ir_blocks.append(new_block)

    return new_ir_blocks
Rewrite CoerceType blocks to explicitly state which types are allowed in the coercion.
def _register_callback(cnx, tag_prefix, obj, event, real_id):
    libvirt_name = real_id
    if real_id is None:
        libvirt_name = 'VIR_{0}_EVENT_ID_{1}'.format(obj, event).upper()
    if not hasattr(libvirt, libvirt_name):
        log.warning('Skipping "%s/%s" events: libvirt too old', obj, event)
        return None
    libvirt_id = getattr(libvirt, libvirt_name)
    callback_name = "_{0}_event_{1}_cb".format(obj, event)
    callback = globals().get(callback_name, None)
    if callback is None:
        log.error('Missing function %s in engine', callback_name)
        return None
    register = getattr(cnx, REGISTER_FUNCTIONS[obj])
    return register(None, libvirt_id, callback,
                    {'prefix': tag_prefix, 'object': obj, 'event': event})
Helper function registering a callback :param cnx: libvirt connection :param tag_prefix: salt event tag prefix to use :param obj: the libvirt object name for the event. Needs to be one of the REGISTER_FUNCTIONS keys. :param event: the event type name. :param real_id: the libvirt name of an alternative event id to use or None :rtype: integer value needed to deregister the callback
def _cnf_formula(lexer, varname, nvars, nclauses):
    clauses = _clauses(lexer, varname, nvars)
    if len(clauses) < nclauses:
        fstr = "formula has fewer than {} clauses"
        raise Error(fstr.format(nclauses))
    if len(clauses) > nclauses:
        fstr = "formula has more than {} clauses"
        raise Error(fstr.format(nclauses))
    return ('and', ) + clauses
Return a DIMACS CNF formula.
def reset_actions(self):
    if self._driver.w3c:
        self.w3c_actions.clear_actions()
    self._actions = []
Clears actions that are already stored locally and on the remote end
def register_app(self, app=None):
    app = app or self.options['DEFAULT_APP']
    if not app:
        raise ImproperlyConfigured('An app name is required because DEFAULT_APP is empty - please use a '
                                   'valid app name or set the DEFAULT_APP in settings')
    if isinstance(app, str):
        app = apps.get_app_config(app)
    with self.registration_lock:
        if app.name in self.registered_apps:
            return
        self.registered_apps[app.name] = app
        self.engine.get_template_loader(app, 'templates', create=True)
        self.engine.get_template_loader(app, 'scripts', create=True)
        self.engine.get_template_loader(app, 'styles', create=True)
        if self.options['SIGNALS']:
            dmp_signal_register_app.send(sender=self, app_config=app)
Registers an app as a "DMP-enabled" app. Normally, DMP does this automatically when included in urls.py. If app is None, the DEFAULT_APP is registered.
def getfields(comm):
    fields = []
    for field in comm:
        if 'field' in field:
            fields.append(field)
    return fields
get all the fields that have the key 'field'
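A small illustrative call, assuming getfields from above is in scope; the comm list of dicts is made up for the example.

# Keep only the entries that carry a 'field' key.
comm = [
    {"field": "temperature", "units": "K"},
    {"comment": "no field key here"},
    {"field": "pressure", "units": "Pa"},
]
print(getfields(comm))  # the first and third dicts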
def _check_aligned_header(self, data_type, unit):
    if data_type is not None:
        assert isinstance(data_type, DataTypeBase), \
            'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))
        if unit is None:
            unit = data_type.units[0]
    else:
        data_type = self.header.data_type
        unit = unit or self.header.unit
    return Header(data_type, unit, self.header.analysis_period, self.header.metadata)
Check the header inputs whenever get_aligned_collection is called.
def get_fk_popup_field(cls, *args, **kwargs):
    kwargs['popup_name'] = cls.get_class_verbose_name()
    kwargs['permissions_required'] = cls.permissions_required
    if cls.template_name_fk is not None:
        kwargs['template_name'] = cls.template_name_fk
    return ForeignKeyWidget('{}_popup_create'.format(cls.get_class_name()),
                            *args, **kwargs)
Generate a foreign-key field wired to this class's popup-create CRUD view.
def read_plain_int64(file_obj, count):
    return struct.unpack("<{}q".format(count).encode("utf-8"),
                         file_obj.read(8 * count))
Read `count` 64-bit ints using the plain encoding.
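For orientation, here is a self-contained round trip through the same little-endian "<{count}q" layout using the standard struct module; read_plain_int64 is assumed to be the function above.

import io
import struct

values = (1, -2, 3)
buf = io.BytesIO(struct.pack("<3q", *values))  # 24 bytes, 8 per int
print(read_plain_int64(buf, 3))  # (1, -2, 3)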
def get_dir(path_name, *, greedy=False, override=None, identity=None):
    if identity is None:
        identity = identify(path_name, override=override)
    path_name = os.path.normpath(path_name)
    if greedy and identity == ISDIR:
        return path_name
    else:
        return os.path.dirname(path_name)
Gets the directory path of the given path name. If the argument 'greedy' is specified as True, then if the path name represents a directory itself, the function will return the whole path
def get_deploy_data(self):
    if self.state and self.state.deploy_data:
        return self.state.deploy_data
    return {}
Gets any default data attached to the current deploy, if any.
def create_sphere_around_elec(xyz, template_mri, distance=8, freesurfer=None):
    if freesurfer is None:
        shift = 0
    else:
        shift = freesurfer.surface_ras_shift
    if isinstance(template_mri, str) or isinstance(template_mri, Path):
        template_mri = nload(str(template_mri))
    mask = zeros(template_mri.shape, dtype='bool')
    for vox in ndindex(template_mri.shape):
        vox_ras = apply_affine(template_mri.affine, vox) - shift
        if norm(xyz - vox_ras) <= distance:
            mask[vox] = True
    return mask
Create an MRI mask around an electrode location.

Parameters
----------
xyz : ndarray
    3x0 array
template_mri : path or str (as path) or nibabel.Nifti
    (path to) MRI to be used as template
distance : float
    distance in mm between electrode and selected voxels
freesurfer : instance of Freesurfer
    to adjust RAS coordinates, see Notes

Returns
-------
3d bool ndarray
    mask where True voxels are within selected distance to the electrode

Notes
-----
Freesurfer uses two coordinate systems: one for volumes ("RAS") and one for
surfaces ("tkReg", "tkRAS", and "Surface RAS"), so the electrodes might be
stored in one of the two systems. If the electrodes are in surface
coordinates (f.e. if you can plot surface and electrodes in the same space),
then you need to convert the coordinate system. This is done by passing an
instance of Freesurfer.
def _sync_notes(self, notes_json):
    for note_json in notes_json:
        note_id = note_json['id']
        task_id = note_json['item_id']
        if task_id not in self.tasks:
            continue
        task = self.tasks[task_id]
        self.notes[note_id] = Note(note_json, task)
Populate the user's notes from a JSON encoded list.
def ctcp(self, target, message, nowait=False):
    if target and message:
        messages = utils.split_message(message, self.config.max_length)
        f = None
        for message in messages:
            f = self.send_line('PRIVMSG %s :\x01%s\x01' % (target, message),
                               nowait=nowait)
        return f
send a ctcp to target
def get_ecf_props(ep_id, ep_id_ns, rsvc_id=None, ep_ts=None):
    results = {}
    if not ep_id:
        raise ArgumentError("ep_id", "ep_id must be a valid endpoint id")
    results[ECF_ENDPOINT_ID] = ep_id
    if not ep_id_ns:
        raise ArgumentError("ep_id_ns", "ep_id_ns must be a valid namespace")
    results[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = ep_id_ns
    if not rsvc_id:
        rsvc_id = get_next_rsid()
    results[ECF_RSVC_ID] = rsvc_id
    if not ep_ts:
        ep_ts = time_since_epoch()
    results[ECF_ENDPOINT_TIMESTAMP] = ep_ts
    return results
Prepares the ECF properties :param ep_id: Endpoint ID :param ep_id_ns: Namespace of the Endpoint ID :param rsvc_id: Remote service ID :param ep_ts: Timestamp of the endpoint :return: A dictionary of ECF properties
def get_data_from_request():
    return {
        'request': {
            'url': '%s://%s%s' % (web.ctx['protocol'], web.ctx['host'], web.ctx['path']),
            'query_string': web.ctx.query,
            'method': web.ctx.method,
            'data': web.data(),
            'headers': dict(get_headers(web.ctx.environ)),
            'env': dict(get_environ(web.ctx.environ)),
        }
    }
Returns request data extracted from web.ctx.
def parametrized_unbottleneck(x, hidden_size, hparams):
    if hparams.bottleneck_kind == "tanh_discrete":
        return tanh_discrete_unbottleneck(x, hidden_size)
    if hparams.bottleneck_kind == "isemhash":
        return isemhash_unbottleneck(x, hidden_size,
                                     hparams.isemhash_filter_size_multiplier)
    if hparams.bottleneck_kind in ["vq", "em", "gumbel_softmax"]:
        return vq_discrete_unbottleneck(x, hidden_size)
    raise ValueError(
        "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
Meta-function calling all the above un-bottlenecks with hparams.
def predict(self, mllib_data):
    if isinstance(mllib_data, pyspark.mllib.linalg.Matrix):
        return to_matrix(self._master_network.predict(from_matrix(mllib_data)))
    elif isinstance(mllib_data, pyspark.mllib.linalg.Vector):
        return to_vector(self._master_network.predict(from_vector(mllib_data)))
    else:
        raise ValueError(
            'Provide either an MLLib matrix or vector, got {}'.format(
                type(mllib_data).__name__))
Predict probabilities for an RDD of features
def drop_field(app_name, model_name, field_name):
    app_config = apps.get_app_config(app_name)
    model = app_config.get_model(model_name)
    field = model._meta.get_field(field_name)
    with connection.schema_editor() as schema_editor:
        schema_editor.remove_field(model, field)
Drop the given field from the app's model
def _glob_pjoin(*parts):
    if parts[0] in ('.', ''):
        parts = parts[1:]
    return pjoin(*parts).replace(os.sep, '/')
Join paths for glob processing
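A short usage sketch, assuming _glob_pjoin above is importable and that pjoin is os.path.join as in the original module.

import os
from os.path import join as pjoin

print(_glob_pjoin("src", "static", "*.js"))      # 'src/static/*.js' on any OS
print(_glob_pjoin(".", "build", "**", "*.css"))  # leading '.' dropped: 'build/**/*.css'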
def _add_missing_routes(route_spec, failed_ips, questionable_ips,
                        chosen_routers, vpc_info, con, routes_in_rts):
    for dcidr, hosts in route_spec.items():
        new_router_ip = chosen_routers.get(dcidr)
        for rt_id, dcidr_list in routes_in_rts.items():
            if dcidr not in dcidr_list:
                if not new_router_ip:
                    new_router_ip = _choose_different_host(None, hosts,
                                                           failed_ips,
                                                           questionable_ips)
                if not new_router_ip:
                    logging.warning("--- cannot find available target "
                                    "for route addition %s! "
                                    "Nothing I can do..." % (dcidr))
                    break
                _add_new_route(dcidr, new_router_ip, vpc_info, con, rt_id)
Iterate over route spec and add all the routes we haven't set yet. This relies on being told what routes we HAVE already. This is passed in via the routes_in_rts dict. Furthermore, some routes may be set in some RTs, but not in others. In that case, we may already have seen which router was chosen for a certain route. This information is passed in via the chosen_routers dict. We should choose routers that were used before.
def sanity_check_insdcio(handle, id_marker, fake_id_line):
    found_id = False
    found_end_marker = False
    for line in handle:
        line = line.strip()
        if not line:
            continue
        if line.startswith(id_marker):
            found_id = True
            break
        if line.startswith('//'):
            found_end_marker = True
            break
    handle.seek(0)
    if found_id:
        return handle
    if not found_end_marker:
        return handle
    new_handle = StringIO()
    new_handle.write("%s\n" % fake_id_line)
    new_handle.write(handle.read())
    new_handle.seek(0)
    return new_handle
Sanity check for insdcio style files
def publish_and_get_event(self, resource):
    l_subscribed = False
    this_event = None
    if not self.__subscribed:
        self._get_event_stream()
        self._subscribe_myself()
        l_subscribed = True
    status = self.publish(
        action='get',
        resource=resource,
        mode=None,
        publish_response=False)
    if status == 'success':
        i = 0
        while not this_event and i < 2:
            self.__event_handle.wait(5.0)
            self.__event_handle.clear()
            _LOGGER.debug("Instance %s resource: %s", str(i), resource)
            for event in self.__events:
                if event['resource'] == resource:
                    this_event = event
                    self.__events.remove(event)
                    break
            i = i + 1
    if l_subscribed:
        self._unsubscribe_myself()
        self._close_event_stream()
        l_subscribed = False
    return this_event
Publish and get the event from base station.
def fail_connection(self, code: int = 1006, reason: str = "") -> None:
    logger.debug(
        "%s ! failing %s WebSocket connection with code %d",
        self.side,
        self.state.name,
        code,
    )
    if hasattr(self, "transfer_data_task"):
        self.transfer_data_task.cancel()
    if code != 1006 and self.state is State.OPEN:
        frame_data = serialize_close(code, reason)
        self.state = State.CLOSING
        logger.debug("%s - state = CLOSING", self.side)
        frame = Frame(True, OP_CLOSE, frame_data)
        logger.debug("%s > %r", self.side, frame)
        frame.write(
            self.writer.write, mask=self.is_client, extensions=self.extensions
        )
    if not hasattr(self, "close_connection_task"):
        self.close_connection_task = self.loop.create_task(self.close_connection())
7.1.7. Fail the WebSocket Connection This requires: 1. Stopping all processing of incoming data, which means cancelling :attr:`transfer_data_task`. The close code will be 1006 unless a close frame was received earlier. 2. Sending a close frame with an appropriate code if the opening handshake succeeded and the other side is likely to process it. 3. Closing the connection. :meth:`close_connection` takes care of this once :attr:`transfer_data_task` exits after being canceled. (The specification describes these steps in the opposite order.)
def reflect_image(image, axis=None, tx=None, metric='mattes'):
    if axis is None:
        axis = image.dimension - 1
    if (axis > image.dimension) or (axis < 0):
        axis = image.dimension - 1
    rflct = mktemp(suffix='.mat')
    libfn = utils.get_lib_fn('reflectionMatrix%s' % image._libsuffix)
    libfn(image.pointer, axis, rflct)
    if tx is not None:
        rfi = registration(image, image, type_of_transform=tx,
                           syn_metric=metric, outprefix=mktemp(),
                           initial_transform=rflct)
        return rfi
    else:
        return apply_transforms(image, image, rflct)
Reflect an image along an axis

ANTsR function: `reflectImage`

Arguments
---------
image : ANTsImage
    image to reflect
axis : integer (optional)
    which dimension to reflect across, numbered from 0 to imageDimension-1
tx : string (optional)
    transformation type to estimate after reflection
metric : string
    similarity metric for image registration. see antsRegistration.

Returns
-------
ANTsImage

Example
-------
>>> import ants
>>> fi = ants.image_read( ants.get_ants_data('r16'), 'float' )
>>> axis = 2
>>> asym = ants.reflect_image(fi, axis, 'Affine')['warpedmovout']
>>> asym = asym - fi
def expected_peer_units():
    if not has_juju_version("2.4.0"):
        raise NotImplementedError("goal-state")
    _goal_state = goal_state()
    return (key for key in _goal_state['units']
            if '/' in key and key != local_unit())
Get a generator for units we expect to join peer relation based on goal-state. The local unit is excluded from the result to make it easy to gauge completion of all peers joining the relation with existing hook tools. Example usage: log('peer {} of {} joined peer relation' .format(len(related_units()), len(list(expected_peer_units())))) This function will raise NotImplementedError if used with juju versions without goal-state support. :returns: iterator :rtype: types.GeneratorType :raises: NotImplementedError
def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType:
    with open(path, 'r') as fh:
        wif_content = fh.read()
    regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
    match = search(regex, wif_content)
    if not match:
        raise Exception('Error: Bad format WIF or EWIF v1 file')
    wif_hex = match.groups()[0]
    return SigningKey.from_wif_or_ewif_hex(wif_hex, password)
Return SigningKey instance from Duniter WIF or EWIF file :param path: Path to WIF or EWIF file :param password: Password needed for EWIF file
def delete(self, key):
    self._get_table()
    self.table.delete_item(key=key)
    log.debug("Deleted item at key '%s'" % (key))
If this key exists, delete it
def clean(self):
    result = super(User, self).clean()
    result['verified'] = 'verification_hash' not in self._resource
    return result
Verified value is derived from whether user has a verification hash
def set_image(self, image):
    imwidth, imheight = image.size
    if imwidth != 8 or imheight != 16:
        raise ValueError('Image must be an 8x16 pixels in size.')
    pix = image.convert('1').load()
    for x in xrange(8):
        for y in xrange(16):
            color = pix[(x, y)]
            if color == 0:
                self.set_pixel(x, y, 0)
            else:
                self.set_pixel(x, y, 1)
Set display buffer to Python Image Library image. Image will be converted to 1 bit color and non-zero color values will light the LEDs.
def info(gandi, resource):
    output_keys = ['ip', 'state', 'dc', 'type', 'vm', 'reverse']
    datacenters = gandi.datacenter.list()
    ip = gandi.ip.info(resource)
    iface = gandi.iface.info(ip['iface_id'])
    vms = None
    if iface.get('vm_id'):
        vm = gandi.iaas.info(iface['vm_id'])
        vms = {vm['id']: vm}
    output_ip(gandi, ip, datacenters, vms, {iface['id']: iface}, output_keys)
    return ip
Display information about an ip. Resource can be an ip or id.
def get_tilt(cont):
    if isinstance(cont, np.ndarray):
        cont = [cont]
        ret_list = False
    else:
        ret_list = True
    length = len(cont)
    tilt = np.zeros(length, dtype=float) * np.nan
    for ii in range(length):
        moments = cont_moments_cv(cont[ii])
        if moments is not None:
            oii = 0.5 * np.arctan2(2 * moments['mu11'],
                                   moments['mu02'] - moments['mu20'])
            tilt[ii] = oii + np.pi / 2
    tilt = np.mod(tilt, np.pi)
    tilt[tilt > np.pi / 2] -= np.pi
    tilt = np.abs(tilt)
    if not ret_list:
        tilt = tilt[0]
    return tilt
Compute tilt of raw contour relative to channel axis

Parameters
----------
cont: ndarray or list of ndarrays of shape (N,2)
    A 2D array that holds the contour of an event (in pixels)
    e.g. obtained using `mm.contour` where `mm` is an instance
    of `RTDCBase`. The first and second columns of `cont`
    correspond to the x- and y-coordinates of the contour.

Returns
-------
tilt: float or ndarray of size N
    Tilt of the contour in the interval [0, PI/2]

References
----------
- `<https://en.wikipedia.org/wiki/Image_moment#Examples_2>`__
def create_scraper(cls, sess=None, **kwargs):
    scraper = cls(**kwargs)
    if sess:
        attrs = ["auth", "cert", "cookies", "headers", "hooks",
                 "params", "proxies", "data"]
        for attr in attrs:
            val = getattr(sess, attr, None)
            if val:
                setattr(scraper, attr, val)
    return scraper
Convenience function for creating a ready-to-go CloudflareScraper object.
def transform_conf_module(cls):
    global CONF_NODE
    if cls.name == 'openhtf.conf':
        cls._locals.update(cls.locals['Configuration'][0].locals)
        CONF_NODE = cls
        CONF_LOCALS.update(cls.locals)
Transform usages of the conf module by updating locals.
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
    assert 0 <= t0 <= 1 and 0 <= t1 <= 1
    if _quad_available:
        return quad(lambda tau: abs(self.derivative(tau)), t0, t1,
                    epsabs=error, limit=1000)[0]
    else:
        return segment_length(self, t0, t1, self.point(t0), self.point(t1),
                              error, min_depth, 0)
The length of an elliptical large_arc segment requires numerical integration, and in that case it's simpler to just do a geometric approximation, as for cubic bezier curves.
def getValueByName(node, name):
    try:
        value = node.xpath("*[local-name() = '%s']" % name)[0].text.strip()
    except:
        return None
    return value
A helper function to pull the values out of those annoying namespace prefixed tags
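A hedged usage sketch with lxml, assuming that is the xpath-capable node type in play here; getValueByName is the helper above and the XML snippet is invented for the example.

from lxml import etree

xml = b"""<root xmlns:ns="http://example.com/ns">
  <ns:title> Hello </ns:title>
</root>"""
node = etree.fromstring(xml)
print(getValueByName(node, "title"))    # 'Hello', regardless of the ns: prefix
print(getValueByName(node, "missing"))  # None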
def encode_id_header(resource):
    if not hasattr(resource, "id"):
        return {}
    return {
        "X-{}-Id".format(
            camelize(name_for(resource))
        ): str(resource.id),
    }
Generate a header for a newly created resource. Assume `id` attribute convention.
def mk_fmtfld(nt_item, joinchr=" ", eol="\n"):
    fldstrs = []
    fld2fmt = {
        'hdrgo': lambda f: "{{{FLD}:1,}}".format(FLD=f),
        'dcnt': lambda f: "{{{FLD}:6,}}".format(FLD=f),
        'level': lambda f: "L{{{FLD}:02,}}".format(FLD=f),
        'depth': lambda f: "D{{{FLD}:02,}}".format(FLD=f),
    }
    for fld in nt_item._fields:
        if fld in fld2fmt:
            val = fld2fmt[fld](fld)
        else:
            val = "{{{FLD}}}".format(FLD=fld)
        fldstrs.append(val)
    return "{LINE}{EOL}".format(LINE=joinchr.join(fldstrs), EOL=eol)
Given a namedtuple, return a format_field string.
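A worked example of the format string this produces, using a made-up namedtuple whose field names happen to include the special 'hdrgo' and 'dcnt' fields; mk_fmtfld is assumed to be the function above.

from collections import namedtuple

NtGo = namedtuple("NtGo", ["hdrgo", "dcnt", "GO", "name"])
row = NtGo(hdrgo=1, dcnt=12345, GO="GO:0008150", name="biological_process")
fmt = mk_fmtfld(row)
print(repr(fmt))  # '{hdrgo:1,} {dcnt:6,} {GO} {name}\n'
print(fmt.format(**row._asdict()), end="")  # '1 12,345 GO:0008150 biological_process'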
def namespace(self):
    if self.prefix is None:
        return Namespace.default
    else:
        return self.resolvePrefix(self.prefix)
Get the attributes namespace. This may either be the namespace defined by an optional prefix, or its parent's namespace. @return: The attribute's namespace @rtype: (I{prefix}, I{name})
def _writeString(self, obj, use_reference=True):
    string = to_bytes(obj, "utf-8")
    if use_reference and isinstance(obj, JavaString):
        try:
            idx = self.references.index(obj)
        except ValueError:
            self.references.append(obj)
            logging.debug(
                "*** Adding ref 0x%X for string: %s",
                len(self.references) - 1 + self.BASE_REFERENCE_IDX,
                obj,
            )
            self._writeStruct(">H", 2, (len(string),))
            self.object_stream.write(string)
        else:
            logging.debug(
                "*** Reusing ref 0x%X for string: %s",
                idx + self.BASE_REFERENCE_IDX,
                obj,
            )
            self.write_reference(idx)
    else:
        self._writeStruct(">H", 2, (len(string),))
        self.object_stream.write(string)
Appends a string to the serialization stream :param obj: String to serialize :param use_reference: If True, allow writing a reference
def registerHandler(self, fh):
    self.fds.add(fh)
    self.atime = int(time())
    self.lock.acquire()
    try:
        if (0, self.filesize) not in self.avail and self.preferences['stream'] is False:
            Downloader.fetch(self, None, fh)
    except requests.exceptions.ConnectionError:
        raise ConnectionError
    finally:
        self.lock.release()
Register new file descriptor. Parameters ---------- fh : int File descriptor.
def get_and_write_raw(self, url: str, filename: str) -> None:
    self.write_raw(self.get_raw(url), filename)
Downloads and writes anonymously-requested raw data into a file. :raises QueryReturnedNotFoundException: When the server responds with a 404. :raises QueryReturnedForbiddenException: When the server responds with a 403. :raises ConnectionException: When download repeatedly failed.
def maybe_print_as_json(opts, data, page_info=None):
    if opts.output not in ("json", "pretty_json"):
        return False
    root = {"data": data}
    if page_info is not None and page_info.is_valid:
        meta = root["meta"] = {}
        meta["pagination"] = page_info.as_dict(num_results=len(data))
    if opts.output == "pretty_json":
        dump = json.dumps(root, indent=4, sort_keys=True)
    else:
        dump = json.dumps(root, sort_keys=True)
    click.echo(dump)
    return True
Maybe print data as JSON.
def most_mergeable(self, states):
    histories = set(self.get_ref(s.history) for s in states)
    for n in networkx.algorithms.dfs_postorder_nodes(self._graph):
        intersection = histories.intersection(self.all_successors(n))
        if len(intersection) > 1:
            return (
                [s for s in states if self.get_ref(s.history) in intersection],
                n(),
                [s for s in states if self.get_ref(s.history) not in intersection]
            )
    return set(), None, states
Find the "most mergeable" set of states from those provided. :param states: a list of states :returns: a tuple of: (a list of states to merge, those states' common history, a list of states to not merge yet)
def get_last_activity(session):
    try:
        return datetime.strptime(session['_session_security'],
                                 '%Y-%m-%dT%H:%M:%S.%f')
    except AttributeError:
        return datetime.now()
    except TypeError:
        return datetime.now()
Get the last activity datetime string from the session and return the python datetime object.
def check_user(user, password):
    return ((user == attowiki.user or attowiki.user is None) and
            (password == attowiki.password or attowiki.password is None))
check the auth for user and password.
def extract_payload(self, request, verify=True, *args, **kwargs):
    payload = self._verify(
        request, return_payload=True, verify=verify, *args, **kwargs
    )
    return payload
Extract a payload from a request object.
def _run_pyshark(self, pyshark):
    if self._exlyr != 'None' or self._exptl != 'null':
        warnings.warn("'Extractor(engine=pyshark)' does not support protocol and layer threshold; "
                      f"'layer={self._exlyr}' and 'protocol={self._exptl}' ignored",
                      AttributeWarning, stacklevel=stacklevel())
    if (self._ipv4 or self._ipv6 or self._tcp):
        self._ipv4 = self._ipv6 = self._tcp = False
        self._reasm = [None] * 3
        warnings.warn("'Extractor(engine=pyshark)' object does not support reassembly; "
                      f"so 'ipv4={self._ipv4}', 'ipv6={self._ipv6}' and 'tcp={self._tcp}' will be ignored",
                      AttributeWarning, stacklevel=stacklevel())
    self._expkg = pyshark
    self._extmp = iter(pyshark.FileCapture(self._ifnm, keep_packets=False))
    self.record_frames()
Call pyshark.FileCapture to extract PCAP files.
def start(self):
    if self.greenlet:
        raise RuntimeError(f'Greenlet {self.greenlet!r} already started')
    pristine = (
        not self.greenlet.dead and
        tuple(self.greenlet.args) == tuple(self.args) and
        self.greenlet.kwargs == self.kwargs
    )
    if not pristine:
        self.greenlet = Greenlet(self._run, *self.args, **self.kwargs)
        self.greenlet.name = f'{self.__class__.__name__}|{self.greenlet.name}'
    self.greenlet.start()
Synchronously start task. Reimplement in children and call super().start() at the end to start _run(). Start-time exceptions may be raised.
def GetInstances(r, bulk=False):
    if bulk:
        return r.request("get", "/2/instances", query={"bulk": 1})
    else:
        instances = r.request("get", "/2/instances")
        return r.applier(itemgetters("id"), instances)
Gets information about instances on the cluster. @type bulk: bool @param bulk: whether to return all information about all instances @rtype: list of dict or list of str @return: if bulk is True, info about the instances, else a list of instances
def quasiparticle_weight(self):
    return np.array([self.expected(op)**2 for op in self.oper['O']])
Calculates quasiparticle weight
def set_redraw_lag(self, lag_sec):
    self.defer_redraw = (lag_sec > 0.0)
    if self.defer_redraw:
        self.defer_lagtime = lag_sec
Set lag time for redrawing the canvas. Parameters ---------- lag_sec : float Number of seconds to wait.
def unarchive_user(self, user_id):
    url = self.record_url + "/unarchive"
    res = requests.patch(url=url, json={"user_id": user_id}, headers=HEADERS,
                         verify=False)
    self.write_response_html_to_file(res, "bob.html")
    res.raise_for_status()
Unarchives the user with the specified user ID. Args: user_id: `int`. The ID of the user to unarchive. Returns: `NoneType`: None.
def is_expired(self):
    expires = self.expires_at
    if expires is not None:
        return expires <= datetime.utcnow()
    return False
``True`` if this key is expired, otherwise ``False``
def close_stream(self):
    if self.fout:
        fout = self.fout
        fout_fn = self.fout_fn
        self.fout.flush()
        self.fout.close()
        self.fout = None
        self.fout_fn = None
        return fout_fn
Terminates an open stream and returns the filename of the file containing the streamed data.
def _send_and_reconnect(self, message):
    try:
        self.socket.sendall(message.encode("ascii"))
    except (AttributeError, socket.error):
        if not self.autoreconnect():
            raise
        else:
            self.socket.sendall(message.encode("ascii"))
Send _message_ to Graphite Server and attempt reconnect on failure. If _autoreconnect_ was specified, attempt to reconnect if first send fails. :raises AttributeError: When the socket has not been set. :raises socket.error: When the socket connection is no longer valid.
def check_for_debug(supernova_args, nova_args):
    if supernova_args['debug'] and supernova_args['executable'] == 'heat':
        nova_args.insert(0, '-d ')
    elif supernova_args['debug']:
        nova_args.insert(0, '--debug ')
    return nova_args
If the user wanted to run the executable with debugging enabled, we need to apply the correct arguments to the executable. Heat is a corner case since it uses -d instead of --debug.
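Illustrative calls, assuming check_for_debug above is in scope; the supernova_args dicts are minimal stand-ins for the real parsed arguments.

supernova_args = {"debug": True, "executable": "nova"}
print(check_for_debug(supernova_args, ["list"]))        # ['--debug ', 'list']

supernova_args = {"debug": True, "executable": "heat"}
print(check_for_debug(supernova_args, ["stack-list"]))  # ['-d ', 'stack-list']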
def _set_rules(self, rules: dict, overwrite=True):
    if not isinstance(rules, dict):
        raise TypeError('rules must be an instance of dict or Rules, '
                        'got %r instead' % type(rules))
    if overwrite:
        self.rules = Rules(rules, self.default_rule)
    else:
        self.rules.update(rules)
Create a new Rules object based on the provided dict of rules.
def my_version():
    if os.path.exists(resource_filename(__name__, 'version')):
        return resource_string(__name__, 'version')
    return open(os.path.join(os.path.dirname(__file__), "..", "version")).read()
Return the version, checking both packaged and development locations
def deb64_app(parser, cmd, args):
    parser.add_argument('value',
                        help='the value to base64 decode, read from stdin if omitted',
                        nargs='?')
    args = parser.parse_args(args)
    return deb64(pwnypack.main.string_value_or_stdin(args.value))
base64 decode a value.
def get_candidates(self):
    candidate_elections = CandidateElection.objects.filter(election=self)
    return [ce.candidate for ce in candidate_elections]
Get all CandidateElections for this election.
def _decompress_data(self, data):
    if self._decompressor:
        try:
            return self._decompressor.decompress(data)
        except zlib.error as error:
            raise ProtocolError(
                'zlib error: {0}.'.format(error)
            ) from error
    else:
        return data
Decompress the given data and return the uncompressed data.
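The helper defers to whatever decompressor object the protocol set up earlier; as a standalone sketch of the same pattern, a raw zlib decompress object behaves like this.

import zlib

decompressor = zlib.decompressobj()          # incremental decompressor, as assumed above
payload = zlib.compress(b"hello protocol")
print(decompressor.decompress(payload))      # b'hello protocol'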
def pystdlib():
    curver = '.'.join(str(x) for x in sys.version_info[:2])
    return (set(stdlib_list.stdlib_list(curver)) | {
        '_LWPCookieJar', '_MozillaCookieJar', '_abcoll', 'email._parseaddr',
        'email.base64mime', 'email.feedparser', 'email.quoprimime', 'encodings',
        'genericpath', 'ntpath', 'nturl2path', 'os2emxpath', 'posixpath',
        'sre_compile', 'sre_parse', 'unittest.case', 'unittest.loader',
        'unittest.main', 'unittest.result', 'unittest.runner', 'unittest.signals',
        'unittest.suite', 'unittest.util', '_threading_local', 'sre_constants',
        'strop', 'repr', 'opcode', 'nt', 'encodings.aliases', '_bisect',
        '_codecs', '_collections', '_functools', '_hashlib', '_heapq', '_io',
        '_locale', '_LWPCookieJar', '_md5', '_MozillaCookieJar', '_random',
        '_sha', '_sha256', '_sha512', '_socket', '_sre', '_ssl', '_struct',
        '_subprocess', '_threading_local', '_warnings', '_weakref',
        '_weakrefset', '_winreg'
    }) - {'__main__'}
Return a set of all module-names in the Python standard library.
def clusterQueues(self):
    servers = yield self.getClusterServers()
    queues = {}
    for sname in servers:
        qs = yield self.get('rhumba.server.%s.queues' % sname)
        uuid = yield self.get('rhumba.server.%s.uuid' % sname)
        qs = json.loads(qs)
        for q in qs:
            if q not in queues:
                queues[q] = []
            queues[q].append({'host': sname, 'uuid': uuid})
    defer.returnValue(queues)
Return a dict of queues in cluster and servers running them
def bind_env(self, *input_):
    if len(input_) == 0:
        return "bind_env missing key to bind to"
    key = input_[0].lower()
    if len(input_) == 1:
        env_key = self._merge_with_env_prefix(key)
    else:
        env_key = input_[1]
    self._env[key] = env_key
    if self._key_delimiter in key:
        parts = input_[0].split(self._key_delimiter)
        env_info = {
            "path": parts[1:-1],
            "final_key": parts[-1],
            "env_key": env_key
        }
        if self._env.get(parts[0]) is None:
            self._env[parts[0]] = [env_info]
        else:
            self._env[parts[0]].append(env_info)
    return None
Binds a Vyper key to a ENV variable. ENV variables are case sensitive. If only a key is provided, it will use the env key matching the key, uppercased. `env_prefix` will be used when set when env name is not provided.
def parse(yaml, validate=True):
    data = read_yaml(yaml)
    if validate:
        from .validation import validate
        validate(data, raise_exc=True)
    return Config.parse(data)
Parse the given YAML data into a `Config` object, optionally validating it first. :param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list) :type yaml: list|dict|str|file :param validate: Whether to validate the data before attempting to parse it. :type validate: bool :return: Config object :rtype: valohai_yaml.objs.Config
def get(self, request, *args, **kwargs):
    measurements = Measurement.objects.all()
    return data_csv(self.request, measurements)
The queryset returns all measurement objects
def create_rrset(self, zone_name, rtype, owner_name, ttl, rdata):
    if type(rdata) is not list:
        rdata = [rdata]
    rrset = {"ttl": ttl, "rdata": rdata}
    return self.rest_api_connection.post(
        "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name,
        json.dumps(rrset))
Creates a new RRSet in the specified zone. Arguments: zone_name -- The zone that will contain the new RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The TTL value for the RRSet. rdata -- The BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings.
def string_literal(content):
    if '"' in content and "'" in content:
        raise ValueError("Cannot represent this string in XPath")
    if '"' in content:
        content = "'%s'" % content
    else:
        content = '"%s"' % content
    return content
Choose a string literal that can wrap our string. If your string contains a ``\'`` the result will be wrapped in ``\"``. If your string contains a ``\"`` the result will be wrapped in ``\'``. Cannot currently handle strings which contain both ``\"`` and ``\'``.
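A few illustrative calls, assuming string_literal above is in scope.

print(string_literal('plain'))        # "plain"
print(string_literal('say "hi"'))     # 'say "hi"'
print(string_literal("it's fine"))    # "it's fine"
string_literal('''can't say "hi"''')  # raises ValueError: both quote kinds present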
def verifymessage(self, address, signature, message):
    return self.req("verifymessage", [address, signature, message])
Verify a signed message.
def create_followup(self, post, content, anonymous=False):
    try:
        cid = post["id"]
    except KeyError:
        cid = post
    params = {
        "cid": cid,
        "type": "followup",
        "subject": content,
        "content": "",
        "anonymous": "yes" if anonymous else "no",
    }
    return self._rpc.content_create(params)
Create a follow-up on a post `post`. It seems like if the post has `<p>` tags, then it's treated as HTML, but is treated as text otherwise. You'll want to provide `content` accordingly. :type post: dict|str|int :param post: Either the post dict returned by another API method, or the `cid` field of that post. :type content: str :param content: The content of the followup. :type anonymous: bool :param anonymous: Whether or not to post anonymously. :rtype: dict :returns: Dictionary with information about the created follow-up.
def __replace_within_document(self, document, occurrences, replacement_pattern):
    cursor = QTextCursor(document)
    cursor.beginEditBlock()
    offset = count = 0
    for occurence in sorted(occurrences, key=lambda x: x.position):
        cursor.setPosition(offset + occurence.position, QTextCursor.MoveAnchor)
        cursor.setPosition(offset + occurence.position + occurence.length,
                           QTextCursor.KeepAnchor)
        cursor.insertText(replacement_pattern)
        offset += len(replacement_pattern) - occurence.length
        count += 1
    cursor.endEditBlock()
    return count
Replaces given pattern occurrences in given document using given settings. :param document: Document. :type document: QTextDocument :param replacement_pattern: Replacement pattern. :type replacement_pattern: unicode :return: Replaced occurrences count. :rtype: int
def RfiltersBM(dataset, database, host=rbiomart_host):
    biomaRt = importr("biomaRt")
    ensemblMart = biomaRt.useMart(database, host=host)
    ensembl = biomaRt.useDataset(dataset, mart=ensemblMart)
    print(biomaRt.listFilters(ensembl))
Lists BioMart filters through a RPY2 connection. :param dataset: a dataset listed in RdatasetsBM() :param database: a database listed in RdatabasesBM() :param host: address of the host server, default='www.ensembl.org' :returns: nothing
def pairwise_ellpitical_binary(sources, eps, far=None):
    if far is None:
        far = max(a.a / 3600 for a in sources)
    l = len(sources)
    distances = np.zeros((l, l), dtype=bool)
    for i in range(l):
        for j in range(i, l):
            if i == j:
                distances[i, j] = False
                continue
            src1 = sources[i]
            src2 = sources[j]
            if src2.dec - src1.dec > far:
                break
            if abs(src2.ra - src1.ra) * np.cos(np.radians(src1.dec)) > far:
                continue
            distances[i, j] = norm_dist(src1, src2) > eps
            distances[j, i] = distances[i, j]
    return distances
Do a pairwise comparison of all sources and determine if they have a
normalized distance within eps.

Form this into a matrix of shape NxN.

Parameters
----------
sources : list
    A list of sources (objects with parameters: ra,dec,a,b,pa)
eps : float
    Normalised distance constraint.
far : float
    If sources have a dec that differs by more than this amount then they
    are considered to be not matched. This is a short-cut around performing
    GCD calculations.

Returns
-------
prob : numpy.ndarray
    A 2d array of True/False.

See Also
--------
:func:`AegeanTools.cluster.norm_dist`
def low_limit(self) -> Optional[Union[int, float]]:
    return self._get_field_value(SpecialDevice.PROP_LOW_LIMIT)
Low limit setting for a special sensor. For LS-10/LS-20 base units this is the alarm low limit. For LS-30 base units, this is either alarm OR control low limit, as indicated by special_status ControlAlarm bit flag.
def status(self, status_in):
    if isinstance(status_in, PIDStatus):
        status_in = [status_in, ]
    return self.filter(
        self._filtered_pid_class.status.in_(status_in)
    )
Filter the PIDs based on their status.
def addon_name(self):
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        el = self.find_description()
        return el.find_element(By.CSS_SELECTOR, "b").text
Provide access to the add-on name. Returns: str: Add-on name.
def escape(string, escape_pattern):
    try:
        return string.translate(escape_pattern)
    except AttributeError:
        warnings.warn("Non-string-like data passed. "
                      "Attempting to convert to 'str'.")
        return str(string).translate(tag_escape)
Assistant function for string escaping
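The escape pattern is expected to be a str.translate table; a standalone sketch with an illustrative table (not the library's own tag_escape) looks like this.

# Illustrative translate table mapping markup characters to entities.
escape_pattern = str.maketrans({"&": "&amp;", "<": "&lt;", ">": "&gt;"})
print("a < b & c".translate(escape_pattern))  # 'a &lt; b &amp; c'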
def find_methods(self, classname=".*", methodname=".*", descriptor=".*",
                 accessflags=".*", no_external=False):
    for cname, c in self.classes.items():
        if re.match(classname, cname):
            for m in c.get_methods():
                z = m.get_method()
                if no_external and isinstance(z, ExternalMethod):
                    continue
                if re.match(methodname, z.get_name()) and \
                        re.match(descriptor, z.get_descriptor()) and \
                        re.match(accessflags, z.get_access_flags_string()):
                    yield m
Find a method by name using regular expression. This method will return all MethodClassAnalysis objects, which match the classname, methodname, descriptor and accessflags of the method. :param classname: regular expression for the classname :param methodname: regular expression for the method name :param descriptor: regular expression for the descriptor :param accessflags: regular expression for the accessflags :param no_external: Remove external method from the output (default False) :rtype: generator of `MethodClassAnalysis`
def hashVariantAnnotation(cls, gaVariant, gaVariantAnnotation):
    treffs = [treff.id for treff in gaVariantAnnotation.transcript_effects]
    return hashlib.md5(
        "{}\t{}\t{}\t".format(
            gaVariant.reference_bases, tuple(gaVariant.alternate_bases), treffs)
    ).hexdigest()
Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects
def fetch(sequence, time='hour'):
    import StringIO
    import gzip
    import requests

    if time not in ['minute', 'hour', 'day']:
        raise ValueError('The supplied type of replication file does not exist.')
    sqn = str(sequence).zfill(9)
    url = "https://planet.osm.org/replication/%s/%s/%s/%s.osc.gz" % \
        (time, sqn[0:3], sqn[3:6], sqn[6:9])
    content = requests.get(url)
    if content.status_code == 404:
        raise EnvironmentError('Diff file cannot be found.')
    content = StringIO.StringIO(content.content)
    data_stream = gzip.GzipFile(fileobj=content)
    return data_stream
Fetch an OpenStreetMap diff file.

Parameters
----------
sequence : string or integer
    Diff file sequence desired. Maximum of 9 characters allowed. The value
    should follow the two directory and file name structure from the site,
    e.g. https://planet.osm.org/replication/hour/NNN/NNN/NNN.osc.gz
    (with leading zeros optional).
time : {'minute', 'hour', or 'day'}, optional
    Denotes the diff file time granulation to be downloaded. The value must
    be a valid directory at https://planet.osm.org/replication/.

Returns
-------
data_stream : class
    A file-like class containing a decompressed data stream from the
    fetched diff file in string format.
def _increment_current_byte(self):
    if self.tape[self.pointer] is None:
        self.tape[self.pointer] = 1
    elif self.tape[self.pointer] == self.MAX_CELL_SIZE:
        self.tape[self.pointer] = self.MIN_CELL_SIZE
    else:
        self.tape[self.pointer] += 1
Increments the value of the current byte at the pointer. If the result is over 255, then it will overflow to 0
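A standalone sketch of the same wrap-around rule, with illustrative 8-bit cell limits rather than the class's own MIN_CELL_SIZE/MAX_CELL_SIZE constants.

MIN_CELL_SIZE, MAX_CELL_SIZE = 0, 255
tape, pointer = [None, 254, 255], 2
value = tape[pointer]
if value is None:
    tape[pointer] = 1
elif value == MAX_CELL_SIZE:
    tape[pointer] = MIN_CELL_SIZE  # overflow wraps back to the minimum
else:
    tape[pointer] = value + 1
print(tape)  # [None, 254, 0]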
def is_binary_operator(oper):
    symbols = [
        ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',
        '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',
        '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']
    if not isinstance(oper, calldef_members.operator_t):
        return False
    if oper.symbol not in symbols:
        return False
    if isinstance(oper, calldef_members.member_operator_t):
        if len(oper.arguments) == 1:
            return True
        return False
    if len(oper.arguments) == 2:
        return True
    return False
returns True, if operator is binary operator, otherwise False
def listen(self, message_consumer):
    while not self._rfile.closed:
        request_str = self._read_message()
        if request_str is None:
            break
        try:
            message_consumer(json.loads(request_str.decode('utf-8')))
        except ValueError:
            log.exception("Failed to parse JSON message %s", request_str)
            continue
Blocking call to listen for messages on the rfile. Args: message_consumer (fn): function that is passed each message as it is read off the socket.
def _validate_initial_centers(initial_centers):
    if not (isinstance(initial_centers, _SFrame)):
        raise TypeError("Input 'initial_centers' must be an SFrame.")
    if initial_centers.num_rows() == 0 or initial_centers.num_columns() == 0:
        raise ValueError("An 'initial_centers' argument is provided " +
                         "but has no data.")
Validate the initial centers. Parameters ---------- initial_centers : SFrame Initial cluster center locations, in SFrame form.
def clone(self):
    temp = self.__class__()
    temp.base = self.base
    return temp
Return a new bitfield with the same value. The returned value is a copy, and so is no longer linked to the original bitfield. This is important when the original is located at anything other than normal memory, with accesses to it either slow or having side effects. Creating a clone, and working against that clone, means that only one read will occur.
def call_only_once(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        self = args[0]
        assert func.__name__ in dir(self), \
            "call_only_once can only be used on method or property!"
        if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
            cache = self._CALL_ONLY_ONCE_CACHE = set()
        else:
            cache = self._CALL_ONLY_ONCE_CACHE
        cls = type(self)
        is_method = inspect.isfunction(getattr(cls, func.__name__))
        assert func not in cache, \
            "{} {}.{} can only be called once per object!".format(
                'Method' if is_method else 'Property',
                cls.__name__, func.__name__)
        cache.add(func)
        return func(*args, **kwargs)
    return wrapper
Decorate a method or property of a class, so that this method can only be called once for every instance. Calling it more than once will result in exception.
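A usage sketch, assuming call_only_once above (together with its functools and inspect imports) is in scope; Model is an invented class for the example.

class Model:
    @call_only_once
    def build(self):
        print("building graph")

m = Model()
m.build()   # prints "building graph"
m.build()   # AssertionError: Method Model.build can only be called once per object!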
def normalize_filepath(filepath):
    filename = os.path.basename(filepath)
    dirpath = filepath[:-len(filename)]
    cre_controlspace = re.compile(r'[\t\r\n\f]+')
    new_filename = cre_controlspace.sub('', filename)
    if not new_filename == filename:
        logger.warning('Stripping whitespace from filename: {} => {}'.format(
            repr(filename), repr(new_filename)))
        filename = new_filename
    filename = filename.lower()
    filename = normalize_ext(filename)
    if dirpath:
        dirpath = dirpath[:-1]
        return os.path.join(dirpath, filename)
    return filename
r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz'
def connectToMissing(self) -> set:
    missing = self.reconcileNodeReg()
    if not missing:
        return missing
    logger.info("{}{} found the following missing connections: {}".
                format(CONNECTION_PREFIX, self, ", ".join(missing)))
    for name in missing:
        try:
            self.connect(name, ha=self.registry[name])
        except (ValueError, KeyError, PublicKeyNotFoundOnDisk,
                VerKeyNotFoundOnDisk) as ex:
            logger.warning('{}{} cannot connect to {} due to {}'.
                           format(CONNECTION_PREFIX, self, name, ex))
    return missing
Try to connect to the missing nodes
def deviance(self, endog, mu, freq_weights=1., scale=1.):
    endog_mu = self._clean(endog / mu)
    return 2 * np.sum(freq_weights * ((endog - mu) / mu - np.log(endog_mu)))
r""" Gamma deviance function Parameters ----------- endog : array-like Endogenous response variable mu : array-like Fitted mean response variable freq_weights : array-like 1d array of frequency weights. The default is 1. scale : float, optional An optional scale argument. The default is 1. Returns ------- deviance : float Deviance function as defined below
def ret_list_minions(self):
    tgt = _tgt_set(self.tgt)
    return self._ret_minions(tgt.intersection)
Return minions that match via list
def update(self, **kwargs):
    for key, value in kwargs.items():
        setattr(self, key, value)
Creates or updates a property for the instance for each parameter.