def send(self, data):
    if self._device:
        if isinstance(data, str):
            data = str.encode(data)
        if sys.version_info < (3,):
            if isinstance(data, unicode):
                data = bytes(data)
        self._device.write(data)
Sends data to the `AlarmDecoder`_ device. :param data: data to send :type data: string
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
    rv = _unquote_to_bytes(string, unsafe)
    if charset is not None:
        rv = rv.decode(charset, errors)
    return rv
URL decode a single string with a given encoding. If the charset is set to `None` no unicode decoding is performed and raw bytes are returned.

:param string: the string to unquote.
:param charset: the charset of the query string. If set to `None` no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
:param unsafe: characters that should not be unquoted.
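A brief usage sketch (assuming the usual percent-decoding behaviour of `_unquote_to_bytes`):

>>> url_unquote('a%20b')
'a b'
>>> url_unquote('a%20b', charset=None)
b'a b'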
def push(self, el):
    count = next(self.counter)
    heapq.heappush(self._queue, (el, count))
Put a new element in the queue.
def autoencoder_residual_text():
    hparams = autoencoder_residual()
    hparams.bottleneck_bits = 32
    hparams.batch_size = 1024
    hparams.hidden_size = 64
    hparams.max_hidden_size = 512
    hparams.bottleneck_noise = 0.0
    hparams.bottom = {
        "inputs": modalities.identity_bottom,
        "targets": modalities.identity_bottom,
    }
    hparams.top = {
        "targets": modalities.identity_top,
    }
    hparams.autoregressive_mode = "none"
    hparams.sample_width = 1
    return hparams
Residual autoencoder model for text.
def isSet(self, param):
    param = self._resolveParam(param)
    return param in self._paramMap
Checks whether a param is explicitly set by user.
def get_hypo_location(self, mesh_spacing, hypo_loc=None):
    mesh = self.mesh
    centroid = mesh.get_middle_point()
    if hypo_loc is None:
        return centroid
    total_len_y = (len(mesh.depths) - 1) * mesh_spacing
    y_distance = hypo_loc[1] * total_len_y
    y_node = int(numpy.round(y_distance / mesh_spacing))
    total_len_x = (len(mesh.lons[y_node]) - 1) * mesh_spacing
    x_distance = hypo_loc[0] * total_len_x
    x_node = int(numpy.round(x_distance / mesh_spacing))
    hypocentre = Point(mesh.lons[y_node][x_node],
                       mesh.lats[y_node][x_node],
                       mesh.depths[y_node][x_node])
    return hypocentre
The method determines the location of the hypocentre within the rupture.

:param mesh:
    :class:`~openquake.hazardlib.geo.mesh.Mesh` of points
:param mesh_spacing:
    The desired distance between two adjacent points in the source's rupture mesh, in km. This parameter mainly allows balancing the trade-off between the time needed to compute the distance between the rupture surface and a site, and the precision of that computation.
:param hypo_loc:
    Hypocentre location as a fraction of the rupture plane, as a tuple of (Along Strike, Down Dip); e.g. a hypocentre located in the centroid of the rupture would be input as (0.5, 0.5), whereas a hypocentre located 3/4 along the length and 1/4 of the way down dip of the rupture plane would be entered as (0.75, 0.25).
:returns:
    Hypocentre location as an instance of :class:`~openquake.hazardlib.geo.point.Point`
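As a rough worked example with hypothetical mesh sizes, the fraction-to-node mapping works like this:

>>> mesh_spacing = 5.0                                   # km (hypothetical)
>>> total_len_y = (11 - 1) * mesh_spacing                # a mesh of 11 depth rows spans 50 km down dip
>>> int(numpy.round(0.2 * total_len_y / mesh_spacing))   # hypo_loc[1] = 0.2 -> row index
2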
def plot_points(points, lattice=None, coords_are_cartesian=False, fold=False,
                ax=None, **kwargs):
    ax, fig, plt = get_ax3d_fig_plt(ax)
    if "color" not in kwargs:
        kwargs["color"] = "b"
    if (not coords_are_cartesian or fold) and lattice is None:
        raise ValueError(
            "coords_are_cartesian False or fold True require the lattice")
    for p in points:
        if fold:
            p = fold_point(p, lattice, coords_are_cartesian=coords_are_cartesian)
        elif not coords_are_cartesian:
            p = lattice.get_cartesian_coords(p)
        ax.scatter(*p, **kwargs)
    return fig, ax
Adds Points to a matplotlib Axes

Args:
    points: list of coordinates
    lattice: Lattice object used to convert from reciprocal to cartesian coordinates
    coords_are_cartesian: Set to True if you are providing coordinates in cartesian coordinates. Defaults to False. Requires lattice if False.
    fold: whether the points should be folded inside the first Brillouin Zone. Defaults to False. Requires lattice if True.
    ax: matplotlib :class:`Axes` or None if a new figure should be created.
    kwargs: kwargs passed to the matplotlib function 'scatter'. Color defaults to blue

Returns:
    matplotlib figure and matplotlib ax
def file_or_default(path, default, function=None):
    try:
        result = file_get_contents(path)
        if function is not None:
            return function(result)
        return result
    except IOError as e:
        if e.errno == errno.ENOENT:
            return default
        raise
Return a default value if a file does not exist
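A usage sketch (hypothetical path; `file_get_contents` is assumed to raise IOError with ENOENT for missing files, so anything else still propagates):

>>> file_or_default('/no/such/file', 'fallback')
'fallback'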
def datetime_parser(s):
    try:
        ts = arrow.get(s)
        if ts.tzinfo == arrow.get().tzinfo:
            ts = ts.replace(tzinfo='local')
    except Exception:
        # fall back to the more permissive parsedatetime parser
        c = pdt.Calendar()
        result, what = c.parse(s)
        ts = None
        if what in (1, 2, 3):
            ts = datetime.datetime(*result[:6])
            ts = arrow.get(ts)
            ts = ts.replace(tzinfo='local')
            return ts
        if ts is None:
            raise ValueError("Cannot parse timestamp '" + s + "'")
    return ts
Parse timestamp s in local time. First the arrow parser is used; if it fails, the parsedatetime parser is used.

:param s: timestamp string to parse
:return: an arrow timestamp in local time
def _isna_old(obj):
    if is_scalar(obj):
        return libmissing.checknull_old(obj)
    elif isinstance(obj, ABCMultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
        return _isna_ndarraylike_old(obj)
    elif isinstance(obj, ABCGeneric):
        return obj._constructor(obj._data.isna(func=_isna_old))
    elif isinstance(obj, list):
        return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
    elif hasattr(obj, '__array__'):
        return _isna_ndarraylike_old(np.asarray(obj))
    else:
        return obj is None
Detect missing values. Treat None, NaN, INF, -INF as null.

Parameters
----------
obj: ndarray or object value

Returns
-------
boolean ndarray or boolean
def get_file(self, fid):
    url = self.get_file_url(fid)
    return self.conn.get_raw_data(url)
Get file from WeedFS.

Returns file content. May be problematic for large files as content is stored in memory.

Args:
    **fid**: File identifier <volume_id>,<file_name_hash>

Returns:
    Content of the file with provided fid or None if file doesn't exist on the server

.. versionadded:: 0.3.1
def make_tag(cls, tag_name):
    if cls.cm:
        return cls.cm.make_tag(tag_name)
    return Tag(tag_name.strip())
Make a Tag object from a tag name. Registers it with the content manager if possible.
def _starts_with(field, filter_value):
    valid = False
    if field.startswith(filter_value):
        valid = True
    return valid
Validate field starts with provided value.

Args:
    field (string): The field to check.
    filter_value (string): The value the field must start with.

Returns:
    (boolean): Results of validation
def asmono(samples: np.ndarray, channel: Union[int, str] = 0) -> np.ndarray:
    if numchannels(samples) == 1:
        if isinstance(samples[0], float):
            return samples
        elif isinstance(samples[0], np.ndarray):
            return np.reshape(samples, (len(samples),))
        else:
            raise TypeError("Samples should be numeric, found: %s"
                            % str(type(samples[0])))
    if isinstance(channel, int):
        return samples[:, channel]
    elif channel == 'mix':
        return _mix(samples, scale_by_numchannels=True)
    else:
        raise ValueError("channel has to be an integer indicating a channel,"
                         " or 'mix' to mix down all channels")
Convert samples to mono if they are not mono already.

The returned array will always have the shape (numframes,)

channel: the channel number to use, or 'mix' to mix-down all channels
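A usage sketch (hypothetical stereo buffer; `numchannels` and `_mix` come from the same module):

>>> stereo = np.zeros((1024, 2), dtype=float)
>>> asmono(stereo).shape            # takes channel 0 by default
(1024,)
>>> asmono(stereo, channel='mix').shape
(1024,)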
def create_indices(catalog_slug):
    mapping = {
        "mappings": {
            "layer": {
                "properties": {
                    "layer_geoshape": {
                        "type": "geo_shape",
                        "tree": "quadtree",
                        "precision": REGISTRY_MAPPING_PRECISION
                    }
                }
            }
        }
    }
    ESHypermap.es.indices.create(catalog_slug, ignore=[400, 404], body=mapping)
Create ES core indices
def make_connection(self):
    "Make a fresh connection."
    connection = self.connection_class(**self.connection_kwargs)
    self._connections.append(connection)
    return connection
Make a fresh connection.
def points_to_spline_entity(points, smooth=None, count=None):
    from scipy.interpolate import splprep
    if count is None:
        count = len(points)
    if smooth is None:
        smooth = 0.002
    points = np.asanyarray(points, dtype=np.float64)
    closed = np.linalg.norm(points[0] - points[-1]) < tol.merge
    knots, control, degree = splprep(points.T, s=smooth)[0]
    control = np.transpose(control)
    index = np.arange(len(control))
    if closed:
        control[0] = control[[0, -1]].mean(axis=0)
        control = control[:-1]
        index[-1] = index[0]
    entity = entities.BSpline(points=index, knots=knots, closed=closed)
    return entity, control
Create a spline entity from a curve in space

Parameters
-----------
points: (n, dimension) float, points in space
smooth: float, smoothing amount
count: int, number of samples in result

Returns
---------
entity: entities.BSpline object with points indexed at zero
control: (m, dimension) float, new vertices for entity
def process(obj):
    merged = merge(obj)
    if obj.get('full'):
        print('Saving: {} ({:.2f}kB)'.format(obj['full'], len(merged) / 1024.0))
        _save(obj['full'], merged)
    else:
        print('Full merged size: {:.2f}kB'.format(len(merged) / 1024.0))
    if obj.get('jsmin'):
        jsMin(merged, obj['jsmin'])
    if obj.get('cssmin'):
        cssMin(merged, obj['cssmin'])
Process each block of the merger object.
def isreference(a):
    # linking is not ready yet (see docstring), so always report False;
    # the logic below is intentionally unreachable
    return False
    return id(a) != id(copy.copy(a))
    check = ('__dict__', '__slots__')
    for attr in check:
        try:
            getattr(a, attr)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            pass
        else:
            return True
    return False
Tell whether a variable is an object reference. Due to garbage collection, some objects happen to get the id of a distinct variable. As a consequence, linking is not ready yet and `isreference` returns ``False``.
def STRUCT_DECL(self, cursor, num=None):
    return self._record_decl(cursor, typedesc.Structure, num)
Handles Structure declaration. It's a wrapper around _record_decl.
def pca_overview(adata, **params):
    show = params['show'] if 'show' in params else None
    if 'show' in params:
        del params['show']
    scatterplots.pca(adata, **params, show=False)
    pca_loadings(adata, show=False)
    pca_variance_ratio(adata, show=show)
Plot PCA results.

The parameters are the ones of the scatter plot. Call pca_ranking separately if you want to change the default settings.

Parameters
----------
adata : :class:`~anndata.AnnData`
    Annotated data matrix.
color : string or list of strings, optional (default: `None`)
    Keys for observation/cell annotation either as list `["ann1", "ann2"]` or string `"ann1,ann2,..."`.
use_raw : `bool`, optional (default: `True`)
    Use `raw` attribute of `adata` if present.
{scatter_bulk}
show : bool, optional (default: `None`)
    Show the plot, do not return axis.
save : `bool` or `str`, optional (default: `None`)
    If `True` or a `str`, save the figure. A string is appended to the default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
def make_channel(name, samples, data=None, verbose=False):
    if verbose:
        llog = log['make_channel']
        llog.info("creating channel {0}".format(name))
    chan = Channel('channel_{0}'.format(name))
    chan.SetStatErrorConfig(0.05, "Poisson")
    if data is not None:
        if verbose:
            llog.info("setting data")
        chan.SetData(data)
    for sample in samples:
        if verbose:
            llog.info("adding sample {0}".format(sample.GetName()))
        chan.AddSample(sample)
    return chan
Create a Channel from a list of Samples
def disqualified(self, num, natural=True, **kwargs):
    search_type = 'natural' if natural else 'corporate'
    baseuri = (self._BASE_URI +
               'disqualified-officers/{}/{}'.format(search_type, num))
    res = self.session.get(baseuri, params=kwargs)
    self.handle_http_error(res)
    return res
Search for disqualified officers by officer ID. Searches for natural disqualifications by default. Specify natural=False to search for corporate disqualifications.

Args:
    num (str): Officer ID to search on.
    natural (Optional[bool]): Natural or corporate search
    kwargs (dict): additional keywords passed into requests.session.get *params* keyword.
def get(self, item):
    resource = super(CloudDatabaseManager, self).get(item)
    resource.volume = CloudDatabaseVolume(resource, resource.volume)
    return resource
This additional code is necessary to properly return the 'volume' attribute of the instance as a CloudDatabaseVolume object instead of a raw dict.
def visit_class(rec, cls, op):
    if isinstance(rec, MutableMapping):
        if "class" in rec and rec.get("class") in cls:
            op(rec)
        for d in rec:
            visit_class(rec[d], cls, op)
    if isinstance(rec, MutableSequence):
        for d in rec:
            visit_class(d, cls, op)
Apply a function `op` to every mapping whose "class" is in cls.
def bar(x, y, **kwargs):
    kwargs['x'] = x
    kwargs['y'] = y
    return _draw_mark(Bars, **kwargs)
Draws a bar chart in the current context figure.

Parameters
----------
x: numpy.ndarray, 1d
    The x-coordinates of the data points.
y: numpy.ndarray, 1d
    The y-coordinates of the data points.
options: dict (default: {})
    Options for the scales to be created. If a scale labeled 'x' is required for that mark, options['x'] contains optional keyword arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
    Options for the axes to be created. If an axis labeled 'x' is required for that mark, axes_options['x'] contains optional keyword arguments for the constructor of the corresponding axis type.
def fix_return_value(v, method_name, method=None, checker=None):
    method_name = (method_name or method.__func__.__name__).replace("check_", "")
    if v is None or not isinstance(v, Result):
        v = Result(value=v, name=method_name)
    v.name = v.name or method_name
    v.checker = checker
    v.check_method = method
    return v
Transforms scalar return values into Result.
def add_firmware_manifest(self, name, datafile, key_table_file=None, **kwargs):
    kwargs.update({
        'name': name,
        'url': datafile,
    })
    if key_table_file is not None:
        kwargs.update({'key_table_url': key_table_file})
    firmware_manifest = FirmwareManifest._create_request_map(kwargs)
    api = self._get_api(update_service.DefaultApi)
    return FirmwareManifest(
        api.firmware_manifest_create(**firmware_manifest)
    )
Add a new manifest reference.

:param str name: Manifest file short name (Required)
:param str datafile: The file object or path to the manifest file (Required)
:param str key_table_file: The file object or path to the key_table file (Optional)
:param str description: Manifest file description
:return: the newly created manifest file object
:rtype: FirmwareManifest
def add_item(self, item):
    self.items.append(item)
    self.last_updated = datetime.datetime.now()
Append item to the list. :attr:`last_updated` will be set to :py:meth:`datetime.datetime.now`. :param item: Something to append to :attr:`items`.
def get_clients_per_page(self, per_page=1000, page=1, params=None):
    return self._get_resource_per_page(resource=CLIENTS, per_page=per_page,
                                       page=page, params=params)
Get clients per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
def is_any_clicked(self):
    for key in range(len(self.current_state.key_states)):
        if self.is_clicked(key):
            return True
    return False
Is any button clicked?
def add_attribute_listener(self, attr_name, *args, **kwargs):
    attr_name = attr_name.upper()
    return super(Parameters, self).add_attribute_listener(attr_name, *args, **kwargs)
Add a listener callback on a particular parameter.

The callback can be removed using :py:func:`remove_attribute_listener`.

.. note::

    The :py:func:`on_attribute` decorator performs the same operation as this method, but with a more elegant syntax. Use ``add_attribute_listener`` only if you will need to remove the observer.

The callback function is invoked only when the parameter changes. The callback arguments are:

* ``self`` - the associated :py:class:`Parameters`.
* ``attr_name`` - the parameter name. This can be used to infer which parameter has triggered if the same callback is used for watching multiple parameters.
* ``msg`` - the new parameter value (so you don't need to re-query the vehicle object).

The example below shows how to get callbacks for the ``THR_MIN`` parameter:

.. code:: python

    #Callback function for the THR_MIN parameter
    def thr_min_callback(self, attr_name, value):
        print " PARAMETER CALLBACK: %s changed to: %s" % (attr_name, value)

    #Add observer for the vehicle's THR_MIN parameter
    vehicle.parameters.add_attribute_listener('THR_MIN', thr_min_callback)

See :ref:`vehicle_state_observing_parameters` for more information.

:param String attr_name: The name of the parameter to watch (or '*' to watch all parameters).
:param args: The callback to invoke when a change in the parameter is detected.
def decamelise(text):
    s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower()
Convert CamelCase to lower_and_underscore.
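For example:

>>> decamelise('CamelCase')
'camel_case'
>>> decamelise('HTTPResponseCode')
'http_response_code'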
def getpeercert(self, binary_form=False):
    try:
        peer_cert = _X509(SSL_get_peer_certificate(self._ssl.value))
    except openssl_error():
        return
    if binary_form:
        return i2d_X509(peer_cert.value)
    if self._cert_reqs == CERT_NONE:
        return {}
    return decode_cert(peer_cert)
Retrieve the peer's certificate.

When binary form is requested, the peer's DER-encoded certificate is returned if it was transmitted during the handshake.

When binary form is not requested, and the peer's certificate has been validated, then a certificate dictionary is returned. If the certificate was not validated, an empty dictionary is returned.

In all cases, None is returned if no certificate was received from the peer.
def copy_doc(klass, fnname):
    base_meth, base_func = __get_meth_func(klass.__base__, fnname)
    meth, func = __get_meth_func(klass, fnname)
    func.__doc__ = base_func.__doc__
Copies documentation string of a method from the super class into the rewritten method of the given class
def print_timer(self, timer_name, **kwargs):
    if hasattr(self, timer_name):
        _delete_timer = kwargs.get("delete", False)
        print("|-------- {} [Time Log Calculation]-----------------|".format(
            timer_name))
        print("StartDiff\tLastNodeDiff\tNodeName")
        time_log = getattr(self, timer_name)
        start_time = time_log[0]['time']
        previous_time = start_time
        for entry in time_log:
            time_diff = (entry['time'] - previous_time) * 1000
            time_from_start = (entry['time'] - start_time) * 1000
            previous_time = entry['time']
            print("{:.1f}\t\t{:.1f}\t\t{}".format(time_from_start,
                                                  time_diff,
                                                  entry['node']))
        print("|--------------------------------------------------------|")
        if _delete_timer:
            self.delete_timer(timer_name)
Prints the timer to the terminal.

keyword args:
    delete -> True/False - deletes the timer after printing
def newText(content):
    ret = libxml2mod.xmlNewText(content)
    if ret is None:
        raise treeError('xmlNewText() failed')
    return xmlNode(_obj=ret)
Creation of a new text node.
def activity(self, *args, **kwargs):
    _activities = self.activities(*args, **kwargs)
    if len(_activities) == 0:
        raise NotFoundError("No activity fits criteria")
    if len(_activities) != 1:
        raise MultipleFoundError("Multiple activities fit criteria")
    return _activities[0]
Search for a single activity.

If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters.

:param pk: id (primary key) of the activity to retrieve
:type pk: basestring or None
:param name: filter the activities by name
:type name: basestring or None
:param scope: filter by scope id
:type scope: basestring or None
:return: a single :class:`models.Activity`
:raises NotFoundError: When no `Activity` is found
:raises MultipleFoundError: When more than a single `Activity` is found
def get_coiledcoil_region(self, cc_number=0, cutoff=7.0, min_kihs=2):
    g = self.filter_graph(self.graph, cutoff=cutoff, min_kihs=min_kihs)
    ccs = sorted(networkx.connected_component_subgraphs(g, copy=True),
                 key=lambda x: len(x.nodes()), reverse=True)
    cc = ccs[cc_number]
    helices = [x for x in g.nodes() if x.number in cc.nodes()]
    assigned_regions = self.get_assigned_regions(helices=helices,
                                                 include_alt_states=False,
                                                 complementary_only=True)
    coiledcoil_monomers = [h.get_slice_from_res_id(*assigned_regions[h.number])
                           for h in helices]
    return Assembly(coiledcoil_monomers)
Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles).
def generate_thumbnail_download_link_vimeo(video_id_from_shortcode):
    video_metadata = urlopen("https://vimeo.com/api/v2/video/"
                             + str(video_id_from_shortcode) + ".json").read()
    video_metadata_parsed = json.loads(video_metadata.decode('utf-8'))
    video_thumbnail_large_location = video_metadata_parsed[0]['thumbnail_large']
    return video_thumbnail_large_location
Thumbnail URL generator for Vimeo videos.
def _delete_membership(self, pipeline=None):
    Set(self._key['all'], pipeline=pipeline).remove(self.id)
Removes the id of the object from the set of all objects of the same class.
def identity(self):
    res = self.app.get_id()
    res.update({"start_time": self.start_time})
    res.update({"running_id": self.running_id})
    return res
Get the daemon identity

This will return an object containing some properties:
- alignak: the Alignak instance name
- version: the Alignak version
- type: the daemon type
- name: the daemon name

:return: daemon identity
:rtype: dict
def close_open_fds(keep_files=[]):
    keep_fds = set()
    for file in keep_files:
        if isinstance(file, int):
            keep_fds.add(file)
        else:
            try:
                keep_fds.add(file.fileno())
            except Exception:
                pass
    for fd in os.listdir("/proc/self/fd"):
        fd = int(fd)
        if fd not in keep_fds:
            try:
                os.close(fd)
            except OSError:
                pass
Close all open file descriptors except those in a given set. @param keep_files: an iterable of file descriptors or file-like objects.
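A usage sketch (Linux-only, since the implementation walks /proc/self/fd; it closes every other descriptor in the process, so this is for daemonization code, not casual use):

>>> import sys
>>> close_open_fds([0, 1, 2, sys.stderr])  # ints and file-like objects both work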
def draw360_to_texture(self, cubetexture, **kwargs):
    assert self.camera.projection.aspect == 1. and self.camera.projection.fov_y == 90
    if not isinstance(cubetexture, TextureCube):
        raise ValueError("Must render to TextureCube")
    old_rotation = self.camera.rotation
    self.camera.rotation = self.camera.rotation.to_euler(units='deg')
    for face, rotation in enumerate([[180, -90, 0], [180, 90, 0], [90, 0, 0],
                                     [-90, 0, 0], [180, 0, 0], [0, 0, 180]]):
        self.camera.rotation.xyz = rotation
        cubetexture.attach_to_fbo(face)
        self.draw(**kwargs)
    self.camera.rotation = old_rotation
Draw each visible mesh in the scene from the perspective of the scene's camera and lit by its light, and applies it to each face of cubetexture, which should be currently bound to an FBO.
def setConfigKey(key, value):
    configFile = ConfigurationManager._configFile()
    return JsonDataManager(configFile).setKey(key, value)
Sets the config data value for the specified dictionary key
def git_commit(targets, message, force=False, sign=False):
    root = get_root()
    target_paths = []
    for t in targets:
        target_paths.append(os.path.join(root, t))
    with chdir(root):
        result = run_command('git add{} {}'.format(' -f' if force else '',
                                                   ' '.join(target_paths)))
        if result.code != 0:
            return result
        return run_command('git commit{} -m "{}"'.format(' -S' if sign else '',
                                                         message))
Commit the changes for the given targets.
def V_(x, requires_grad=False, volatile=False):
    return create_variable(x, volatile=volatile, requires_grad=requires_grad)
equivalent to create_variable, which creates a pytorch tensor
def delayed_redraw(self):
    with self._defer_lock:
        whence = self._defer_whence
        self._defer_whence = self._defer_whence_reset
        flag = self._defer_flag
        self._defer_flag = False
    if flag:
        self.redraw_now(whence=whence)
Handle delayed redrawing of the canvas.
def mainloop(self):
    while self.keep_going:
        with self.lock:
            if self.on_connect and not self.readable(2):
                self.on_connect()
                self.on_connect = None
        if not self.keep_going:
            break
        self.process_once()
Handles events and calls their handlers, looping forever.
def is_extension_supported(request, extension_alias):
    extensions = list_extensions(request)
    for extension in extensions:
        if extension['alias'] == extension_alias:
            return True
    else:
        # for/else: reached only when no alias matched
        return False
Check if a specified extension is supported. :param request: django request object :param extension_alias: neutron extension alias
def __get_value(self, field_name):
    value = request.values.get(field_name)
    if value is None:
        if self.json_form_data is None:
            value = None
        elif field_name in self.json_form_data:
            value = self.json_form_data[field_name]
    return value
Get request JSON value by field name
def get_certificates(self):
    for certificate in self.user_data.certificates:
        certificate['datetime'] = certificate['datetime'].strip()
    return self.user_data.certificates
Get user's certificates.
def create(cls, receiver_id, user_id=None):
    event = cls(id=uuid.uuid4(),
                receiver_id=receiver_id,
                user_id=user_id)
    event.payload = event.receiver.extract_payload()
    return event
Create an event instance.
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq,
                                 vocab_list):
    cand_indices = []
    for (i, token) in enumerate(tokens):
        if token == "[CLS]" or token == "[SEP]":
            continue
        cand_indices.append(i)
    num_to_mask = min(max_predictions_per_seq,
                      max(1, int(round(len(tokens) * masked_lm_prob))))
    shuffle(cand_indices)
    mask_indices = sorted(sample(cand_indices, num_to_mask))
    masked_token_labels = []
    for index in mask_indices:
        # 80% of the time, replace with [MASK]
        if random() < 0.8:
            masked_token = "[MASK]"
        else:
            # 10% of the time, keep the original token
            if random() < 0.5:
                masked_token = tokens[index]
            # 10% of the time, replace with a random word
            else:
                masked_token = choice(vocab_list)
        masked_token_labels.append(tokens[index])
        tokens[index] = masked_token
    return tokens, mask_indices, masked_token_labels
Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but with several refactors to clean it up and remove a lot of unnecessary variables.
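A small usage sketch (hypothetical token list and vocabulary; `shuffle`, `sample`, `random` and `choice` are assumed to come from Python's `random` module, as in the BERT pretraining scripts):

>>> tokens = ["[CLS]", "the", "cat", "sat", "[SEP]"]
>>> vocab = ["the", "cat", "sat", "dog", "ran"]
>>> tokens, positions, labels = create_masked_lm_predictions(
...     tokens, masked_lm_prob=0.15, max_predictions_per_seq=2,
...     vocab_list=vocab)
>>> len(positions) == len(labels)   # one original label per masked position
True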
def from_tuple(self, t):
    if len(t) > 1:
        self.id = t[0]
        self.sitting = t[1]
    else:
        self.sitting = t[0]
        self.id = None
    return self
Set this person from tuple.

:param t: Tuple representing a person: (sitting,) or (id, sitting)
:type t: (bool) | (None | str | unicode | int, bool)
:rtype: Person
def undisplay(self):
    self._tools.pop()
    self._justClear()
    for tool in self._tools:
        self._justDisplay(tool)
Undisplays the top tool. This actually forces a complete re-render.
def get_structure_by_material_id(self, material_id, final=True,
                                 conventional_unit_cell=False):
    prop = "final_structure" if final else "initial_structure"
    data = self.get_data(material_id, prop=prop)
    if conventional_unit_cell:
        data[0][prop] = SpacegroupAnalyzer(data[0][prop]).\
            get_conventional_standard_structure()
    return data[0][prop]
Get a Structure corresponding to a material_id.

Args:
    material_id (str): Materials Project material_id (a string, e.g., mp-1234).
    final (bool): Whether to get the final structure, or the initial (pre-relaxation) structure. Defaults to True.
    conventional_unit_cell (bool): Whether to get the standard conventional unit cell

Returns:
    Structure object.
def is_predecessor_of_other(self, predecessor, others):
    return any(predecessor in self._predecessors_by_id[o] for o in others)
Returns whether the predecessor is a predecessor or a predecessor of a predecessor...of any of the others.

Args:
    predecessor (str): The txn id of the predecessor.
    others (list(str)): The txn ids of the possible successors.

Returns:
    (bool)
def write_strings_on_files_between_markers(filenames: list, strings: list,
                                           marker: str):
    assert len(filenames) == len(strings)
    if len(filenames) > 0:
        for f in filenames:
            assert isinstance(f, str)
    if len(strings) > 0:
        for s in strings:
            assert isinstance(s, str)
    file_id = 0
    for f in filenames:
        write_string_on_file_between_markers(f, strings[file_id], marker)
        file_id += 1
r"""Write the table of contents on multiple files. :parameter filenames: the files that needs to be read or modified. :parameter strings: the strings that will be written on the file. Each string is associated with one file. :parameter marker: a marker that will identify the start and the end of the string. :type filenames: list :type string: list :type marker: str :returns: None :rtype: None :raises: an fpyutils exception or a built-in exception.
def TypeFactory(type_):
    if isinstance(type_, type) and issubclass(type_, Type):
        return type_
    for x in __types__:
        if x.represents(type_):
            return x.get(type_)
    raise UnknownType(type_)
This function creates a standard form type from a simplified form.

>>> from datetime import date, datetime
>>> from pyws.functions.args import TypeFactory
>>> from pyws.functions.args import String, Integer, Float, Date, DateTime
>>> TypeFactory(str) == String
True
>>> TypeFactory(float) == Float
True
>>> TypeFactory(date) == Date
True
>>> TypeFactory(datetime) == DateTime
True
>>> from operator import attrgetter
>>> from pyws.functions.args import Dict
>>> dct = TypeFactory({0: 'HelloWorldDict', 'hello': str, 'world': int})
>>> issubclass(dct, Dict)
True
>>> dct.__name__
'HelloWorldDict'
>>> fields = sorted(dct.fields, key=attrgetter('name'))
>>> len(dct.fields)
2
>>> fields[0].name == 'hello'
True
>>> fields[0].type == String
True
>>> fields[1].name == 'world'
True
>>> fields[1].type == Integer
True
>>> from pyws.functions.args import List
>>> lst = TypeFactory([int])
>>> issubclass(lst, List)
True
>>> lst.__name__
'IntegerList'
>>> lst.element_type == Integer
True
def _viscounts2radiance(counts, slope, offset):
    rad = counts * slope + offset
    return rad.clip(min=0)
Convert VIS counts to radiance

References: [VIS]

Args:
    counts: Raw detector counts
    slope: Slope [W m-2 um-1 sr-1]
    offset: Offset [W m-2 um-1 sr-1]

Returns:
    Radiance [W m-2 um-1 sr-1]
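For instance, with hypothetical calibration coefficients the conversion is a clipped linear calibration:

>>> counts = np.array([0, 100, 200])
>>> _viscounts2radiance(counts, slope=0.01, offset=-0.5)
array([0. , 0.5, 1.5])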
def documents(cls, filter=None, **kwargs):
    documents = [cls(document) for document in cls.find(filter, **kwargs)]
    return [document for document in documents if document.document]
Returns a list of Documents matching the filter; documents with empty content are dropped.
def make_input_from_plain_string(sentence_id: SentenceId, string: str) -> TranslatorInput:
    return TranslatorInput(sentence_id,
                           tokens=list(data_io.get_tokens(string)),
                           factors=None)
Returns a TranslatorInput object from a plain string. :param sentence_id: Sentence id. :param string: An input string. :return: A TranslatorInput.
def write_to(self, content, content_type):
    try:
        self._api.object_upload(self._bucket, self._key, content, content_type)
    except Exception as e:
        raise e
Writes text content to this item.

Args:
    content: the text content to be written.
    content_type: the type of text content.

Raises:
    Exception if there was an error writing the item's content.
def likelihood_markov_blanket(self, beta):
    states = beta[self.z_no:self.z_no + self.data_length]
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k])
                     for k in range(self.z_no)])
    scale, shape, skewness = self._get_scale_and_shape(parm)
    return self.family.markov_blanket(self.data, self.link(states), scale,
                                      shape, skewness)
Creates likelihood markov blanket of the model

Parameters
----------
beta : np.array
    Contains untransformed starting values for latent variables

Returns
----------
- Negative loglikelihood
def get_ips(self, interface=None, family=None, scope=None, timeout=0):
    kwargs = {}
    if interface:
        kwargs['interface'] = interface
    if family:
        kwargs['family'] = family
    if scope:
        kwargs['scope'] = scope
    ips = None
    timeout = int(os.environ.get('LXC_GETIP_TIMEOUT', timeout))
    while not ips:
        ips = _lxc.Container.get_ips(self, **kwargs)
        if timeout == 0:
            break
        timeout -= 1
        time.sleep(1)
    return ips
Get a tuple of IPs for the container.
def list_uplink_dvportgroup(dvs, service_instance=None):
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
    if not dvs_refs:
        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                         'retrieved'.format(dvs))
    uplink_pg_ref = salt.utils.vmware.get_uplink_dvportgroup(dvs_refs[0])
    return _get_dvportgroup_dict(uplink_pg_ref)
Returns the uplink portgroup of a distributed virtual switch.

dvs
    Name of the DVS containing the portgroup.

service_instance
    Service instance (vim.ServiceInstance) of the vCenter. Default is None.

.. code-block:: bash

    salt '*' vsphere.list_uplink_dvportgroup dvs=dvs_name
def write(self, file, text, subvars={}, trim_leading_lf=True):
    file.write(self.substitute(text, subvars=subvars,
                               trim_leading_lf=trim_leading_lf))
write to a file with variable substitution
def get_attribute(self, code, default=None):
    try:
        return self.get(code=code).value
    except models.ObjectDoesNotExist:
        return default
Get attribute for user
def simplify(self, e=None):
    if e is None:
        return self._solver.simplify()
    elif isinstance(e, (int, float, bool)):
        return e
    elif isinstance(e, claripy.ast.Base) and e.op in claripy.operations.leaf_operations_concrete:
        return e
    elif isinstance(e, SimActionObject) and e.op in claripy.operations.leaf_operations_concrete:
        return e.ast
    elif not isinstance(e, (SimActionObject, claripy.ast.Base)):
        return e
    else:
        return self._claripy_simplify(e)
Simplifies `e`. If `e` is None, simplifies the constraints of this state.
def get_area(self):
    return (self.p2.x - self.p1.x) * (self.p2.y - self.p1.y)
Calculate area of bounding box.
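As a quick check: for a box with corners p1 = (1, 2) and p2 = (4, 6), the area is (4 - 1) * (6 - 2) = 12.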
def checkQueryRange(self, start, end):
    condition = (
        (start < 0 or end > self.getLength()) or
        start > end or
        start == end)
    if condition:
        raise exceptions.ReferenceRangeErrorException(
            self.getId(), start, end)
Checks to ensure that the query range is valid within this reference. If not, raise ReferenceRangeErrorException.
def rm(ctx, dataset, kwargs):
    "removes the dataset's folder if it exists"
    kwargs = parse_kwargs(kwargs)
    data(dataset, **ctx.obj).rm(**kwargs)
removes the dataset's folder if it exists
def pause(ctx):
    lancet = ctx.obj
    paused_status = lancet.config.get("tracker", "paused_status")
    issue = get_issue(lancet)
    transition = get_transition(ctx, lancet, issue, paused_status)
    set_issue_status(lancet, issue, paused_status, transition)
    with taskstatus("Pausing harvest timer") as ts:
        lancet.timer.pause()
        ts.ok("Harvest timer paused")
Pause work on the current issue. This command puts the issue in the configured paused status and stops the current Harvest timer.
def print_tree(graph, tails, node_id_map):
    walker = graph.walk()
    next_block_num, next_parent, next_siblings = next(walker)
    prev_cliques = []
    done = False
    while not done:
        cliques = {}
        block_num = next_block_num
        try:
            while block_num == next_block_num:
                cliques[next_parent] = next_siblings
                next_block_num, next_parent, next_siblings = next(walker)
        except StopIteration:
            done = True
        print_cliques(prev_cliques, cliques, node_id_map)
        print_block_num_row(block_num, prev_cliques, cliques)
        print_splits(prev_cliques, cliques)
        print_folds(prev_cliques, cliques)
        prev_cliques = build_ordered_cliques(prev_cliques, cliques)
    print_cliques(prev_cliques, [], node_id_map)
Print out a tree of blocks starting from the common ancestor.
def get_repository(self, path):
    parts = path.split('@', 1)
    if len(parts) == 1:
        parts = ("filesystem", parts[0])
    repo_type, location = parts
    if repo_type == "filesystem":
        location = os.path.abspath(location)
    normalised_path = "%s@%s" % (repo_type, location)
    return self._get_repository(normalised_path)
Get a package repository.

Args:
    path (str): Entry from the 'packages_path' config setting. This may simply be a path (which is managed by the 'filesystem' package repository plugin), or a string in the form "type@location", where 'type' identifies the repository plugin type to use.

Returns:
    `PackageRepository` instance.
def qnh_estimate(self):
    alt_gps = self.master.field('GPS_RAW_INT', 'alt', 0) * 0.001
    pressure2 = self.master.field('SCALED_PRESSURE', 'press_abs', 0)
    ground_temp = self.get_mav_param('GND_TEMP', 21)
    temp = ground_temp + 273.15
    pressure1 = pressure2 / math.exp(math.log(1.0 - (alt_gps / (153.8462 * temp))) / 0.190259)
    return pressure1
estimate QNH pressure from GPS altitude and scaled pressure
def post_tweet_intent_handler(request):
    tweet = request.get_slot_value("Tweet")
    tweet = tweet if tweet else ""
    if tweet:
        user_state = twitter_cache.get_user_state(request.access_token())

        def action():
            return post_tweet(request.access_token(), tweet)

        message = ("I am ready to post the tweet, {} ,\n"
                   " Please say yes to confirm or stop to cancel .".format(tweet))
        user_state['pending_action'] = {"action": action,
                                        "description": message}
        return r.create_response(message=message, end_session=False)
    else:
        message = " ".join(
            ["I'm sorry, I couldn't understand what you wanted to tweet .",
             "Please prepend the message with either post or tweet ."])
        return alexa.create_response(message=message, end_session=False)
Use the 'intent' field in the VoiceHandler to map to the respective intent.
def mark_locations(h, section, locs, markspec='or', **kwargs):
    xyz = get_section_path(h, section)
    (r, theta, phi) = sequential_spherical(xyz)
    rcum = np.append(0, np.cumsum(r))
    if type(locs) is float or type(locs) is np.float64:
        locs = np.array([locs])
    if type(locs) is list:
        locs = np.array(locs)
    lengths = locs * rcum[-1]
    xyz_marks = []
    for targ_length in lengths:
        xyz_marks.append(find_coord(targ_length, xyz, rcum, theta, phi))
    xyz_marks = np.array(xyz_marks)
    line, = plt.plot(xyz_marks[:, 0], xyz_marks[:, 1],
                     xyz_marks[:, 2], markspec, **kwargs)
    return line
Marks one or more locations along a section. Could be used to mark the location of a recording or electrical stimulation.

Args:
    h = hocObject to interface with neuron
    section = reference to section
    locs = float between 0 and 1, or array of floats
    optional arguments specify details of marker

Returns:
    line = reference to plotted markers
def reset(self):
    old_value = self._value
    old_raw_str_value = self.raw_str_value
    self._value = not_set
    self.raw_str_value = not_set
    new_value = self._value
    if old_value is not_set:
        return
    if self.section:
        self.section.dispatch_event(
            self.section.hooks.item_value_changed,
            item=self,
            old_value=old_value,
            new_value=new_value,
            old_raw_str_value=old_raw_str_value,
            new_raw_str_value=self.raw_str_value,
        )
Resets the value of config item to its default value.
def yaml_dump(dict_to_dump):
    yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.safe_dump(dict_to_dump, default_flow_style=False)
Dump the dictionary as a YAML document.

:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
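For example (assuming `_dict_representer` emits the mapping in insertion order):

>>> yaml_dump(OrderedDict([('b', 1), ('a', 2)]))
'b: 1\na: 2\n'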
def links(self):
    links = Links()
    links["self"] = Link.for_(
        self._operation,
        self._ns,
        qs=self._page.to_items(),
        **self._context
    )
    return links
Include a self link.
def determine_labels(target_dir: Path, label_type: str) -> Set[str]:
    logger.info("Finding phonemes of type %s in directory %s",
                label_type, target_dir)
    label_dir = target_dir / "label/"
    if not label_dir.is_dir():
        raise FileNotFoundError(
            "The directory {} does not exist.".format(target_dir))
    phonemes = set()
    for fn in os.listdir(str(label_dir)):
        if fn.endswith(str(label_type)):
            with (label_dir / fn).open("r", encoding=ENCODING) as f:
                try:
                    line_phonemes = set(f.readline().split())
                except UnicodeDecodeError:
                    logger.error("Unicode decode error on file %s", fn)
                    print("Unicode decode error on file {}".format(fn))
                    raise
                phonemes = phonemes.union(line_phonemes)
    return phonemes
Returns a set of all phonemes found in the corpus.

Assumes that WAV files and label files are split into utterances and segregated in a directory which contains a "wav" subdirectory and "label" subdirectory.

Arguments:
    target_dir: A `Path` to the directory where the corpus data is found
    label_type: The type of label we are creating the label set from. For example "phonemes" would only search for labels for that type.
def perform(self):
    if self.use_https:
        conn = client.HTTPSConnection(self.host, self.port)
    else:
        conn = client.HTTPConnection(self.host, self.port)
    conn.request(self.method, self.uri)
    response = conn.getresponse()
    conn.close()
    return bool(response.status >= 200 and response.status < 300)
Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method.
def load(self, table: str):
    if self._check_db() is False:
        return
    if table not in self.db.tables:
        self.warning("The table " + table + " does not exist")
        return
    try:
        self.start("Loading data from table " + table)
        res = self.db[table].all()
        self.df = pd.DataFrame(list(res))
        self.end("Data loaded from table " + table)
    except Exception as e:
        self.err(e, "Can not load table " + table)
Set the main dataframe from a table's data :param table: table name :type table: str :example: ``ds.load("mytable")``
def execute(self, method, **kwargs):
    payload = {
        'id': 1,
        'jsonrpc': '2.0',
        'method': method,
        'params': kwargs
    }
    credentials = base64.b64encode('{}:{}'.format(self._username,
                                                  self._password).encode())
    auth_header_prefix = 'Basic ' if self._auth_header == DEFAULT_AUTH_HEADER else ''
    headers = {
        self._auth_header: auth_header_prefix + credentials.decode(),
        'Content-Type': 'application/json',
    }
    return self._do_request(headers, payload)
Call remote API procedure

Args:
    method: Procedure name
    kwargs: Procedure named arguments

Returns:
    Procedure result

Raises:
    urllib2.HTTPError: Any HTTP error (Python 2)
    urllib.error.HTTPError: Any HTTP error (Python 3)
def UCRTLibraries(self):
    if self.vc_ver < 14.0:
        return []
    arch_subdir = self.pi.target_dir(x64=True)
    lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
    ucrtver = self._ucrt_subdir
    return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
Microsoft Universal C Runtime SDK Libraries
def __open_pidfile(self, write=False):
    try:
        self.pre_log.append(("DEBUG",
                             "Opening %s pid file: %s"
                             % ('existing' if os.path.exists(self.pid_filename)
                                else 'missing', self.pid_filename)))
        if not write and os.path.exists(self.pid_filename):
            self.fpid = open(self.pid_filename, 'r+')
        else:
            self.fpid = open(self.pid_filename, 'w+')
    except Exception as exp:
        self.exit_on_error("Error opening pid file: %s. Error: %s. "
                           "Check the %s:%s account permissions to write this file."
                           % (self.pid_filename, str(exp), self.user, self.group),
                           exit_code=3)
Open pid file in read or write mode.

:param write: boolean to open file in write mode (true = write)
:type write: bool
:return: None
def get(self, time, interpolate='previous'):
    try:
        getter = self.getter_functions[interpolate]
    except KeyError:
        msg = (
            "unknown value '{}' for interpolate, "
            "valid values are in [{}]"
        ).format(interpolate, ', '.join(self.getter_functions))
        raise ValueError(msg)
    else:
        return getter(time)
Get the value of the time series, even in-between measured values.
def inspect_commit(self, commit):
    req = proto.InspectCommitRequest(commit=commit_from(commit))
    return self.stub.InspectCommit(req, metadata=self.metadata)
Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit.
def get_progressbar(self, total, **options):
    progressbar = ColoredProgressBar(total)
    progressbar.steps_label = 'Commit'
    progressbar.elements += ['eta', 'time']
    return progressbar
Returns progress bar instance for a given ``total`` number of clicks it should do.
def dpu(self, hash=None, historics_id=None):
    if hash:
        return self.request.get('dpu', params=dict(hash=hash))
    if historics_id:
        return self.request.get('dpu', params=dict(historics_id=historics_id))
Calculate the DPU cost of consuming a stream.

Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/dpu

:param hash: target CSDL filter hash
:type hash: str
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def blog_reverse(viewname, args=None, kwargs=None, current_app='fluent_blogs',
                 **page_kwargs):
    return mixed_reverse(viewname, args=args, kwargs=kwargs,
                         current_app=current_app, **page_kwargs)
Reverse a URL to the blog, taking various configuration options into account. This is a compatibility function to allow django-fluent-blogs to operate stand-alone. Either the app can be hooked in the URLconf directly, or it can be added as a pagetype of *django-fluent-pages*.
def dict_to_op(d, index_name, doc_type, op_type='index'):
    if d is None:
        return d
    op_types = ('create', 'delete', 'index', 'update')
    if op_type not in op_types:
        msg = 'Unknown operation type "{}", must be one of: {}'
        raise Exception(msg.format(op_type, ', '.join(op_types)))
    if 'id' not in d:
        raise Exception('"id" key not found')
    operation = {
        '_op_type': op_type,
        '_index': index_name,
        '_type': doc_type,
        '_id': d.pop('id'),
    }
    operation.update(d)
    return operation
Create a bulk-indexing operation from the given dictionary.
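For example (hypothetical document; note that the 'id' key is popped from the input dict):

>>> dict_to_op({'id': 42, 'title': 'hello'}, 'my-index', 'post')
{'_op_type': 'index', '_index': 'my-index', '_type': 'post', '_id': 42, 'title': 'hello'}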
def get_complex_attrs(self):
    return [getattr(self, attr_name) for attr_name in self._attributes
            if isinstance(getattr(self, attr_name), ComplexSchemaItem)]
Returns a list of the complex attributes
def format_sensor(self, sensor):
    current_val = sensor.current
    if self.pango_enabled:
        percentage = self.percentage(sensor.current, sensor.critical)
        if self.dynamic_color:
            color = self.colors[int(percentage)]
        return self.format_pango(color, current_val)
    return current_val
Format a sensor value. If pango is enabled color is per sensor.
def latexify(obj, **kwargs):
    if hasattr(obj, '__pk_latex__'):
        return obj.__pk_latex__(**kwargs)
    if isinstance(obj, text_type):
        from .unicode_to_latex import unicode_to_latex
        return unicode_to_latex(obj)
    if isinstance(obj, bool):
        # isinstance(True, int) is also True, so bool must be tested before int
        raise ValueError('no well-defined LaTeXification of bool %r' % obj)
    if isinstance(obj, float):
        nplaces = kwargs.get('nplaces')
        if nplaces is None:
            return '$%f$' % obj
        return '$%.*f$' % (nplaces, obj)
    if isinstance(obj, int):
        return '$%d$' % obj
    if isinstance(obj, binary_type):
        if all(c in _printable_ascii for c in obj):
            return obj.decode('ascii')
        raise ValueError('no safe LaTeXification of binary string %r' % obj)
    raise ValueError('can\'t LaTeXify %r' % obj)
Render an object in LaTeX appropriately.
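For the numeric paths, for example:

>>> latexify(3.14159, nplaces=2)
'$3.14$'
>>> latexify(42)
'$42$'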
def getcells(self, line):
    for boundary in self.boundaries:
        cell = line.lstrip()[boundary].strip()
        if cell:
            for cell in re.split(r'\s{3,}', cell):
                yield cell
        else:
            yield None
Using self.boundaries, extract cells from the given line.
def get_vasp_kpoint_file_sym(structure):
    output = run_aconvasp_command(["aconvasp", "--kpath"], structure)
    if "ERROR" in output[1]:
        raise AconvaspError(output[1])
    started = False
    kpoints_string = ""
    for line in output[0].split("\n"):
        if started or line.find("END") != -1:
            kpoints_string = kpoints_string + line + "\n"
        if line.find("KPOINTS TO RUN") != -1:
            started = True
        if line.find("END") != -1:
            started = False
    return kpoints_string
Get a kpoint file ready to be run in VASP along the symmetry lines of the Brillouin Zone
def apply_cut(self, cut):
    return MacroSubsystem(
        self.network,
        self.network_state,
        self.micro_node_indices,
        cut=cut,
        time_scale=self.time_scale,
        blackbox=self.blackbox,
        coarse_grain=self.coarse_grain)
Return a cut version of this |MacroSubsystem|.

Args:
    cut (Cut): The cut to apply to this |MacroSubsystem|.

Returns:
    MacroSubsystem: The cut version of this |MacroSubsystem|.