code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def master(master=None, connected=True): * def _win_remotes_on(port): netstat remotes = set() try: data = subprocess.check_output([, , , ]) except subprocess.CalledProcessError: log.error() raise lines = salt.utils.stringutils.to_unicode(data).split() for line in lines: if not in line: continue chunks = line.split() remote_host, remote_port = chunks[2].rsplit(, 1) if int(remote_port) != port: continue remotes.add(remote_host) return remotes port = 4505 master_ips = None if master: master_ips = _host_to_ips(master) if not master_ips: return if __salt__[]() != : port = int(__salt__[]()) master_connection_status = False connected_ips = _win_remotes_on(port) for master_ip in master_ips: if master_ip in connected_ips: master_connection_status = True break if master_connection_status is not connected: event = salt.utils.event.get_event(, opts=__opts__, listen=False) if master_connection_status: event.fire_event({: master}, salt.minion.master_event(type=)) else: event.fire_event({: master}, salt.minion.master_event(type=)) return master_connection_status
.. versionadded:: 2015.5.0 Fire an event if the minion gets disconnected from its master. This function is meant to be run via a scheduled job from the minion. If master_ip is an FQDN/Hostname, is must be resolvable to a valid IPv4 address. CLI Example: .. code-block:: bash salt '*' status.master
def _parse_videoname(cls, videoname): info = { : , : 0, : 0, : , : , : , : , : , } last_index = 0 m = cls.RE_SEASON_EPISODE.search(videoname) if m: info[] = int(m.group()) info[] = int(m.group()) s, e = m.span() info[] = videoname[0:s].strip() last_index = e else: m = cls.RE_SEASON.search(videoname) if m: info[] = int(m.group()) s, e = m.span() info[] = videoname[0:s].strip() last_index = e m = cls.RE_RESOLUTION.search(videoname) if m: info[] = m.group() s, e = m.span() if info[] == : info[] = videoname[0:s].strip() if info[] > 0 and info[] > 0: info[] = videoname[last_index:s].strip() last_index = e if info[] == : info[] = videoname m = cls.RE_SOURCE.search(videoname) if m: info[] = m.group() m = cls.RE_AUDIO_ENC.search(videoname) if m: info[] = m.group() m = cls.RE_VIDEO_ENC.search(videoname) if m: info[] = m.group() return info
parse videoname and return video info dict video info contains: - title, the name of video - sub_title, the sub_title of video - resolution, - source, - - season, defaults to 0 - episode, defaults to 0
def run(self): segments = self.controller.split() controller_class = reduce(getattr, segments[1:], __import__(.join(segments[:-1]))) cmd_line = [] if self.configuration is not None: cmd_line.extend([, self.configuration]) args = parser.get().parse_args(cmd_line) controller_instance = controller_class(args, platform) try: controller_instance.start() except KeyboardInterrupt: controller_instance.stop()
Import the controller and run it. This mimics the processing done by :func:`helper.start` when a controller is run in the foreground. A new instance of ``self.controller`` is created and run until a keyboard interrupt occurs or the controller stops on its own accord.
def _welch_anova(self, dv=None, between=None, export_filename=None): aov = welch_anova(data=self, dv=dv, between=between, export_filename=export_filename) return aov
Return one-way Welch ANOVA.
def handle_stage_changed(self, model): stages = model.get_stages() if self.dataman: self.dataman.set(, stages)
handle a stage change in the data model :param model: the data model that was changed
def get_Generic_itemtype(sq, simplify=True): if is_Tuple(sq): if simplify: itm_tps = [x for x in get_Tuple_params(sq)] simplify_for_Union(itm_tps) return Union[tuple(itm_tps)] else: return Union[get_Tuple_params(sq)] else: try: res = _select_Generic_superclass_parameters(sq, typing.Container) except TypeError: res = None if res is None: try: res = _select_Generic_superclass_parameters(sq, typing.Iterable) except TypeError: pass if res is None: raise TypeError("Has no itemtype: "+type_str(sq)) else: return res[0]
Retrieves the item type from a PEP 484 generic or subclass of such. sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container. Consequently this also works with typing.List, typing.Set and typing.Dict. Note that for typing.Dict and mapping types in general, the key type is regarded as item type. For typing.Tuple all contained types are returned as a typing.Union. If simplify == True some effort is taken to eliminate redundancies in such a union.
def describe_target_groups(names=None, target_group_arns=None, load_balancer_arn=None, region=None, key=None, keyid=None, profile=None): if names and target_group_arns: raise SaltInvocationError( ) if names: target_groups = names elif target_group_arns: target_groups = target_group_arns else: target_groups = None tg_list = [] if target_groups: if isinstance(target_groups, str) or isinstance(target_groups, six.text_type): tg_list.append(target_groups) else: for group in target_groups: tg_list.append(group) conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: if names: ret = conn.describe_target_groups(Names=tg_list)[] elif target_group_arns: ret = conn.describe_target_groups(TargetGroupArns=tg_list)[] elif load_balancer_arn: ret = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn)[] else: ret = [] next_marker = while True: r = conn.describe_target_groups(Marker=next_marker) for alb in r[]: ret.append(alb) if in r: next_marker = r[] else: break return ret if ret else [] except ClientError as error: log.warning(error) return False
Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups. Returns: list CLI example: .. code-block:: bash salt myminion boto_elbv2.describe_target_groups salt myminion boto_elbv2.describe_target_groups target_group_name salt myminion boto_elbv2.describe_target_groups "[tg_name,tg_name]"
def get_min_vertex_distance( coor, guess ): ix = nm.argsort( coor[:,0] ) scoor = coor[ix] mvd = 1e16 n_coor = coor.shape[0] print n_coor i0 = i1 = 0 x0 = scoor[i0,0] while 1: while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)): i1 += 1 aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] ) if aux < mvd: im, a1, a2 = aim, aa1 + i0, aa2 + i0 mvd = min( mvd, aux ) i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1 x0 = scoor[i0,0] if i1 == n_coor - 1: break print im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2] return mvd
Can miss the minimum, but is enough for our purposes.
def search(self, **kwargs): return super(ApiEquipment, self).get(self.prepare_url(, kwargs))
Method to search equipments based on extends search. :param search: Dict containing QuerySets to find equipments. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing equipments
def _parse_param(key, val): regex = re.compile(r) match = regex.match(key) if match: if not isinstance(val, list): val = val.split() fields = [field.lower() for field in val] rtype = match.groups()[0].lower() return rtype, fields
Parse the query param looking for sparse fields params Ensure the `val` or what will become the sparse fields is always an array. If the query param is not a sparse fields query param then return None. :param key: the query parameter key in the request (left of =) :param val: the query parameter val in the request (right of =) :return: tuple of resource type to implement the sparse fields on & a array of the fields.
def read_function(data, window, ij, g_args): output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max() return output
Takes an array, and sets any value above the mean to the max, the rest to 0
def json_response(obj): return Response( response=json.dumps( obj, indent=4, cls=AirflowJsonEncoder), status=200, mimetype="application/json")
returns a json response from a json serializable python object
def _group_batches_shared(xs, caller_batch_fn, prep_data_fn): singles = [] batch_groups = collections.defaultdict(list) for args in xs: data = utils.to_single_data(args) caller, batch = caller_batch_fn(data) region = _list_to_tuple(data["region"]) if "region" in data else () if batch is not None: batches = batch if isinstance(batch, (list, tuple)) else [batch] for b in batches: batch_groups[(b, region, caller)].append(utils.deepish_copy(data)) else: data = prep_data_fn(data, [data]) singles.append(data) batches = [] for batch, items in batch_groups.items(): batch_data = utils.deepish_copy(_pick_lead_item(items)) if tz.get_in(["metadata", "batch"], batch_data): batch_name = batch[0] batch_data["metadata"]["batch"] = batch_name batch_data = prep_data_fn(batch_data, items) batch_data["group_orig"] = _collapse_subitems(batch_data, items) batch_data["group"] = batch batches.append(batch_data) return singles + batches
Shared functionality for grouping by batches for variant calling and joint calling.
async def on_raw_cap(self, message): target, subcommand = message.params[:2] params = message.params[2:] attr = + pydle.protocol.identifierify(subcommand) if hasattr(self, attr): await getattr(self, attr)(params) else: self.logger.warning(, subcommand)
Handle CAP message.
def is_authenticated(user): if not hasattr(user, ): return False if callable(user.is_authenticated): return user.is_authenticated() else: return user.is_authenticated
Return whether or not a User is authenticated. Function provides compatibility following deprecation of method call to `is_authenticated()` in Django 2.0. This is *only* required to support Django < v1.10 (i.e. v1.9 and earlier), as `is_authenticated` was introduced as a property in v1.10.s
def buildNavigation(self): if self.buildSpec[] == constants.TABBED: navigation = Tabbar(self, self.buildSpec, self.configs) else: navigation = Sidebar(self, self.buildSpec, self.configs) if self.buildSpec[] == constants.HIDDEN: navigation.Hide() return navigation
Chooses the appropriate layout navigation component based on user prefs
def _fill_function(func, globals, defaults, dict, module, closure_values): func.__globals__.update(globals) func.__defaults__ = defaults func.__dict__ = dict func.__module__ = module cells = func.__closure__ if cells is not None: for cell, value in zip(cells, closure_values): if value is not _empty_cell_value: cell_set(cell, value) return func
Fills in the rest of function data into the skeleton function object that were created via _make_skel_func().
def form_lines_valid(self, form): handled = 0 for inner_form in form: if not inner_form.cleaned_data.get(formsets.DELETION_FIELD_NAME): handled += 1 self.handle_inner_form(inner_form) self.log_and_notify_lines(handled) return http.HttpResponseRedirect(self.get_success_url())
Handle a valid LineFormSet.
def submesh(mesh, faces_sequence, only_watertight=False, append=False): faces_sequence = list(faces_sequence) if len(faces_sequence) == 0: return [] for i in visuals): visuals = np.array(visuals) visual = visuals[0].concatenate(visuals[1:]) else: visual = None vertices, faces = append_faces(vertices, faces) appended = trimesh_type( vertices=vertices, faces=faces, face_normals=np.vstack(normals), visual=visual, process=False) return appended result = [trimesh_type( vertices=v, faces=f, face_normals=n, visual=c, metadata=copy.deepcopy(mesh.metadata), process=False) for v, f, n, c in zip(vertices, faces, normals, visuals)] result = np.array(result) if len(result) > 0 and only_watertight: watertight = np.array([i.fill_holes() and len(i.faces) >= 4 for i in result]) result = result[watertight] return result
Return a subset of a mesh. Parameters ---------- mesh : Trimesh Source mesh to take geometry from faces_sequence : sequence (p,) int Indexes of mesh.faces only_watertight : bool Only return submeshes which are watertight. append : bool Return a single mesh which has the faces appended, if this flag is set, only_watertight is ignored Returns --------- if append : Trimesh object else list of Trimesh objects
def set_row_height(self, row, tab, height): try: old_height = self.row_heights.pop((row, tab)) except KeyError: old_height = None if height is not None: self.row_heights[(row, tab)] = float(height)
Sets row height
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None, timeout_for_request=None, allow_fallback=True): self.hadoop_settings[] = { : maximum_bid_price_percentage, : timeout_for_request, : allow_fallback}
Purchase options for stable spot instances. `maximum_bid_price_percentage`: Maximum value to bid for stable node spot instances, expressed as a percentage of the base price (applies to both master and slave nodes). `timeout_for_request`: Timeout for a stable node spot instance request (Unit: minutes) `allow_fallback`: Whether to fallback to on-demand instances for stable nodes if spot instances are not available
def map_data(self): with open(self.src_file, "r") as f: for line in f: cols = line.split() print(cols)
provides a mapping from the CSV file to the aikif data structures.
def allocate_objects(self, eps = 0.01, noise_size = 1): if (self.__object_segment_analysers is None): return []; segments = []; for object_segment_analyser in self.__object_segment_analysers: indexes = object_segment_analyser[]; analyser = object_segment_analyser[]; segments += analyser.allocate_clusters(eps, indexes); real_segments = [segment for segment in segments if len(segment) > noise_size]; return real_segments;
! @brief Allocates object segments. @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one segment. @param[in] noise_size (uint): Threshold that defines noise - segments size (in pixels) that is less then the threshold is considered as a noise. @return (list) Object segments where each object segment consists of indexes of pixels that forms object segment.
def init(config, workdir=None, logfile=None, loglevel=logging.INFO, **kwargs): setup_sdk_logging(logfile, loglevel) defaults = lago_config.get_section() if workdir is None: workdir = os.path.abspath() defaults[] = workdir defaults[] = config defaults.update(kwargs) workdir, prefix = cmd.do_init(**defaults) return SDK(workdir, prefix)
Initialize the Lago environment Args: config(str): Path to LagoInitFile workdir(str): Path to initalize the workdir, defaults to "$PWD/.lago" **kwargs(dict): Pass arguments to :func:`~lago.cmd.do_init` logfile(str): A path to setup a log file. loglevel(int): :mod:`logging` log level. Returns: :class:`~lago.sdk.SDK`: Initialized Lago enviornment Raises: :exc:`~lago.utils.LagoException`: If initialization failed
def getDatetimeAxis(): dataSet = filePath = + dataSet + data = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=[, , , ]) xaxisDate = pd.to_datetime(data[]) return xaxisDate
use datetime as x-axis
def getSpec(cls): spec = { "description": ApicalTMSequenceRegion.__doc__, "singleNodeOnly": True, "inputs": { "activeColumns": { "description": ("An array of 0s representing the active " "minicolumns, i.e. the input to the TemporalMemory"), "dataType": "Real32", "count": 0, "required": True, "regionLevel": True, "isDefaultInput": True, "requireSplitterMap": False }, "resetIn": { "description": ("A boolean flag that indicates whether" " or not the input vector received in this compute cycle" " represents the first presentation in a" " new temporal sequence."), "dataType": "Real32", "count": 1, "required": False, "regionLevel": True, "isDefaultInput": False, "requireSplitterMap": False }, "apicalInput": { "description": "An array of 0s representing top down input." " The input will be provided to apical dendrites.", "dataType": "Real32", "count": 0, "required": False, "regionLevel": True, "isDefaultInput": False, "requireSplitterMap": False }, "apicalGrowthCandidates": { "description": ("An array of 0s representing apical input " "that can be learned on new synapses on apical " "segments. 
If this input is a length-0 array, the " "whole apicalInput is used."), "dataType": "Real32", "count": 0, "required": False, "regionLevel": True, "isDefaultInput": False, "requireSplitterMap": False }, }, "outputs": { "nextPredictedCells": { "description": ("A binary output containing a 1 for every " "cell that is predicted for the next timestep."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": False }, "predictedActiveCells": { "description": ("A binary output containing a 1 for every " "cell that transitioned from predicted to active."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": False }, "activeCells": { "description": ("A binary output containing a 1 for every " "cell that is currently active."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": True }, "winnerCells": { "description": ("A binary output containing a 1 for every " " cell in the TM."), "dataType": "Real32", "count": 0, "regionLevel": True, "isDefaultOutput": False }, }, "parameters": { "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "learn": { "description": "True if the TM should learn.", "accessMode": "ReadWrite", "dataType": "Bool", "count": 1, "defaultValue": "true" }, "cellsPerColumn": { "description": "Number of cells per column", "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "activationThreshold": { "description": ("If the number of active connected synapses on a " "segment is at least this threshold, the segment " "is said to be active."), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "reducedBasalThreshold": { "description": ("Activation threshold of basal segments for cells " "with active apical segments (with apicalTiebreak " "implementation). 
"), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "initialPermanence": { "description": "Initial permanence of a new synapse.", "accessMode": "Read", "dataType": "Real32", "count": 1, "constraints": "" }, "connectedPermanence": { "description": ("If the permanence value for a synapse is greater " "than this value, it is said to be connected."), "accessMode": "Read", "dataType": "Real32", "count": 1, "constraints": "" }, "minThreshold": { "description": ("If the number of synapses active on a segment is at " "least this threshold, it is selected as the best " "matching cell in a bursting column."), "accessMode": "Read", "dataType": "UInt32", "count": 1, "constraints": "" }, "sampleSize": { "description": ("The desired number of active synapses for an " + "active cell"), "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "learnOnOneCell": { "description": ("If True, the winner cell for each column will be" " fixed between resets."), "accessMode": "Read", "dataType": "Bool", "count": 1, "defaultValue": "false" }, "maxSynapsesPerSegment": { "description": "The maximum number of synapses per segment", "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "maxSegmentsPerCell": { "description": "The maximum number of segments per cell", "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "permanenceIncrement": { "description": ("Amount by which permanences of synapses are " "incremented during learning."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "permanenceDecrement": { "description": ("Amount by which permanences of synapses are " "decremented during learning."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "basalPredictedSegmentDecrement": { "description": ("Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "apicalPredictedSegmentDecrement": { "description": ("Amount 
by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented."), "accessMode": "Read", "dataType": "Real32", "count": 1 }, "seed": { "description": "Seed for the random number generator.", "accessMode": "Read", "dataType": "UInt32", "count": 1 }, "implementation": { "description": "Apical implementation", "accessMode": "Read", "dataType": "Byte", "count": 0, "constraints": ("enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent"), "defaultValue": "ApicalTiebreakCPP" }, }, } return spec
Return the Spec for ApicalTMSequenceRegion.
def _map_input(self, input_stream): for record in self.reader(input_stream): for output in self.mapper(*record): yield output if self.final_mapper != NotImplemented: for output in self.final_mapper(): yield output self._flush_batch_incr_counter()
Iterate over input and call the mapper for each item. If the job has a parser defined, the return values from the parser will be passed as arguments to the mapper. If the input is coded output from a previous run, the arguments will be splitted in key and value.
def get_dev_details(ip_address): if auth is None or url is None: set_imc_creds() global r get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \ str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false" f_url = url + get_dev_details_url payload = None r = requests.get(f_url, auth=auth, headers=headers) if r.status_code == 200: dev_details = (json.loads(r.text)) if len(dev_details) == 0: print("Device not found") return "Device not found" elif type(dev_details[]) == list: for i in dev_details[]: if i[] == ip_address: dev_details = i return dev_details elif type(dev_details[]) == dict: return dev_details[] else: print("dev_details: An Error has occured")
Takes string input of IP address to issue RESTUL call to HP IMC :param ip_address: string object of dotted decimal notation of IPv4 address :return: dictionary of device details >>> get_dev_details('10.101.0.1') {'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'} >>> get_dev_details('8.8.8.8') Device not found 'Device not found'
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): C = self.COEFFS[imt] mean = (self._get_magnitude_scaling(C, rup.mag) + self._get_distance_scaling(C, dists, rup.mag) + self._get_site_term(C, sites.vs30)) mean -= np.log(g) stddevs = self.get_stddevs(C, sites.vs30.shape, stddev_types) return mean + self.adjustment_factor, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def colors_like(color, arr, colormap=DEFAULT_COLORMAP): s in X11 import numpy as np from blmath.numerics import is_empty_arraylike if is_empty_arraylike(color): return None if isinstance(color, basestring): from lace.color_names import name_to_rgb color = name_to_rgb[color] elif isinstance(color, list): color = np.array(color) color = np.squeeze(color) num_verts = arr.shape[0] if color.ndim == 1: if color.shape[0] == 3: return np.ones((num_verts, 3)) * np.array([color]) else: from matplotlib import cm return np.ones((num_verts, 3)) * cm.get_cmap(colormap)(color.flatten())[:, :3] elif color.ndim == 2: if color.shape[1] == num_verts: color = color.T return np.ones((num_verts, 3)) * color else: raise ValueError("Colors must be specified as one or two dimensions")
Given an array of size NxM (usually Nx3), we accept color in the following ways: - A string color name. The accepted names are roughly what's in X11's rgb.txt - An explicit rgb triple, in (3, ), (3, 1), or (1, 3) shape - A list of values (N, ), (N, 1), or (1, N) that are put through a colormap to get per vertex color - An array of colors (N, 3) or (3, N) There is a potential for conflict here if N == 3. In that case we assume a value is an rgb triple, not a colormap index. This is a sort of degenerate case, as a mesh with three verticies is just a single triangle and not something we ever actually use in practice.
def sources(self): api_url = self.sources_api_url.format(experience_id=self.experience_id) res = self.get(api_url, params={"pinst_id": self.pinst_id}) return self.session.http.json(res)
Get the sources for a given experience_id, which is tied to a specific language :param experience_id: int; video content id :return: sources dict
def drop_column(self, name): if self.db.engine.dialect.name == : raise RuntimeError("SQLite does not support dropping columns.") name = normalize_column_name(name) with self.db.lock: if not self.exists or not self.has_column(name): log.debug("Column does not exist: %s", name) return self._threading_warn() self.db.op.drop_column( self.table.name, name, self.table.schema ) self._reflect_table()
Drop the column ``name``. :: table.drop_column('created_at')
def get_random_edge(self): nodes = [ (n, self.in_out_ratio(n)) for n in self.unscored_nodes_iter() if n != self.target_node ] node, deg = min(nodes, key=itemgetter(1)) log.log(5, , node, deg) possible_edges = self.graph.in_edges(node, keys=True) log.log(5, , possible_edges) edge_to_remove = random.choice(possible_edges) log.log(5, , edge_to_remove) return edge_to_remove
This function should be run when there are no leaves, but there are still unscored nodes. It will introduce a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score for the network. This means that the score can be averaged over many runs for a given graph, and a better data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges have been disregarded, later) 1. get all un-scored 2. rank by in-degree 3. weighted probability over all in-edges where lower in-degree means higher probability 4. pick randomly which edge :return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key) :rtype: tuple
def setup_experiment(debug=True, verbose=False, app=None): print_header() log("Verifying that directory is compatible with Wallace...") if not verify_package(verbose=verbose): raise AssertionError( "This is not a valid Wallace app. " + "Fix the errors and then try running .") try: psycopg2.connect(database="x", user="postgres", password="nada") except psycopg2.OperationalError, e: if "could not connect to server" in str(e): raise RuntimeError("The Postgres server isns user-facing. if app: id_long = id id = str(app) log("Running as experiment " + id + "...") dst = os.path.join(tempfile.mkdtemp(), id) to_ignore = shutil.ignore_patterns( ".git/*", "*.db", "snapshots", "data", "server.log" ) shutil.copytree(os.getcwd(), dst, ignore=to_ignore) click.echo(dst) with open(os.path.join(dst, "experiment_id.txt"), "w") as file: if app: file.write(id_long) else: file.write(id) if not debug: log("Freezing the experiment package...") shutil.make_archive( os.path.join("snapshots", id + "-code"), "zip", dst) cwd = os.getcwd() os.chdir(dst) if not os.path.exists("static/scripts"): os.makedirs("static/scripts") if not os.path.exists("templates"): os.makedirs("templates") if not os.path.exists("static/css"): os.makedirs("static/css") os.rename( os.path.join(dst, "experiment.py"), os.path.join(dst, "wallace_experiment.py")) src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "custom.py") shutil.copy(src, os.path.join(dst, "custom.py")) heroku_files = [ "Procfile", "requirements.txt", "psiturkapp.py", "worker.py", "clock.py", ] for filename in heroku_files: src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "heroku", filename) shutil.copy(src, os.path.join(dst, filename)) clock_on = config.getboolean(, ) if not clock_on: src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "heroku", "Procfile_no_clock") shutil.copy(src, os.path.join(dst, "Procfile")) frontend_files = [ "static/css/wallace.css", "static/scripts/wallace.js", 
"static/scripts/reqwest.min.js", "templates/error_wallace.html", "templates/launch.html", "templates/complete.html", "static/robots.txt" ] for filename in frontend_files: src = os.path.join( os.path.dirname(os.path.realpath(__file__)), "frontend", filename) shutil.copy(src, os.path.join(dst, filename)) time.sleep(0.25) os.chdir(cwd) return (id, dst)
Check the app and, if it's compatible with Wallace, freeze its state.
def copy(self): return CIMClassName( self.classname, host=self.host, namespace=self.namespace)
Return a new :class:`~pywbem.CIMClassName` object that is a copy of this CIM class path. Objects of this class have no mutable types in any attributes, so modifications of the original object will not affect the returned copy, and vice versa. Note that the Python functions :func:`py:copy.copy` and :func:`py:copy.deepcopy` can be used to create completely shallow or completely deep copies of objects of this class.
def extract_signature(docstring): root = publish_doctree(docstring, settings_overrides={"report_level": 5}) fields = get_fields(root) return fields.get(SIG_FIELD)
Extract the signature from a docstring. :sig: (str) -> Optional[str] :param docstring: Docstring to extract the signature from. :return: Extracted signature, or ``None`` if there's no signature.
def search_directory(self, **kwargs): search_response = self.request(, kwargs) result = {} items = { "account": zobjects.Account.from_dict, "domain": zobjects.Domain.from_dict, "dl": zobjects.DistributionList.from_dict, "cos": zobjects.COS.from_dict, "calresource": zobjects.CalendarResource.from_dict } for obj_type, func in items.items(): if obj_type in search_response: if isinstance(search_response[obj_type], list): result[obj_type] = [ func(v) for v in search_response[obj_type]] else: result[obj_type] = func(search_response[obj_type]) return result
SearchAccount is deprecated, using SearchDirectory :param query: Query string - should be an LDAP-style filter string (RFC 2254) :param limit: The maximum number of accounts to return (0 is default and means all) :param offset: The starting offset (0, 25, etc) :param domain: The domain name to limit the search to :param applyCos: applyCos - Flag whether or not to apply the COS policy to account. Specify 0 (false) if only requesting attrs that aren't inherited from COS :param applyConfig: whether or not to apply the global config attrs to account. specify 0 (false) if only requesting attrs that aren't inherited from global config :param sortBy: Name of attribute to sort on. Default is the account name. :param types: Comma-separated list of types to return. Legal values are: accounts|distributionlists|aliases|resources|domains|coses (default is accounts) :param sortAscending: Whether to sort in ascending order. Default is 1 (true) :param countOnly: Whether response should be count only. Default is 0 (false) :param attrs: Comma-seperated list of attrs to return ("displayName", "zimbraId", "zimbraAccountStatus") :return: dict of list of "account" "alias" "dl" "calresource" "domain" "cos"
def add_tag(self, tag): if tag not in self._tags: self._tags[tag] = dict()
add a tag to the tag list
def dependency_items(self):
    """Generate ``(container_config_id, dependency_list)`` pairs for every
    container instance in the (extended) map.

    Dependencies cover used containers/volumes, linked containers,
    network-mode source containers, attached networks, attached volumes,
    and the container image itself.

    NOTE(review): the separator literals for the ``partition()`` calls
    appear stripped in this copy (likely '.') -- confirm against upstream.
    """
    def _get_used_items_np(u):
        # Resolve a "uses" reference when attached-parent naming is OFF:
        # follow the attaching map back to the owning container config.
        volume_config_name, __, volume_instance = u.name.partition()
        attaching_config_name = attaching.get(volume_config_name)
        if attaching_config_name:
            used_c_name = attaching_config_name
            used_instances = instances.get(attaching_config_name)
        else:
            used_c_name = volume_config_name
            if volume_instance:
                used_instances = (volume_instance, )
            else:
                used_instances = instances.get(volume_config_name)
        return [MapConfigId(ItemType.CONTAINER, self._name, used_c_name, ai)
                for ai in used_instances or (None, )]

    def _get_used_items_ap(u):
        # Resolve a "uses" reference when attached-parent naming is ON:
        # the name already addresses the parent config directly.
        volume_config_name, __, volume_instance = u.name.partition()
        attaching_config = ext_map.get_existing(volume_config_name)
        attaching_instances = instances.get(volume_config_name)
        config_volumes = {a.name for a in attaching_config.attaches}
        if not volume_instance or volume_instance in config_volumes:
            used_instances = attaching_instances
        else:
            used_instances = (volume_instance, )
        return [MapConfigId(ItemType.CONTAINER, self._name, volume_config_name, ai)
                for ai in used_instances or (None, )]

    def _get_linked_items(lc):
        # Expand a link target into one config id per instance.
        linked_config_name, __, linked_instance = lc.partition()
        if linked_instance:
            linked_instances = (linked_instance, )
        else:
            linked_instances = instances.get(linked_config_name)
        return [MapConfigId(ItemType.CONTAINER, self._name, linked_config_name, li)
                for li in linked_instances or (None, )]

    def _get_network_mode_items(n):
        # A tuple network_mode refers to another container's network stack.
        net_config_name, net_instance = n
        network_ref_config = ext_map.get_existing(net_config_name)
        if network_ref_config:
            if net_instance and net_instance in network_ref_config.instances:
                network_instances = (net_instance, )
            else:
                network_instances = network_ref_config.instances or (None, )
            return [MapConfigId(ItemType.CONTAINER, self._name, net_config_name, ni)
                    for ni in network_instances]
        return []

    def _get_network_items(n):
        # Preset networks (bridge, host, ...) are not dependencies.
        if n.network_name in DEFAULT_PRESET_NETWORKS:
            return []
        net_items = [MapConfigId(ItemType.NETWORK, self._name, n.network_name)]
        if n.links:
            net_items.extend(itertools.chain.from_iterable(_get_linked_items(l.container) for l in n.links))
        return net_items

    # Work on the extended map so aliases and inheritance are resolved.
    if self._extended:
        ext_map = self
    else:
        ext_map = self.get_extended_map()

    instances = {c_name: c_config.instances for c_name, c_config in ext_map}
    if not self.use_attached_parent_name:
        # Reverse index: attached volume alias -> owning container config.
        attaching = {attaches.name: c_name
                     for c_name, c_config in ext_map
                     for attaches in c_config.attaches}
        used_func = _get_used_items_np
    else:
        used_func = _get_used_items_ap

    def _get_dep_list(name, config):
        # Collect every dependency of one container config, de-duplicated
        # via merge_list, with the image dependency appended last.
        image, tag = self.get_image(config.image or name)
        d = []
        nw = config.network_mode
        if isinstance(nw, tuple):
            merge_list(d, _get_network_mode_items(nw))
        merge_list(d, itertools.chain.from_iterable(map(_get_network_items, config.networks)))
        merge_list(d, itertools.chain.from_iterable(map(used_func, config.uses)))
        merge_list(d, itertools.chain.from_iterable(_get_linked_items(l.container) for l in config.links))
        d.extend(MapConfigId(ItemType.VOLUME, self._name, name, a.name) for a in config.attaches)
        d.append(MapConfigId(ItemType.IMAGE, self._name, image, tag))
        return d

    for c_name, c_config in ext_map:
        dep_list = _get_dep_list(c_name, c_config)
        for c_instance in c_config.instances or (None, ):
            yield MapConfigId(ItemType.CONTAINER, self._name, c_name, c_instance), dep_list
Generates all containers' dependencies, i.e. an iterator on tuples in the format ``(container_name, used_containers)``, whereas the used containers are a set, and can be empty. :return: Container dependencies. :rtype: collections.Iterable
def slugify(text, sep=):
    """Return an ASCII slug of *text*, or ``None`` if it cannot be
    stringified or normalized.

    Existing separators are collapsed to whitespace before ASCII
    normalization, then whitespace is re-joined with the separator.

    NOTE(review): the default separator literal is missing from the
    signature in this copy (likely '-').
    """
    text = stringify(text)
    if text is None:
        return None
    # Treat existing separators as whitespace so they collapse on rejoin.
    text = text.replace(sep, WS)
    text = normalize(text, ascii=True)
    if text is None:
        return None
    return text.replace(WS, sep)
A simple slug generator.
def get_sub_comp_info(source_info, comp):
    """Build the information dict for one sub-component of a selection.

    If the source defines no sub-components, a copy of *source_info* is
    returned unchanged; otherwise the matching sub-component entry
    (keyed either by the component key or by a "zmax<N>" key for moving
    sources) is merged over a copy of *source_info*.

    NOTE(review): the dict-key literals for the ``get()`` calls are
    stripped in this copy (presumably 'components', 'moving',
    'selection_dependent') -- confirm against upstream.
    """
    sub_comps = source_info.get(, None)
    if sub_comps is None:
        return source_info.copy()
    moving = source_info.get(, False)
    selection_dependent = source_info.get(, False)
    # Pick the lookup key: selection-dependent components use their own
    # key; moving sources are keyed by their zmax value.
    if selection_dependent:
        key = comp.make_key()
    elif moving:
        key = "zmax%i" % comp.zmax
    ret_dict = source_info.copy()
    ret_dict.update(sub_comps[key])
    return ret_dict
Build and return information about a sub-component for a particular selection
def set_sum_w2(self, w, ix, iy=0, iz=0):
    """Store *w* as the sum-of-squared-weights (Sumw2) value for one bin.

    The (ix, iy, iz) indices are flattened to a linear index over the
    overflow-inclusive bin grid before writing into the Sumw2 array.

    :raises RuntimeError: if the histogram has no Sumw2 storage
    :raises IndexError: if the flattened index is out of range
    """
    n_sumw2 = self.GetSumw2N()
    if n_sumw2 == 0:
        raise RuntimeError(
            "Attempting to access Sumw2 in histogram "
            "where weights were not stored")
    # Flatten (ix, iy, iz) row-major over the x/y bin counts,
    # overflow bins included.
    nx = self.nbins(axis=0, overflow=True)
    ny = self.nbins(axis=1, overflow=True)
    flat = ix + nx * (iy + ny * iz)
    if flat < 0 or flat >= n_sumw2:
        raise IndexError("bin index out of range")
    self.GetSumw2().SetAt(w, flat)
Set the sum of squared weights (Sumw2) value *w* for the bin at indices (ix, iy, iz).
def create_prefetch(self, addresses):
    """Seed a ``_ContextFuture`` for every address ahead of the merkle-tree
    read.

    Each future is created with ``wait_for_tree=True`` so readers block
    until the tree lookup completes.

    Args:
        addresses (list of str): input addresses not covered by any
            base context.
    """
    with self._lock:
        # dict.update with a pair generator performs the same
        # per-address assignments as the original loop.
        self._state.update(
            (addr, _ContextFuture(address=addr, wait_for_tree=True))
            for addr in addresses)
Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain).
def OxmlElement(nsptag_str, nsmap=None):
    """Return a 'loose' lxml element for the namespace-prefixed tag
    *nsptag_str* (e.g. ``'a:tbl'``).

    When *nsmap* is omitted, the namespace map implied by the prefix is
    used. The element is created by the shared oxml parser, so custom
    element classes registered for the tag apply.
    """
    nsptag = NamespacePrefixedTag(nsptag_str)
    if nsmap is None:
        nsmap = nsptag.nsmap
    return oxml_parser.makeelement(nsptag.clark_name, nsmap=nsmap)
Return a 'loose' lxml element having the tag specified by *nsptag_str*. *nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'. The resulting element is an instance of the custom element class for this tag name if one is defined.
def withdict(parser, token):
    """``{% withdict expr %}`` template tag: push the given dict as an
    extra context layer for the enclosed block.

    :raises TemplateSyntaxError: if the tag does not get exactly one
        argument.

    NOTE(review): the end-tag literal inside ``parser.parse((,))`` has
    been stripped in this copy (likely 'endwithdict').
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("{% withdict %} expects one argument")
    nodelist = parser.parse((,))
    parser.delete_first_token()
    return WithDictNode(
        nodelist=nodelist,
        context_expr=parser.compile_filter(bits[1])
    )
Take a complete context dict as extra layer.
def set_argsx(self, arguments, *args):
    """Set up the command line arguments for the process; the first item
    must be an (absolute) filename to run.

    Variadic: the argument list must be NULL terminated. Delegates to
    the underlying C binding.
    """
    handle = self._as_parameter_
    return lib.zproc_set_argsx(handle, arguments, *args)
Setup the command line arguments, the first item must be an (absolute) filename to run. Variadic function, must be NULL terminated.
def _parse_request_arguments(self, request):
    """Parse comma-separated model parameters out of *request*.

    :param request: request expected to carry 'inference_address',
        'model_name', 'model_version' and 'model_signature' args.
    :return: tuple of four lists (addresses, names, versions, signatures).
    :raises common_utils.InvalidUserInputError: if the number of model
        names does not match the number of inference addresses.

    NOTE(review): the ``args.get()`` key literals and the ``split()``
    separator (',') are stripped in this copy.
    """
    inference_addresses = request.args.get().split()
    model_names = request.args.get().split()
    model_versions = request.args.get().split()
    model_signatures = request.args.get().split()
    if len(model_names) != len(inference_addresses):
        raise common_utils.InvalidUserInputError( +
            )
    return inference_addresses, model_names, model_versions, model_signatures
Parses comma separated request arguments Args: request: A request that should contain 'inference_address', 'model_name', 'model_version', 'model_signature'. Returns: A tuple of lists for model parameters
def _shortcut_open(
        uri,
        mode,
        ignore_ext=False,
        buffering=-1,
        encoding=None,
        errors=None,
):
    """Try to open *uri* with the plain built-in ``open``.

    Only applies to local files whose extension is not a registered
    compressor (unless *ignore_ext* is set); returns ``None`` when the
    shortcut cannot be used so the caller falls back to the full path.

    NOTE(review): the scheme literal ('file'), the kwargs key
    ('encoding'), and the ``mode.replace('b', '')`` literals are
    stripped in this copy.
    """
    if not isinstance(uri, six.string_types):
        return None
    parsed_uri = _parse_uri(uri)
    # Only local files qualify for the shortcut.
    if parsed_uri.scheme != :
        return None
    _, extension = P.splitext(parsed_uri.uri_path)
    # Compressed files need the full (decompressing) open path.
    if extension in _COMPRESSOR_REGISTRY and not ignore_ext:
        return None
    open_kwargs = {}
    if encoding is not None:
        open_kwargs[] = encoding
        mode = mode.replace(, )
    if six.PY3:
        return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
    elif not open_kwargs:
        return _builtin_open(parsed_uri.uri_path, mode, buffering=buffering)
    # Python 2 with encoding/errors requires io.open.
    return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
Try to open the URI using the standard library io.open function. This can be much faster than the alternative of opening in binary mode and then decoding. This is only possible under the following conditions: 1. Opening a local file 2. Ignore extension is set to True If it is not possible to use the built-in open for the specified URI, returns None. :param str uri: A string indicating what to open. :param str mode: The mode to pass to the open function. :param dict kw: :returns: The opened file :rtype: file
def random(pages=1):
    """Return random Wikipedia article title(s) from namespace 0.

    :param pages: number of random pages to request (max 10 per the API).
    :return: a single title string when exactly one was returned,
        otherwise a list of titles.

    NOTE(review): the query-parameter key literals ('list', 'rnnamespace',
    'rnlimit') and the response keys are stripped in this copy.
    """
    query_params = {
        : ,
        : 0,
        : pages,
    }
    request = _wiki_request(query_params)
    titles = [page[] for page in request[][]]
    # Unwrap a singleton for caller convenience.
    if len(titles) == 1:
        return titles[0]
    return titles
Get a list of random Wikipedia article titles. .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages. Keyword arguments: * pages - the number of random pages returned (max of 10)
def _parse(self):
    """Parse and validate the WFN-style CPE Name held in ``self._str``.

    Splits the bracketed content into attribute=value pairs, wraps each
    value in the appropriate CPEComponent subclass (quoted string,
    logical ANY/NA, or undefined), then files the components under the
    proper CPE part keys.

    :returns: None
    :exception: ValueError - bad-formed CPE Name

    NOTE(review): the quote literals for the ``startswith()``/
    ``endswith()`` checks (likely '"') and the placeholders inside the
    error-message ``format()`` calls are stripped in this copy.
    """
    # The name must look like: wfn:[...]
    if self._str[0:5] != CPE2_3_WFN.CPE_PREFIX:
        errmsg = "Bad-formed CPE Name: WFN prefix not found"
        raise ValueError(errmsg)
    if self._str[-1:] != "]":
        errmsg = "Bad-formed CPE Name: final bracket of WFN not found"
        raise ValueError(errmsg)
    content = self._str[5:-1]
    if content != "":
        components = dict()
        list_component = content.split(CPEComponent2_3_WFN.SEPARATOR_COMP)
        for e in list_component:
            # Whitespace inside a component is not allowed.
            if e.find(" ") != -1:
                msg = "Bad-formed CPE Name: WFN with too many whitespaces"
                raise ValueError(msg)
            pair = e.split(CPEComponent2_3_WFN.SEPARATOR_PAIR)
            att_name = pair[0]
            att_value = pair[1]
            if att_name not in CPEComponent.CPE_COMP_KEYS_EXTENDED:
                msg = "Bad-formed CPE Name: invalid attribute name ".format(
                    att_name)
                raise ValueError(msg)
            if att_name in components:
                msg = "Bad-formed CPE Name: attribute repeated".format(
                    att_name)
                raise ValueError(msg)
            # Unquoted values must be the logical ANY or NA constants.
            if not (att_value.startswith() and att_value.endswith()):
                strUpper = att_value.upper()
                if strUpper == CPEComponent2_3_WFN.VALUE_ANY:
                    comp = CPEComponentAnyValue()
                elif strUpper == CPEComponent2_3_WFN.VALUE_NA:
                    comp = CPEComponentNotApplicable()
                else:
                    msg = "Invalid logical value ".format(att_value)
                    raise ValueError(msg)
            elif att_value.startswith() and att_value.endswith():
                comp = CPEComponent2_3_WFN(att_value, att_name)
            else:
                msg = "Bad-formed CPE Name: invalid value ".format(
                    att_value)
                raise ValueError(msg)
            components[att_name] = comp
        # Fill in any attribute the name did not mention.
        for ck in CPEComponent.CPE_COMP_KEYS_EXTENDED:
            if ck not in components:
                components[ck] = CPEComponentUndefined()
        # Route the components by the value of the "part" attribute.
        part_comp = components[CPEComponent.ATT_PART]
        if isinstance(part_comp, CPEComponentLogical):
            elements = []
            elements.append(components)
            self[CPE.KEY_UNDEFINED] = elements
        else:
            part_value = part_comp.get_value()
            system = part_value[1:-1]
            if system in CPEComponent.SYSTEM_VALUES:
                self._create_cpe_parts(system, components)
            else:
                self._create_cpe_parts(CPEComponent.VALUE_PART_UNDEFINED,
                                       components)
    # Guarantee every part key exists, even if empty.
    for pk in CPE.CPE_PART_KEYS:
        if pk not in self.keys():
            self[pk] = []
Checks if the CPE Name is valid. :returns: None :exception: ValueError - bad-formed CPE Name
def mint_sub(client_salt, sector_id="", subject_type="public", uid=,
             user_salt=):
    """Mint a new subject identifier (sub).

    :param client_salt: client specific salt - used for pairwise subs
    :param sector_id: possible sector identifier
    :param subject_type: 'public' (sha256 of uid+user_salt) or
        'pairwise' (sector-scoped pairwise id)
    :param uid: local user identifier
    :param user_salt: per-user salt
    :return: subject identifier string

    NOTE(review): the default literals for ``uid`` and ``user_salt``
    were stripped in this copy (likely empty strings).
    """
    if subject_type == "public":
        sub = hashlib.sha256(
            "{}{}".format(uid, user_salt).encode("utf-8")).hexdigest()
    else:
        sub = pairwise_id(uid, sector_id,
                          "{}{}".format(client_salt, user_salt))
    return sub
Mint a new sub (subject identifier) :param authn_event: Authentication event information :param client_salt: client specific salt - used in pairwise :param sector_id: Possible sector identifier :param subject_type: 'public'/'pairwise' :return: Subject identifier
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int:
    """Return the number of shards needed for the training data.

    :param num_samples: Number of training data samples.
    :param samples_per_shard: Target samples per shard.
    :param min_num_shards: Lower bound on the shard count.
    :return: ceil(num_samples / samples_per_shard), floored at
        min_num_shards.
    """
    required = int(math.ceil(num_samples / samples_per_shard))
    return required if required > min_num_shards else min_num_shards
Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards.
def invalid_config_error_message(action, key, val):
    """Return a friendly error message for an invalid configuration value.

    Boolean-style options get a yes/no-oriented message; everything else
    gets a numerical-value message.

    NOTE(review): the action-name literals in the membership test were
    stripped in this copy (likely 'store_true'/'store_false').
    """
    if action in (, ):
        return ("{0} is not a valid value for {1} option, "
                "please specify a boolean value like yes/no, "
                "true/false or 1/0 instead.").format(val, key)

    return ("{0} is not a valid value for {1} option, "
            "please specify a numerical value like 1/0 "
            "instead.").format(val, key)
Returns a better error message when invalid configuration option is provided.
def independent_data(self):
    """Read-only view of the data for each independent variable.

    :return: OrderedDict mapping each independent variable name to its
        data, in the order declared by the model.
    :rtype: collections.OrderedDict
    """
    data = self.data
    pairs = ((name, data[name]) for name in self.model.independent_vars)
    return OrderedDict(pairs)
Read-only Property :return: Data belonging to each independent variable as a dict with variable names as key, data as value. :rtype: collections.OrderedDict
def OnSecondaryCheckbox(self, event):
    """Secondary-axis checkbox handler.

    Mirrors the checkbox state onto both the "top" and "right" axis
    attributes, then requests a chart redraw.
    """
    checked = event.IsChecked()
    self.attrs["top"] = checked
    self.attrs["right"] = checked
    post_command_event(self, self.DrawChartMsg)
Secondary-axis checkbox event handler; sets both the "top" and "right" attributes and redraws the chart.
def write(self, path=None):
    """Write all HostsEntry instances back to the hosts file.

    :param path: override the write path (defaults to ``self.hosts_path``)
    :return: dict of counts: total lines, comments, blanks, ipv4 and
        ipv6 entries written
    :raises UnableToWriteHosts: if the file cannot be written

    NOTE(review): the open-mode literal, the entry_type comparison
    literals ('comment', 'blank', 'ipv4', 'ipv6'), the name-join
    separator (' ') and the result-dict keys are stripped in this copy.
    Also note the bare ``except:`` swallows the underlying error.
    """
    written_count = 0
    comments_written = 0
    blanks_written = 0
    ipv4_entries_written = 0
    ipv6_entries_written = 0
    if path:
        output_file_path = path
    else:
        output_file_path = self.hosts_path
    try:
        with open(output_file_path, ) as hosts_file:
            for written_count, line in enumerate(self.entries):
                if line.entry_type == :
                    hosts_file.write(line.comment + "\n")
                    comments_written += 1
                if line.entry_type == :
                    hosts_file.write("\n")
                    blanks_written += 1
                if line.entry_type == :
                    hosts_file.write(
                        "{0}\t{1}\n".format(
                            line.address,
                            .join(line.names),
                        )
                    )
                    ipv4_entries_written += 1
                if line.entry_type == :
                    hosts_file.write(
                        "{0}\t{1}\n".format(
                            line.address,
                            .join(line.names),
                        ))
                    ipv6_entries_written += 1
    except:
        raise UnableToWriteHosts()
    # written_count is the last enumerate index, hence the +1.
    return {: written_count + 1,
            : comments_written,
            : blanks_written,
            : ipv4_entries_written,
            : ipv6_entries_written}
Write all of the HostsEntry instances back to the hosts file :param path: override the write path :return: Dictionary containing counts
def clear_data(self, queues=None, edge=None, edge_type=None):
    """Clear collected data from the selected queues.

    When no selector (``queues``, ``edge``, ``edge_type``) is given,
    every queue's data is cleared. Selection semantics are delegated to
    ``_get_queues``.
    """
    for queue_index in _get_queues(self.g, queues, edge, edge_type):
        self.edge2queue[queue_index].data = {}
Clears data from all queues. If none of the parameters are given then every queue's data is cleared. Parameters ---------- queues : int or an iterable of int (optional) The edge index (or an iterable of edge indices) identifying the :class:`QueueServer(s)<.QueueServer>` whose data will be cleared. edge : 2-tuple of int or *array_like* (optional) Explicitly specify which queues' data to clear. Must be either: * A 2-tuple of the edge's source and target vertex indices, or * An iterable of 2-tuples of the edge's source and target vertex indices. edge_type : int or an iterable of int (optional) A integer, or a collection of integers identifying which edge types will have their data cleared.
def sanity(request, sysmeta_pyxb):
    """Check that *sysmeta_pyxb* is suitable for creating a new object and
    matches the uploaded science-object bytes.

    Replica sections, archived flag and obsoletedBy must be absent; size
    and checksum checks are skipped for proxied (remote URL) objects.

    NOTE(review): the ``request.META`` key literal is stripped in this
    copy (likely the vendor remote-URL header key).
    """
    _does_not_contain_replica_sections(sysmeta_pyxb)
    _is_not_archived(sysmeta_pyxb)
    _obsoleted_by_not_specified(sysmeta_pyxb)
    # Remote (proxy) objects carry no local bytes to verify.
    if in request.META:
        return
    _has_correct_file_size(request, sysmeta_pyxb)
    _is_supported_checksum_algorithm(sysmeta_pyxb)
    _is_correct_checksum(request, sysmeta_pyxb)
Check that sysmeta_pyxb is suitable for creating a new object and matches the uploaded sciobj bytes.
def _prt_qualifiers(associations, prt=sys.stdout):
    """Print a frequency table of the Qualifiers found in *associations*,
    most common first.

    NOTE(review): the header string passed to the first ``prt.write()``
    and the per-row format string are stripped in this copy.
    """
    prt.write()
    # Count every qualifier across all association namedtuples.
    for fld, cnt in cx.Counter(q for nt in associations for q in nt.Qualifier).most_common():
        prt.write(.format(N=cnt, FLD=fld))
Print Qualifiers found in the annotations. QUALIFIERS: 1,462 colocalizes_with 1,454 contributes_to 1,157 not 13 not colocalizes_with (TBD: CHK - Seen in gene2go, but not gafs) 4 not contributes_to (TBD: CHK - Seen in gene2go, but not gafs)
def _compute_site_scaling(self, C, vs30): site_term = np.zeros(len(vs30), dtype=float) site_term[vs30 < 760.0] = C["e"] return site_term
Returns the site scaling term as a simple coefficient
def sort(self):
    """Sort the fragments in the list, then verify that consecutive
    intervals relate in an allowed way.

    :raises ValueError: if there is a fragment which violates the list
        constraints (forbidden overlap with its successor)
    """
    # Fast path: nothing to do if sortedness is already guaranteed.
    if self.is_guaranteed_sorted:
        self.log(u"Already sorted, returning")
        return
    self.log(u"Sorting...")
    self.__fragments = sorted(self.__fragments)
    self.log(u"Sorting... done")
    self.log(u"Checking relative positions...")
    # Pairwise check: each interval's position relative to its successor
    # must be one of the ALLOWED_POSITIONS.
    for i in range(len(self) - 1):
        current_interval = self[i].interval
        next_interval = self[i + 1].interval
        if current_interval.relative_position_of(next_interval) not in self.ALLOWED_POSITIONS:
            self.log(u"Found overlapping fragments:")
            self.log([u" Index %d => %s", i, current_interval])
            self.log([u" Index %d => %s", i + 1, next_interval])
            self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError)
    self.log(u"Checking relative positions... done")
    self.__sorted = True
Sort the fragments in the list. :raises ValueError: if there is a fragment which violates the list constraints
def import_field(field_classpath):
    """Import a model field by dotted class path, prepending
    "django.db.models" to raw class names.

    :raises ImproperlyConfigured: if the import fails.

    NOTE(review): the membership-test literal ('.') and the '%s'
    placeholder in the error message are stripped in this copy -- as
    shown, the ``%`` on a placeholder-free string would raise TypeError.
    """
    # A bare class name (no dot) is assumed to live in django.db.models.
    if in field_classpath:
        fully_qualified = field_classpath
    else:
        fully_qualified = "django.db.models.%s" % field_classpath
    try:
        return import_dotted_path(fully_qualified)
    except ImportError:
        raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains "
                                   "the field which could not be "
                                   "imported." % field_classpath)
Imports a field by its dotted class path, prepending "django.db.models" to raw class names and raising an exception if the import fails.
def columns(x, rho, proxop):
    """Apply a proximal operator independently to each column of *x*.

    :param x: 2-D array whose columns are processed.
    :param rho: penalty parameter forwarded to *proxop*.
    :param proxop: callable ``proxop(column, rho) -> column``.
    :return: array of the same shape/dtype as *x* holding the results.
    """
    out = np.zeros_like(x)
    # x.T iterates over the columns of x (as views).
    for j, col in enumerate(x.T):
        out[:, j] = proxop(col, rho)
    return out
Applies a proximal operator to the columns of a matrix
def read(self):
    """Iterate over all JSON input, yielding one parsed object per line
    (generator).
    """
    for raw_line in self.io.read():
        # parse_line is a context manager yielding the parsed object.
        with self.parse_line(raw_line) as parsed:
            yield parsed
Iterate over all JSON input (Generator)
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param str path: Path of the file to validate.
    :param str checksum: Expected digest value.
    :param str hash_type: Hash algorithm used to generate `checksum`;
        any algorithm supported by :mod:`hashlib` (md5, sha1, sha256,
        sha512, ...).
    :raises ChecksumError: If the computed digest does not match.
    """
    actual_checksum = file_hash(path, hash_type)
    if checksum != actual_checksum:
        # Bug fix: the previous format string had no '%s' placeholders,
        # so the mismatch raised TypeError instead of ChecksumError.
        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
Validate a file using a cryptographic checksum. :param str checksum: Value of the checksum used to validate the file. :param str hash_type: Hash algorithm used to generate `checksum`. Can be any hash alrgorithm supported by :mod:`hashlib`, such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum
def get_consensus_module(module_name):
    """Return a consensus module by name.

    Args:
        module_name (str): The name of the module to load; well-known
            short names are mapped to their full package paths.

    Returns:
        module: The imported consensus module.

    Raises:
        UnknownConsensusModuleError: If *module_name* does not
            correspond to an importable consensus implementation.

    NOTE(review): the short-name literals and their package-path
    strings, plus the error format string, are stripped in this copy.
    """
    module_package = module_name
    if module_name == :
        module_package = (
        )
    elif module_name == :
        module_package = (
        )
    try:
        return importlib.import_module(module_package)
    except ImportError:
        raise UnknownConsensusModuleError(
            .format(module_name))
Returns a consensus module by name. Args: module_name (str): The name of the module to load. Returns: module: The consensus module. Raises: UnknownConsensusModuleError: Raised if the given module_name does not correspond to a consensus implementation.
def get_current_and_head_revision(
        database_url: str,
        alembic_config_filename: str,
        alembic_base_dir: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]:
    """Return ``(current_revision, head_revision)`` for the database.

    Arguments:
        database_url: SQLAlchemy URL for the database
        alembic_config_filename: Alembic config filename
        alembic_base_dir: directory to start in, so relative paths in
            the config file work
        version_table: table name for Alembic versions
    """
    # Newest revision defined by the migration scripts.
    head_revision = get_head_revision_from_alembic(
        alembic_config_filename=alembic_config_filename,
        alembic_base_dir=alembic_base_dir,
        version_table=version_table,
    )
    log.info("Intended database version: {}", head_revision)
    # Revision the live database is currently at.
    current_revision = get_current_revision(
        database_url=database_url,
        version_table=version_table,
    )
    log.info("Current database version: {}", current_revision)
    return current_revision, head_revision
Returns a tuple of ``(current_revision, head_revision)``; see :func:`get_current_revision` and :func:`get_head_revision_from_alembic`. Arguments: database_url: SQLAlchemy URL for the database alembic_config_filename: config filename alembic_base_dir: directory to start in, so relative paths in the config file work. version_table: table name for Alembic versions
def assert_valid(sysmeta_pyxb, pid):
    """Validate the science object for *pid* against the XML Schema chosen
    by ``sysmeta_pyxb.formatId``; raise InvalidRequest if invalid.

    Validation is skipped when disabled, when the formatId is not an
    installed science-metadata format, or when the object is over the
    configured size limit and the policy is to accept oversized objects.

    NOTE(review): the format string of the oversize InvalidRequest
    message is stripped in this copy.
    """
    if not (_is_validation_enabled() and _is_installed_scimeta_format_id(sysmeta_pyxb)):
        return
    if _is_above_size_limit(sysmeta_pyxb):
        if _is_action_accept():
            # Policy says accept oversized objects without validating.
            return
        else:
            raise d1_common.types.exceptions.InvalidRequest(
                0,
                .format(
                    sysmeta_pyxb.size, django.conf.settings.SCIMETA_VALIDATION_MAX_SIZE
                ),
            )
    with d1_gmn.app.sciobj_store.open_sciobj_file_by_pid_ctx(pid) as sciobj_file:
        try:
            d1_scimeta.xml_schema.validate(sysmeta_pyxb.formatId, sciobj_file.read())
        except d1_scimeta.xml_schema.SciMetaValidationError as e:
            # Re-raise schema failures as a DataONE InvalidRequest.
            raise d1_common.types.exceptions.InvalidRequest(0, str(e))
Validate file at {sciobj_path} against schema selected via formatId and raise InvalidRequest if invalid. Validation is only performed when: - SciMeta validation is enabled - and Object size is below size limit for validation - and formatId designates object as a Science Metadata object which is recognized and parsed by DataONE CNs - and XML Schema (XSD) files for formatId are present on local system
def get_node_label(self, model):
    """Build a display label for *model*, word-wrapped at ~15 characters.

    Proxy models are prefixed with "(P) ". Note: the wrapping loop
    deliberately preserves the leading space produced when words are
    appended to a line (original behaviour).
    """
    title = model.name.title()
    label = "(P) %s" % title if model.is_proxy else "%s" % title
    wrapped = []
    current = ""
    for word in label.split(" "):
        if len(current + word) > 15:
            wrapped.append(current)
            current = word
        else:
            current += " "
            current += word
    wrapped.append(current)
    return "\n".join(wrapped)
Defines how labels are constructed from models. Default - uses verbose name, lines breaks where sensible
def a_unexpected_prompt(ctx):
    """Handle an unexpected jump-host prompt.

    Records the prompt in the context message, marks the device as
    disconnected, finishes the FSM, and aborts the connection.

    :raises ConnectionError: always, to stop the connection attempt.
    """
    prompt = ctx.ctrl.match.group(0)
    # Bug fix: the format string had no placeholder, so the captured
    # prompt was silently dropped from the message.
    ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
    ctx.device.connected = False
    ctx.finished = True
    raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname)
Provide a message when an unexpected jump host prompt is received.
def _generateAlias(self):
    """Return an auth-level alias not yet present in
    ``self.auth_level_aliases``.

    Tries up to 1000 numbered candidates before giving up.

    :raises RuntimeError: if no unused alias is found.

    NOTE(review): the alias format string is stripped in this copy
    (``alias = % (i, )``), as is the RuntimeError message.
    """
    for i in range(1000):
        alias = % (i, )
        if alias not in self.auth_level_aliases:
            return alias
    raise RuntimeError()
Return an unused auth level alias
def cumulative_min(self):
    """Return the running minimum of the SArray's elements.

    Each output element is the minimum of all elements up to and
    including that position. Expects a numeric (int/float) SArray;
    missing values are ignored by the underlying aggregate.

    Returns
    -------
    out : SArray[int, float]
    """
    from .. import extensions
    # Delegate to the engine's built-in cumulative aggregate.
    return SArray(
        _proxy=self.__proxy__.builtin_cumulative_aggregate("__builtin__cum_min__"))
Return the cumulative minimum value of the elements in the SArray. Returns an SArray where each element in the output corresponds to the minimum value of all the elements preceding and including it. The SArray is expected to be of numeric type (int, float). Returns ------- out : SArray[int, float] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. Examples -------- >>> sa = SArray([1, 2, 3, 4, 0]) >>> sa.cumulative_min() dtype: int rows: 3 [1, 1, 1, 1, 0]
def on_commit(self, changes):
    """Index-writing hook called when models change.

    For each whoosheer with auto_update enabled, dispatches every
    change to a handler method named after the operation and model
    (lazily opening one index writer per whoosheer), then commits.

    NOTE(review): the config keys (likely 'enable_indexing' and
    'writer_timeout') and the handler-name format string are stripped
    in this copy.
    """
    # Bail out entirely when indexing is disabled in config.
    if _get_config(self)[] is False:
        return None
    for wh in self.whoosheers:
        if not wh.auto_update:
            continue
        writer = None
        for change in changes:
            if change[0].__class__ in wh.models:
                # change[1] is the operation; build e.g. "update_<model>".
                method_name = .format(change[1], change[0].__class__.__name__.lower())
                method = getattr(wh, method_name, None)
                if method:
                    # Open the writer lazily, only if a handler exists.
                    if not writer:
                        writer = type(self).get_or_create_index(_get_app(self), wh).\
                            writer(timeout=_get_config(self)[])
                    method(writer, change[0])
        if writer:
            writer.commit()
Method that gets called when a model is changed. This serves to do the actual index writing.
def generate_parameters(self, parameter_id):
    """Return one trial's hyper-parameter configuration.

    Pops an unevaluated individual from the population if any exists;
    otherwise evolves a new candidate via a two-way tournament followed
    by mutation of one randomly chosen search-space dimension.

    Parameters
    ----------
    parameter_id : int

    Returns
    -------
    config : dict
    """
    if not self.population:
        raise RuntimeError()
    # Prefer an individual that has not been evaluated yet.
    pos = -1
    for i in range(len(self.population)):
        if self.population[i].result is None:
            pos = i
            break
    if pos != -1:
        indiv = copy.deepcopy(self.population[pos])
        self.population.pop(pos)
        total_config = indiv.config
    else:
        # Tournament of two random individuals: keep the better one,
        # then mutate a single randomly chosen dimension.
        random.shuffle(self.population)
        if self.population[0].result < self.population[1].result:
            self.population[0] = self.population[1]
        space = json2space(self.searchspace_json,
                           self.population[0].config)
        is_rand = dict()
        mutation_pos = space[random.randint(0, len(space)-1)]
        for i in range(len(self.space)):
            is_rand[self.space[i]] = (self.space[i] == mutation_pos)
        config = json2paramater(
            self.searchspace_json, is_rand, self.random_state, self.population[0].config)
        self.population.pop(1)
        total_config = config
    # Remember the full config so the result can be matched back later.
    self.total_data[parameter_id] = total_config
    config = _split_index(total_config)
    return config
Returns a dict of trial (hyper-)parameters, as a serializable object. Parameters ---------- parameter_id : int Returns ------- config : dict
def lookup_facade(name, version):
    """Return the facade *name* from the newest client module at or below
    *version*.

    Walks versions downward until a client module exposes the facade.

    :raises ImportError: if no client version provides the facade.
    """
    for candidate in range(int(version), 0, -1):
        try:
            return getattr(CLIENTS[str(candidate)], name)
        except (KeyError, AttributeError):
            # Missing client version or facade; try the next one down.
            continue
    raise ImportError("No supported version for facade: "
                      "{}".format(name))
Given a facade name and version, attempt to pull that facade out of the correct client<version>.py file.
def merge_record_data(self, changes, orig_record=None):
    """Merge request *changes* with the existing db record so no data is
    lost, and protect immutable fields from being overwritten.

    PATCH-style requests overlay the changes onto a copy of the original
    record; otherwise an immutable field is copied back from the
    original.

    NOTE(review): the HTTP-method comparison literals (likely 'PATCH'
    and 'PUT') and the protected-field key are stripped in this copy.
    """
    current_app.logger.info("Merging request data with db record")
    current_app.logger.debug("orig_record: {}".format(orig_record))
    current_app.logger.debug("Changes".format(changes))
    final_record = changes
    if request.method == :
        # Overlay the partial changes onto the stored record.
        final_record = dict(orig_record)
        final_record.update(changes)
    elif request.method == :
        # Preserve the immutable field from the original record.
        if in orig_record:
            final_record[] = orig_record[]
    return final_record
This method merges PATCH requests with the db record to ensure no data is lost. In addition, it is also a hook for other fields to be overwritten, to ensure immutable fields aren't changed by a request.
def _string_like(self, patterns):
    """SQL ``LIKE`` matching.

    Builds one StringSQLLike expression per pattern and OR-combines
    them, so a row matches when **any** pattern matches. Use ``%`` as a
    multi-character wildcard and ``_`` as a single-character wildcard.
    """
    exprs = (
        ops.StringSQLLike(self, pattern).to_expr()
        for pattern in util.promote_list(patterns)
    )
    return functools.reduce(operator.or_, exprs)
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use % as a multiple-character wildcard or _ (underscore) as a single-character wildcard. Use re_search or rlike for regex-based matching. Parameters ---------- pattern : str or List[str] A pattern or list of patterns to match. If `pattern` is a list, then if **any** pattern matches the input then the corresponding row in the output is ``True``. Returns ------- matched : ir.BooleanColumn
def update_file(self, file_id, upload_id):
    """Send a PUT to ``/files/{file_id}`` pointing the file contents at
    *upload_id*.

    :param file_id: str uuid of the file
    :param upload_id: str uuid of the upload holding the file chunks
    :return: response of the underlying PUT request
    """
    return self._put(
        "/files/" + file_id,
        {"upload[id]": upload_id},
        content_type=ContentType.form)
Send PUT request to /files/{file_id} to update the file contents to upload_id and sets a label. :param file_id: str uuid of file :param upload_id: str uuid of the upload where all the file chunks where uploaded :param label: str short display label for the file :return: requests.Response containing the successful result
def fix_e125(self, result):
    """Fix E125: indentation indistinguishable from the next logical line.

    Re-indents the run of lines ending at the reported line (walking
    upward while indentation is at least as deep) so the continuation
    stands out, and returns the 1-based numbers of the modified lines.

    NOTE(review): the result-dict keys (likely 'info' and 'line') and
    the space literal used for padding are stripped in this copy.
    """
    num_indent_spaces = int(result[].split()[1])
    line_index = result[] - 1
    target = self.source[line_index]

    spaces_to_add = num_indent_spaces - len(_get_indentation(target))
    indent = len(_get_indentation(target))
    modified_lines = []

    # Walk upward, shifting every line at least as indented as the target.
    while len(_get_indentation(self.source[line_index])) >= indent:
        self.source[line_index] = (
             * spaces_to_add + self.source[line_index])
        modified_lines.append(1 + line_index)
        line_index -= 1

    return modified_lines
Fix indentation undistinguish from the next logical line.
def global_iterator(self):
    """Return the linearized (global) loop iterator as a sympy expression.

    The innermost loop varies fastest, so the loop stack is walked in
    reverse, accumulating each loop variable's offset scaled by the
    total extent of the loops inside it.
    """
    flat_iter = sympy.Integer(0)
    stride = sympy.Integer(1)
    for var_name, start, end, incr in reversed(self._loop_stack):
        loop_var = symbol_pos_int(var_name)
        flat_iter += (loop_var - start) * stride
        stride *= end - start
    return flat_iter
Return global iterator sympy expression
def getImportPeople(self):
    """Return an ImportPeopleWidget parented to this fragment.

    The widget will add people to ``self.organizer``.
    """
    widget = ImportPeopleWidget(self.organizer)
    widget.setFragmentParent(self)
    return widget
Return an L{ImportPeopleWidget} which is a child of this fragment and which will add people to C{self.organizer}.
def render(self, treewalker, encoding=None):
    """Serialize the stream from *treewalker* into a single string.

    :arg treewalker: the treewalker to serialize
    :arg encoding: when given, serialize to bytes in that encoding;
        otherwise return text
    :returns: the serialized tree
    """
    if encoding:
        chunks = self.serialize(treewalker, encoding)
        return b"".join(chunks)
    chunks = self.serialize(treewalker)
    return "".join(chunks)
Serializes the stream from the treewalker into a string :arg treewalker: the treewalker to serialize :arg encoding: the string encoding to use :returns: the serialized tree Example: >>> from html5lib import parse, getTreeWalker >>> from html5lib.serializer import HTMLSerializer >>> token_stream = parse('<html><body>Hi!</body></html>') >>> walker = getTreeWalker('etree') >>> serializer = HTMLSerializer(omit_optional_tags=False) >>> serializer.render(walker(token_stream)) '<html><head></head><body>Hi!</body></html>'
def make_relative(self, other):
    """Return this path expressed relative to *other*, using ``..``
    segments where needed.

    Unlike ``relative_to``, this does not require *self* to be a
    sub-path of *other*. Absolute paths are returned unchanged. Note
    the result can be invalid if *other* contains a directory symlink.
    """
    if self.is_absolute():
        return self
    from os.path import relpath
    base = self.__class__(other)
    return self.__class__(relpath(text_type(self), text_type(base)))
Return a new path that is the equivalent of this one relative to the path *other*. Unlike :meth:`relative_to`, this will not throw an error if *self* is not a sub-path of *other*; instead, it will use ``..`` to build a relative path. This can result in invalid relative paths if *other* contains a directory symbolic link. If *self* is an absolute path, it is returned unmodified.
def extract_from_image(self, image):
    """Extract the image pixels that fall inside this polygon.

    The polygon's bounding box is cropped from *image* (zero-padded
    where it leaves the image), then multiplied by a rasterized polygon
    mask so pixels outside the polygon become zero.

    NOTE(review): the mask shape is (max(yy)-min(yy), max(xx)-min(xx)),
    i.e. without the usual +1 for an inclusive pixel span -- confirm
    this off-by-one against upstream behaviour. ``np.bool`` is also a
    deprecated alias in modern NumPy.
    """
    ia.do_assert(image.ndim in [2, 3])
    if len(self.exterior) <= 2:
        raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.")
    bb = self.to_bounding_box()
    bb_area = bb.extract_from_image(image)
    # Fully outside the image: the zero-padded crop is all there is.
    if self.is_out_of_image(image, fully=True, partly=False):
        return bb_area
    xx = self.xx_int
    yy = self.yy_int
    # Shift polygon coordinates so the mask is local to the bounding box.
    xx_mask = xx - np.min(xx)
    yy_mask = yy - np.min(yy)
    height_mask = np.max(yy_mask)
    width_mask = np.max(xx_mask)
    rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask))
    mask = np.zeros((height_mask, width_mask), dtype=np.bool)
    mask[rr_face, cc_face] = True
    if image.ndim == 3:
        # Broadcast the 2-D mask over the channel axis.
        mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2]))
    return bb_area * mask
Extract the image pixels within the polygon. This function will zero-pad the image if the polygon is partially/fully outside of the image. Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the polygon. Returns ------- result : (H',W') ndarray or (H',W',C) ndarray Pixels within the polygon. Zero-padded if the polygon is partially/fully outside of the image.
def as_list(value):
    """Clever string splitting: return *value* as a list.

    Lists/tuples pass through; falsy values yield ``[]``; strings are
    split on the first matching separator character, with items
    stripped and empties dropped; anything else is wrapped in a list.

    NOTE(review): the separator-characters literal in the ``for`` loop
    is stripped in this copy (likely something like ', ' or ',;').
    """
    if isinstance(value, (list, tuple)):
        return value
    if not value:
        return []
    # Split on the first separator character found in the value.
    for c in :
        if c in value:
            value = value.split(c)
            return [v.strip() for v in value if v.strip()]
    return [value]
clever string spliting: .. code-block:: python >>> print(as_list('value')) ['value'] >>> print(as_list('v1 v2')) ['v1', 'v2'] >>> print(as_list(None)) [] >>> print(as_list(['v1'])) ['v1']
def _Rforce(self,R,z,phi=0.,t=0.):
    """Evaluate the radial force K_R(R, z) of the disk potential.

    Array inputs are handled element-wise via recursion. Far from the
    disk the force falls back to an approximating potential; otherwise
    the Bessel-integral representation is evaluated with Gauss-Legendre
    quadrature between consecutive zeros of J1.

    NOTE(review): the attribute name is stripped from ``hasattr(self,)``
    (the fallback-potential attribute), and the second ``kmax``
    assignment overrides the first -- both look like transcription
    damage; confirm against upstream.
    """
    if True:
        # Element-wise recursion for array input.
        if isinstance(R,nu.ndarray):
            if not isinstance(z,nu.ndarray): z= nu.ones_like(R)*z
            out= nu.array([self._Rforce(rr,zz) for rr,zz in zip(R,z)])
            return out
        # Far from the disk: use the cheaper approximating potential.
        if (R > 16.*self._hr or R > 6.) and hasattr(self,):
            return self._kp.Rforce(R,z)
        if R < 1.:
            R4max= 1.
        else:
            R4max= R
        kmax= self._kmaxFac*self._beta
        kmax= 2.*self._kmaxFac*self._beta
        # Integrate between consecutive J1 zeros up to kmax*R4max.
        maxj1zeroIndx= nu.argmin((self._j1zeros-kmax*R4max)**2.)
        ks= nu.array([0.5*(self._glx+1.)*self._dj1zeros[ii+1] + self._j1zeros[ii] for ii in range(maxj1zeroIndx)]).flatten()
        weights= nu.array([self._glw*self._dj1zeros[ii+1] for ii in range(maxj1zeroIndx)]).flatten()
        evalInt= ks*special.jn(1,ks*R)*(self._alpha**2.+ks**2.)**-1.5*(self._beta*nu.exp(-ks*nu.fabs(z))-ks*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks**2.)
        return -2.*nu.pi*self._alpha*nu.sum(weights*evalInt)
NAME: Rforce PURPOSE: evaluate radial force K_R (R,z) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: K_R (R,z) HISTORY: 2010-04-16 - Written - Bovy (NYU) DOCTEST:
def required(self, fn):
    """Request decorator that forces authentication before *fn* runs."""
    @functools.wraps(fn)
    def decorated(*args, **kwargs):
        # Requests to the auth blueprint itself pass through; everyone
        # else must be authenticated or gets bounced to the login page.
        authed = self._check_auth()
        if not authed and request.blueprint != self.blueprint.name:
            return redirect(url_for("%s.login" % self.blueprint.name,
                                    next=request.url))
        return fn(*args, **kwargs)
    return decorated
Request decorator. Forces authentication.
def DocFileSuite(*paths, **kw):
    """Return a unittest suite covering one or more doctest files.

    Each path is turned into a DocFileTest; when module-relative paths
    are enabled (the default), the owning package/module is normalized
    first so relative paths resolve consistently.

    NOTE(review): the kwargs key literals are stripped in this copy
    (likely 'module_relative' and 'package').
    """
    suite = unittest.TestSuite()

    # Normalize the base package for module-relative paths.
    if kw.get(, True):
        kw[] = _normalize_module(kw.get())

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
A unittest suite for one or more doctest files. The path to each doctest file is given as a string; the interpretation of that string depends on the keyword argument "module_relative". A number of options may be provided as keyword arguments: module_relative If "module_relative" is True, then the given file paths are interpreted as os-independent module-relative paths. By default, these paths are relative to the calling module's directory; but if the "package" argument is specified, then they are relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and may not be an absolute path (i.e., it may not begin with "/"). If "module_relative" is False, then the given file paths are interpreted as os-specific paths. These paths may be absolute or relative (to the current working directory). package A Python package or the name of a Python package whose directory should be used as the base directory for module relative paths. If "package" is not specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. setUp A set-up function. This is called before running the tests in each file. The setUp function will be passed a DocTest object. The setUp function can access the test globals as the globs attribute of the test passed. tearDown A tear-down function. This is called after running the tests in each file. The tearDown function will be passed a DocTest object. The tearDown function can access the test globals as the globs attribute of the test passed. globs A dictionary containing initial global variables for the tests. optionflags A set of doctest option flags expressed as an integer. parser A DocTestParser (or subclass) that should be used to extract tests from the files.
def parse_skip_comment(self):
    """Return the error codes skipped by an inline comment on the current token.

    NOTE(review): string literals have been stripped from this block (the
    default value, the substring test, the join separator/split marker, and
    the prefix test) -- presumably the '# noqa' marker and its code list;
    confirm against the original source.
    """
    skipped_error_codes =
    # Only comment tokens can carry a skip directive.
    if self.current.kind == tk.COMMENT:
        if in self.current.value:
            # Explicit code list: everything after the marker is joined back
            # into a single string of codes.
            skipped_error_codes = .join(
                self.current.value.split()[1:])
        elif self.current.value.startswith():
            # Bare marker with no code list: skip everything.
            skipped_error_codes =
    return skipped_error_codes
Parse a definition comment for noqa skips.
def up(self, role, root_priority, root_times):
    """Bring the port up: adopt the root bridge's priority and times, take
    the given *role*, and enter the LISTEN state (or DISABLE when the port
    is not enabled by its configuration)."""
    self.port_priority = root_priority
    self.port_times = root_times
    # Pick the starting state from the port's configured enable flag.
    if self.config_enable:
        next_state = PORT_STATE_LISTEN
    else:
        next_state = PORT_STATE_DISABLE
    self._change_role(role)
    self._change_status(next_state)
A port starts in the LISTEN state (or DISABLE when the port is not enabled by its configuration).
def set_joystick(self, x, y, n):
    """Forward joystick coordinates (x, y) from the SnakeBoard to robot *n*."""
    robot = self.robots[n]
    robot.set_joystick(x, y)
Receive joystick values from the SnakeBoard. x, y: joystick coordinates. n: number of the robot to give them to.
def add_station(self, lv_station):
    """Register *lv_station* as this grid's station and insert it into both
    the LV grid graph and the parent MV grid graph.

    Does nothing when a station is already set; raises for non-LV-station
    arguments.
    """
    if not isinstance(lv_station, LVStationDing0):
        raise Exception()
    # Only the first station registration takes effect.
    if self._station is not None:
        return
    self._station = lv_station
    self.graph_add_node(lv_station)
    # The station is also a node of the enclosing MV grid's graph.
    mv_grid = self.grid_district.lv_load_area.mv_grid_district.mv_grid
    mv_grid.graph_add_node(lv_station)
Adds a LV station to _station and grid graph if not already existing
def get_queryset(self):
    """Return the comments attached to the object named by the request's
    ``type`` and ``id`` query parameters.

    The parameters are already validated by the QuerySetPermission, so no
    extra checking is done here.
    """
    params = self.request.GET
    type_name = params.get("type")
    object_id = params.get("id")
    # Resolve the model class through the contenttypes framework.
    content_type = ContentType.objects.get(model=type_name.lower())
    target_model = content_type.model_class()
    target = target_model.objects.filter(id=object_id).first()
    return Comment.objects.filter_by_object(target)
Parameters are already validated in the QuerySetPermission
def getColRowWithinChannel(self, ra, dec, ch, wantZeroOffset=False, allowIllegalReturnValues=True):
    """Convert a sky position (ra, dec) to a (col, row) pixel position on
    CCD channel *ch*.

    BUG(review): ``colFrac`` and ``rowFrac`` are used below but never
    defined in this block -- the lines deriving them from ``r``, ``v1`` and
    ``v3`` (the fractional position inside the channel polygon) appear to
    be missing, so this raises NameError as written; restore them from the
    original source.
    """
    # Project the sky coordinate into pixel space of the default map.
    x, y = self.defaultMap.skyToPix(ra, dec)
    kepModule = self.getChannelAsPolygon(ch)
    # Position relative to the polygon's first corner plus the two edge
    # vectors spanning the channel footprint.
    r = np.array([x[0],y[0]]) - kepModule.polygon[0, :]
    v1 = kepModule.polygon[1, :] - kepModule.polygon[0, :]
    v3 = kepModule.polygon[3, :] - kepModule.polygon[0, :]
    # Map fractional positions onto the science pixel ranges
    # (cols 17..1106, rows 25..1038); colFrac/rowFrac undefined -- see
    # docstring BUG note.
    col = colFrac*(1106-17) + 17
    row = rowFrac*(1038-25) + 25
    if not allowIllegalReturnValues:
        if not self.colRowIsOnSciencePixel(col, row):
            msg = "Request position %7f %.7f " % (ra, dec)
            msg += "does not lie on science pixels for channel %i " % (ch)
            msg += "[ %.1f %.1f]" % (col, row)
            raise ValueError(msg)
    # One-based by default; callers may ask for zero-based coordinates.
    if not wantZeroOffset:
        col += 1
        row += 1
    return (col, row)
Returns (col, row) given a (ra, dec) coordinate and channel number.
def to_text(path):
    """Run an image invoice (JPG/PNG) through Tesseract OCR and return the
    extracted text as bytes.

    NOTE(review): the string literals naming the required executables and
    the command-line argument lists have been stripped from this block
    (presumably an ImageMagick ``convert`` preprocessing step piped into
    ``tesseract``); confirm against the original source.
    """
    import subprocess
    from distutils import spawn
    # Fail fast when either external tool is missing from PATH.
    if not spawn.find_executable():
        raise EnvironmentError()
    if not spawn.find_executable():
        raise EnvironmentError()
    # Preprocess the image and stream the result directly into the OCR
    # process via a pipe.
    convert = [, , , path, , , ]
    p1 = subprocess.Popen(convert, stdout=subprocess.PIPE)
    tess = [, , ]
    p2 = subprocess.Popen(tess, stdin=p1.stdout, stdout=subprocess.PIPE)
    out, err = p2.communicate()
    extracted_str = out
    return extracted_str
Wraps Tesseract OCR. Parameters ---------- path : str path of electronic invoice in JPG or PNG format Returns ------- extracted_str : str returns extracted text from image in JPG or PNG format
def cells(self) -> Generator[Tuple[int, int], None, None]:
    """Yield every (row, column) pair covered by this span, row-major."""
    for row in range(self.row_start, self.row_end):
        for column in range(self.column_start, self.column_end):
            yield (row, column)
Generate cells in span.
def contract(self, jobs, result):
    """Submit every job in *jobs* to the pool, then block until one result
    per submitted job has been pulled from *result*; return the results in
    retrieval order."""
    for job in jobs:
        WorkerPool.put(self, job)
    # Collect exactly as many results as jobs were submitted.
    return [result.get() for _ in xrange(len(jobs))]
Perform a contract on a number of jobs and block until a result is retrieved for each job.
def get_authservers(self, domainid, page=None):
    """Fetch the authentication servers configured for *domainid*.

    NOTE(review): string literals have been stripped from this block (the
    options-dict key and the ENDPOINTS lookup keys) -- presumably a 'page'
    option and an endpoint name/method pair; confirm against the original
    source.
    """
    opts = {}
    # Only forward paging information when a page was requested.
    if page:
        opts[] = page
    return self.api_call(
        ENDPOINTS[][],
        dict(domainid=domainid),
        **opts)
Get Authentication servers
def create_ref(self, ref, sha):
    """Create a git reference pointing at *sha* in this repository.

    NOTE(review): string literals are missing from this block -- the
    character counted by ``ref.count`` (presumably '/'), the JSON payload
    keys, and the URL segments passed to ``_build_url``; confirm against
    the original source.

    Returns a Reference on HTTP 201 Created, otherwise None.
    """
    json = None
    # The API rejects refs that are not fully qualified,
    # e.g. 'refs/heads/master'; both ref and sha must be non-empty.
    if ref and ref.count() >= 2 and sha:
        data = {: ref, : sha}
        url = self._build_url(, , base_url=self._api)
        # 201 is the only success status for this endpoint.
        json = self._json(self._post(url, data=data), 201)
    return Reference(json, self) if json else None
Create a reference in this repository. :param str ref: (required), fully qualified name of the reference, e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and contain at least two slashes, GitHub's API will reject it. :param str sha: (required), SHA1 value to set the reference to :returns: :class:`Reference <github3.git.Reference>` if successful else None
def ping():
    """Return True when the Marathon API answers its /ping endpoint.

    NOTE(review): several string literals have been stripped from this
    block (the query decode type, the response key and its expected value,
    and both log-message format strings); confirm against the original
    source.
    """
    try:
        response = salt.utils.http.query(
            "{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
            decode_type=,
            decode=True,
        )
        log.debug(
            ,
            response,
        )
        # A healthy Marathon answers the ping with a known body.
        if in response and response[].strip() == :
            return True
    except Exception as ex:
        # Best-effort health check: log the failure and fall through to
        # report the API as not responding.
        log.error(
            ,
            CONFIG[CONFIG_BASE_URL],
            ex,
        )
    return False
Return True if the Marathon API is responding, otherwise False.