Columns in this dataset slice:
Unnamed: 0 — int64, values 0 to 389k
code — string, lengths 26 to 79.6k
docstring — string, lengths 1 to 46.9k
388,700
def construct_publish_comands(additional_steps=None, nightly=False):
    # List construction reconstructed from a partially garbled source.
    publish_commands = additional_steps if additional_steps else []
    publish_commands.extend([
        'rm -rf dist',
        'python setup.py sdist bdist_wheel{nightly}'.format(
            nightly=' --nightly' if nightly else ''),
        'twine upload dist/*',
    ])
    return publish_commands
Get the shell commands we'll use to actually build and publish a package to PyPI.
388,701
def set_xticks(self, row, column, ticks):
    subplot = self.get_subplot_at(row, column)
    subplot.set_xticks(ticks)
Manually specify the x-axis tick values. :param row,column: specify the subplot. :param ticks: list of tick values.
388,702
def _import_LOV( baseuri="http://lov.okfn.org/dataset/lov/api/v2/vocabulary/list", keyword=""): printDebug("----------\nReading source... <%s>" % baseuri) query = requests.get(baseuri, params={}) all_options = query.json() options = [] if keyword: for x in all_options: if keyword in x[].lower() or keyword in x[][0][ ].lower() or keyword in x[].lower(): options.append(x) else: options = all_options printDebug("----------\n%d results found.\n----------" % len(options)) if options: counter = 1 for x in options: uri, title, ns = x[], x[][0][], x[] click.echo( click.style("[%d]" % counter, fg=) + click.style(uri + " ==> ", fg=) + click.style(title, fg=)) counter += 1 while True: var = input(Style.BRIGHT + "=====\nSelect ID to import: (q=quit)\n" + Style.RESET_ALL) if var == "q": break else: try: _id = int(var) ontouri = options[_id - 1][] print(Fore.RED + "\n---------\n" + ontouri + "\n---------" + Style.RESET_ALL) action_analyze([ontouri]) if click.confirm( ): action_import(ontouri) return except: print("Error retrieving file. Import failed.") continue
2016-03-02: import from json list
388,703
def print_plugins(folders, exit_code=0):
    modules = plugins.get_plugin_modules(folders)
    pluginclasses = sorted(plugins.get_plugin_classes(modules), key=lambda x: x.__name__)
    for pluginclass in pluginclasses:
        print(pluginclass.__name__)
        doc = strformat.wrap(pluginclass.__doc__, 80)
        print(strformat.indent(doc))
        print()
    sys.exit(exit_code)
Print available plugins and exit.
388,704
def getXmlText(parent, tag):
    elem = parent.getElementsByTagName(tag)[0]
    rc = []
    for node in elem.childNodes:
        if node.nodeType == node.TEXT_NODE:
            rc.append(node.data)
    return "".join(rc)
Return XML content of given tag in parent element.
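A minimal usage sketch for the helper above, assuming a document parsed with the standard-library xml.dom.minidom; the example XML and tag name are invented for illustration.

from xml.dom import minidom

doc = minidom.parseString("<package><name>linkchecker</name></package>")
root = doc.documentElement
print(getXmlText(root, "name"))  # -> "linkchecker"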
388,705
def sg_layer_func(func): r @wraps(func) def wrapper(tensor, **kwargs): r from . import sg_initializer as init from . import sg_activation opt = tf.sg_opt(kwargs) + sg_get_context() try: shape = tensor.get_shape().as_list() opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1], bn=False, ln=False, dout=0, summary=True, scale=True) if opt.regularizer == : opt.regularizer = lambda x: tf.reduce_mean(tf.abs(x)) elif opt.regularizer == : opt.regularizer = lambda x: tf.square(tf.reduce_mean(tf.square(x))) else: opt.regularizer = None assert not (opt.bn and opt.ln), opt += tf.sg_opt(bias=not (opt.bn or opt.ln)) finally: pass if opt.name is None: opt.name = func.__name__.replace(, ) exist_layers = [] for t in tf.global_variables(): scope_name = tf.get_variable_scope().name prefix = scope_name + if len(scope_name) > 0 else i = t.name.rfind(prefix + opt.name) if i >= 0: exist_layers.append(t.name[i:].split()[-2]) exist_layers = list(set(exist_layers)) if len(exist_layers) == 0: opt.name += else: opt.name += % (max([int(n.split()[-1]) for n in exist_layers]) + 1) with tf.variable_scope(opt.name, reuse=opt.reuse) as scope: out = func(tensor, opt) out_shape = out.get_shape() if opt.bn: beta = init.constant(, opt.dim, summary=opt.summary) gamma = init.constant(, opt.dim, value=1, summary=opt.summary, trainable=opt.scale) mean_running = init.constant(, opt.dim, trainable=False, summary=opt.summary) variance_running = init.constant(, opt.dim, value=1, trainable=False, summary=opt.summary) if out_shape.ndims in [2, 3, 4]: if out_shape.ndims == 2: out = tf.expand_dims(out, axis=1) out = tf.expand_dims(out, axis=2) elif out_shape.ndims == 3: out = tf.expand_dims(out, axis=2) fused_eps = tf.sg_eps if tf.sg_eps > 1e-5 else 1e-5 out, mean, variance = tf.cond( _phase, lambda: tf.nn.fused_batch_norm(out, gamma, beta, epsilon=fused_eps), lambda: tf.nn.fused_batch_norm(out, gamma, beta, mean=mean_running, variance=variance_running, epsilon=fused_eps, is_training=False), ) if out_shape.ndims == 2: out = tf.squeeze(out, axis=[1, 2]) elif out_shape.ndims == 3: out = tf.squeeze(out, axis=2) else: mean, variance = tf.nn.moments(out, axes=list(range(len(out.get_shape()) - 1))) out = tf.cond( _phase, lambda: tf.nn.batch_normalization(out, mean, variance, beta, gamma, tf.sg_eps), lambda: tf.nn.batch_normalization(out, mean_running, variance_running, beta, gamma, tf.sg_eps) ) decay = 0.99 tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_running.assign(mean_running * decay + mean * (1 - decay))) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_running.assign(variance_running * decay + variance * (1 - decay))) if opt.ln: beta = init.constant(, opt.dim, summary=opt.summary) if opt.scale: gamma = init.constant(, opt.dim, value=1, summary=opt.summary) mean, variance = tf.nn.moments(out, axes=[len(out.get_shape()) - 1], keep_dims=True) out = (out - mean) / tf.sqrt(variance + tf.sg_eps) if opt.scale: out = gamma * out + beta else: out = out + beta if opt.act: out = getattr(sg_activation, + opt.act.lower())(out) if opt.dout: out = tf.cond(_phase, lambda: tf.nn.dropout(out, 1 - opt.dout), lambda: out) out = tf.identity(out, ) if opt.summary: tf.sg_summary_activation(out) out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs) + sg_get_context(), prev=tensor, is_layer=True, name=opt.name) out.sg_reuse = types.MethodType(sg_reuse, out) return out return wrapper
r"""Decorates a function `func` as a sg_layer function. Args: func: function to decorate
388,706
def _create_affine(x_axis, y_axis, z_axis, image_pos, voxel_sizes):
    affine = numpy.array(
        [[x_axis[0] * voxel_sizes[0], y_axis[0] * voxel_sizes[1], z_axis[0] * voxel_sizes[2], image_pos[0]],
         [x_axis[1] * voxel_sizes[0], y_axis[1] * voxel_sizes[1], z_axis[1] * voxel_sizes[2], image_pos[1]],
         [x_axis[2] * voxel_sizes[0], y_axis[2] * voxel_sizes[1], z_axis[2] * voxel_sizes[2], image_pos[2]],
         [0, 0, 0, 1]])
    return affine
Function to generate the affine matrix for a DICOM series. This method was based on http://nipy.org/nibabel/dicom/dicom_orientation.html :param x_axis, y_axis, z_axis: direction cosine vectors for the image axes :param image_pos: position of the first voxel (ImagePositionPatient) :param voxel_sizes: voxel spacing along each axis
388,707
def get_waveset(model):
    if not isinstance(model, Model):
        # Error message text approximated; the original literal was lost.
        raise SynphotError('{0} is not a Model.'.format(model))
    if isinstance(model, _CompoundModel):
        waveset = model._tree.evaluate(WAVESET_OPERATORS, getter=None)
    else:
        waveset = _get_sampleset(model)
    return waveset
Get optimal wavelengths for sampling a given model. Parameters ---------- model : `~astropy.modeling.Model` Model. Returns ------- waveset : array-like or `None` Optimal wavelengths. `None` if undefined. Raises ------ synphot.exceptions.SynphotError Invalid model.
388,708
def show_all(self): fname = self.title title = self.title nblocks = self.nblocks silent = self._silent marquee = self.marquee for index,block in enumerate(self.src_blocks_colored): if silent[index]: print >>io.stdout, marquee( % (title,index,nblocks-index-1)) else: print >>io.stdout, marquee( % (title,index,nblocks-index-1)) print >>io.stdout, block, sys.stdout.flush()
Show entire demo on screen, block by block
388,709
def _get_batches(self, mapping, batch_size=10000): action = mapping.get("action", "insert") fields = mapping.get("fields", {}).copy() static = mapping.get("static", {}) lookups = mapping.get("lookups", {}) record_type = mapping.get("record_type") if action == "insert" and "Id" in fields: del fields["Id"] columns = [] columns.extend(fields.keys()) columns.extend(lookups.keys()) columns.extend(static.keys()) if record_type: columns.append("RecordTypeId") ) record_type_id = self.sf.query( query.format(mapping.get("sf_object"), record_type) )["records"][0]["Id"] query = self._query_db(mapping) total_rows = 0 batch_num = 1 def start_batch(): batch_file = io.BytesIO() writer = unicodecsv.writer(batch_file) writer.writerow(columns) batch_ids = [] return batch_file, writer, batch_ids batch_file, writer, batch_ids = start_batch() for row in query.yield_per(batch_size): total_rows += 1 pkey = row[0] row = list(row[1:]) + list(static.values()) if record_type: row.append(record_type_id) writer.writerow([self._convert(value) for value in row]) batch_ids.append(pkey) if not total_rows % batch_size: batch_file.seek(0) self.logger.info(" Processing batch {}".format(batch_num)) yield batch_file, batch_ids batch_file, writer, batch_ids = start_batch() batch_num += 1 if batch_ids: batch_file.seek(0) yield batch_file, batch_ids self.logger.info( " Prepared {} rows for import to {}".format( total_rows, mapping["sf_object"] ) )
Get data from the local db
388,710
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None, set_identifier=None):
    self._halt_if_already_deleted()
    return self._add_record(AAAAResourceRecordSet, **values)
Creates an AAAA record attached to this hosted zone. :param str name: The fully qualified name of the record to add. :param list values: A list of value strings for the record. :keyword int ttl: The time-to-live of the record (in seconds). :keyword int weight: *For weighted record sets only*. Among resource record sets that have the same combination of DNS name and type, a value that determines what portion of traffic for the current resource record set is routed to the associated location. Ranges from 0-255. :keyword str region: *For latency-based record sets*. The Amazon EC2 region where the resource that is specified in this resource record set resides. :keyword str set_identifier: *For weighted and latency resource record sets only*. An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. 1-128 chars. :rtype: tuple :returns: A tuple in the form of ``(rrset, change_info)``, where ``rrset`` is the newly created AAAAResourceRecordSet instance.
388,711
def order(self, mechanism, purview):
    if self is Direction.CAUSE:
        return purview, mechanism
    elif self is Direction.EFFECT:
        return mechanism, purview

    from . import validate
    return validate.direction(self)
Order the mechanism and purview in time. If the direction is ``CAUSE``, then the purview is at |t-1| and the mechanism is at time |t|. If the direction is ``EFFECT``, then the mechanism is at time |t| and the purview is at |t+1|.
388,712
def _make_query(self, ID: str, methodname: str, returnable: bool, *args: Any, **kwargs: Any):
    query = {
        "MPRPC": self.VERSION,
        "ID": ID,
        "METHOD": methodname,
        "RETURN": returnable,
        "ARGS": args,
        "KWARGS": kwargs
    }
    print(query)
    return query
Wrap a call's request ID, method name and parameters into request data. Parameters: ID (str): - task ID methodname (str): - name of the method to call returnable (bool): - whether a result must be returned args (Any): - positional arguments of the method to call kwargs (Any): - keyword arguments of the method to call Return: (Dict[str, Any]) : - the request in the form of a Python dict
388,713
def _make_cmap(colors, position=None, bit=False):
    bit_rgb = np.linspace(0, 1, 256)
    if position == None:
        position = np.linspace(0, 1, len(colors))
    else:
        if len(position) != len(colors):
            sys.exit("position length must be the same as colors")
        elif position[0] != 0 or position[-1] != 1:
            sys.exit("position must start with 0 and end with 1")
    palette = [(i, (float(r), float(g), float(b), float(a)))
               for i, (r, g, b, a) in enumerate(colors)]
    cmap = Colormap(*palette)
    return cmap
_make_cmap takes a list of tuples which contain RGB values. The RGB values may either be in 8-bit [0 to 255] (in which case bit must be set to True when called) or arithmetic [0 to 1] (default). _make_cmap returns a cmap with equally spaced colors. Arrange your tuples so that the first color is the lowest value for the colorbar and the last is the highest. position contains values from 0 to 1 to dictate the location of each color.
388,714
def argmax(iterable, key=None, both=False):
    if key is not None:
        it = imap(key, iterable)
    else:
        it = iter(iterable)
    score, argmax = reduce(max, izip(it, count()))
    if both:
        return argmax, score
    return argmax
>>> argmax([4,2,-5]) 0 >>> argmax([4,2,-5], key=abs) 2 >>> argmax([4,2,-5], key=abs, both=True) (2, 5)
388,715
def rekey(self, uuid=None, offset=None, template_attribute=None, credential=None):
    operation = Operation(OperationEnum.REKEY)
    request_payload = payloads.RekeyRequestPayload(
        unique_identifier=uuid,
        offset=offset,
        template_attribute=template_attribute
    )
    batch_item = messages.RequestBatchItem(
        operation=operation,
        request_payload=request_payload
    )

    request = self._build_request_message(credential, [batch_item])
    response = self._send_and_receive_message(request)

    batch_item = response.batch_items[0]
    payload = batch_item.response_payload

    # Result dictionary keys restored from the key/value table in the docstring.
    result = {}
    if payload:
        result['unique_identifier'] = payload.unique_identifier
        if payload.template_attribute is not None:
            result['template_attribute'] = payload.template_attribute
    result['result_status'] = batch_item.result_status.value
    try:
        result['result_reason'] = batch_item.result_reason.value
    except Exception:
        result['result_reason'] = batch_item.result_reason
    try:
        result['result_message'] = batch_item.result_message.value
    except Exception:
        result['result_message'] = batch_item.result_message
    return result
Rekey an existing managed cryptographic object. Args: uuid (string): The unique identifier of the managed cryptographic object that should be rekeyed. Optional, defaults to None. offset (int): An integer specifying, in seconds, the difference between the rekeyed object's initialization date and activation date. Optional, defaults to None. template_attribute (TemplateAttribute): A TemplateAttribute struct containing the attributes to set on the newly rekeyed object. Optional, defaults to None. credential (Credential): A Credential struct containing a set of authorization parameters for the operation. Optional, defaults to None. Returns: dict: The results of the rekey operation, containing the following key/value pairs: Key | Value ---------------------------|----------------------------------- 'unique_identifier' | (string) The unique ID of the | rekeyed cryptographic object. 'template_attribute' | (TemplateAttribute) A struct | containing attributes set by the | server. Optional. 'result_status' | (ResultStatus) An enumeration | indicating the status of the | operation result. 'result_reason' | (ResultReason) An enumeration | providing context for the result | status. 'result_message' | (string) A message providing | additional context for the | operation result.
388,716
def for_all_targets(self, module, func, filter_func=None):
    for target in self.targets(module):
        if filter_func is None or filter_func(target):
            func(target)
Call func once for all of the targets of this module.
388,717
def process_tile(tile_x, tile_y, tile_size, pix, draw, image):
    logging.debug('Processing tile (%s, %s)', tile_x, tile_y)  # log message text reconstructed
    n, e, s, w = triangle_colors(tile_x, tile_y, tile_size, pix)
    d_ne = get_color_dist(n, e)
    d_nw = get_color_dist(n, w)
    d_se = get_color_dist(s, e)
    d_sw = get_color_dist(s, w)
    # The comparison and the split labels were lost; the branch below is an
    # assumed reconstruction that pairs the diagonals with the smaller color distance.
    if d_ne + d_sw < d_nw + d_se:
        split = 'ne'
        top_color = get_average_color([n, e])
        bottom_color = get_average_color([s, w])
    else:
        split = 'nw'
        top_color = get_average_color([n, w])
        bottom_color = get_average_color([s, e])
    draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color, draw)
Process a tile whose top left corner is at the given x and y coordinates.
388,718
def byteswap(data, word_size=4):
    return reduce(lambda x, y: x + ''.join(reversed(y)), chunks(data, word_size), '')
Swap the byte-ordering in a packet with N=4 bytes per word
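An illustrative sketch of the word-wise swap, assuming a chunks() helper (not shown in the original snippet) that splits the input into word_size pieces and functools.reduce in scope.

from functools import reduce

def chunks(data, size):
    # hypothetical helper: consecutive size-byte slices of data
    return [data[i:i + size] for i in range(0, len(data), size)]

print(byteswap("ABCDEFGH"))  # -> "DCBAHGFE": each 4-byte word is reversed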
388,719
def opendocs(where='index', how='default'):
    # Default arguments, the docs path and the URL format string are reconstructed
    # from context; only the browser dispatch logic was intact.
    import webbrowser
    docs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'docs')
    index = os.path.join(docs_dir, '%s.html' % where)
    builddocs()
    url = 'file://%s' % os.path.abspath(index)
    if how in ('d', 'default'):
        webbrowser.open(url)
    elif how in ('t', 'tab'):
        webbrowser.open_new_tab(url)
    elif how in ('n', 'w', 'window'):
        webbrowser.open_new(url)
Rebuild documentation and opens it in your browser. Use the first argument to specify how it should be opened: `d` or `default`: Open in new tab or new window, using the default method of your browser. `t` or `tab`: Open documentation in new tab. `n`, `w` or `window`: Open documentation in new window.
388,720
def setup_queue(self):
    logger.debug("Declaring queue %s" % self._queue)
    self._channel.queue_declare(self.on_queue_declareok, self._queue, durable=True)
Declare the queue When completed, the on_queue_declareok method will be invoked by pika.
388,721
def create_rectangular_prism(origin, size):
    from lace.topology import quads_to_tris

    lower_base_plane = np.array([
        origin,
        origin + np.array([size[0], 0, 0]),
        origin + np.array([size[0], 0, size[2]]),
        origin + np.array([0, 0, size[2]]),
    ])
    upper_base_plane = lower_base_plane + np.array([0, size[1], 0])

    vertices = np.vstack([lower_base_plane, upper_base_plane])

    faces = quads_to_tris(np.array([
        [0, 1, 2, 3],  # lower base
        [7, 6, 5, 4],  # upper base
        [4, 5, 1, 0],  # side faces
        [5, 6, 2, 1],
        [6, 7, 3, 2],
        [3, 7, 4, 0],
    ]))

    return Mesh(v=vertices, f=faces)
Return a Mesh which is an axis-aligned rectangular prism. One vertex is `origin`; the diametrically opposite vertex is `origin + size`. size: 3x1 array.
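A usage sketch, assuming the lace library (Mesh and quads_to_tris) used by the function above is available; the expected shapes follow from the eight box corners and two triangles per face.

import numpy as np

prism = create_rectangular_prism(origin=np.array([0., 0., 0.]), size=np.array([1., 2., 3.]))
print(prism.v.shape)  # should be (8, 3): the eight corners of the box
print(prism.f.shape)  # should be (12, 3): two triangles per quad face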
388,722
def optimal_t(self, t_max=100, plot=False, ax=None): tasklogger.log_start("optimal t") t, h = self.von_neumann_entropy(t_max=t_max) t_opt = vne.find_knee_point(y=h, x=t) tasklogger.log_info("Automatically selected t = {}".format(t_opt)) tasklogger.log_complete("optimal t") if plot: if ax is None: fig, ax = plt.subplots() show = True else: show = False ax.plot(t, h) ax.scatter(t_opt, h[t == t_opt], marker=, c=, s=50) ax.set_xlabel("t") ax.set_ylabel("Von Neumann Entropy") ax.set_title("Optimal t = {}".format(t_opt)) if show: plt.show() return t_opt
Find the optimal value of t Selects the optimal value of t based on the knee point of the Von Neumann Entropy of the diffusion operator. Parameters ---------- t_max : int, default: 100 Maximum value of t to test plot : boolean, default: False If true, plots the Von Neumann Entropy and knee point ax : matplotlib.Axes, default: None If plot=True and ax is not None, plots the VNE on the given axis Otherwise, creates a new axis and displays the plot Returns ------- t_opt : int The optimal value of t
388,723
async def open(self) -> : LOGGER.debug() await super().open() for path_rr_id in Tails.links(self._dir_tails, self.did): await self._sync_revoc(basename(path_rr_id)) LOGGER.debug() return self
Explicit entry. Perform ancestor opening operations, then synchronize revocation registry to tails tree content. :return: current object
388,724
def db_scan_block( block_id, op_list, db_state=None ): try: assert db_state is not None, "BUG: no state given" except Exception, e: log.exception(e) log.error("FATAL: no state given") os.abort() log.debug("SCAN BEGIN: {} ops at block {}".format(len(op_list), block_id)) checked_ops = [] for op_data in op_list: try: opcode = op_get_opcode_name( op_data[] ) assert opcode is not None, "BUG: unknown op " % op except Exception, e: log.exception(e) log.error("FATAL: invalid operation") os.abort() if opcode not in OPCODE_CREATION_OPS: continue op_check( db_state, op_data, block_id, checked_ops ) checked_ops.append( op_data ) collisions = db_state.find_collisions( checked_ops ) db_state.put_collisions( block_id, collisions ) log.debug("SCAN END: {} ops at block {} ({} collisions)".format(len(op_list), block_id, len(collisions)))
(required by virtualchain state engine) Given the block ID and the list of virtualchain operations in the block, do block-level preprocessing: * find the state-creation operations we will accept * make sure there are no collisions. This modifies op_list, but returns nothing. This aborts on runtime error.
388,725
def parse_port_pin(name_str):
    if len(name_str) < 3:
        raise ValueError("Expecting pin name to be at least 3 characters.")
    if name_str[0] != 'P':
        raise ValueError("Expecting pin name to start with P")
    if name_str[1] < 'A' or name_str[1] > 'K':
        raise ValueError("Expecting pin port to be between A and K")
    port = ord(name_str[1]) - ord('A')
    pin_str = name_str[2:]
    if not pin_str.isdigit():
        raise ValueError("Expecting numeric pin number.")
    return (port, int(pin_str))
Parses a string and returns a (port-num, pin-num) tuple.
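An illustrative call of the parser above, assuming STM32-style pin names such as "PA5".

print(parse_port_pin("PA5"))   # -> (0, 5)    port A, pin 5
print(parse_port_pin("PK15"))  # -> (10, 15)  port K, pin 15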
388,726
def parse_file(filename): poscar_read = False poscar_string = [] dataset = [] all_dataset = [] all_dataset_aug = {} dim = None dimline = None read_dataset = False ngrid_pts = 0 data_count = 0 poscar = None with zopen(filename, "rt") as f: for line in f: original_line = line line = line.strip() if read_dataset: toks = line.split() for tok in toks: if data_count < ngrid_pts: x = data_count % dim[0] y = int(math.floor(data_count / dim[0])) % dim[1] z = int(math.floor(data_count / dim[0] / dim[1])) dataset[x, y, z] = float(tok) data_count += 1 if data_count >= ngrid_pts: read_dataset = False data_count = 0 all_dataset.append(dataset) elif not poscar_read: if line != "" or len(poscar_string) == 0: poscar_string.append(line) elif line == "": poscar = Poscar.from_string("\n".join(poscar_string)) poscar_read = True elif not dim: dim = [int(i) for i in line.split()] ngrid_pts = dim[0] * dim[1] * dim[2] dimline = line read_dataset = True dataset = np.zeros(dim) elif line == dimline: read_dataset = True dataset = np.zeros(dim) else: key = len(all_dataset) - 1 if key not in all_dataset_aug: all_dataset_aug[key] = [] all_dataset_aug[key].append(original_line) if len(all_dataset) == 4: data = {"total": all_dataset[0], "diff_x": all_dataset[1], "diff_y": all_dataset[2], "diff_z": all_dataset[3]} data_aug = {"total": all_dataset_aug.get(0, None), "diff_x": all_dataset_aug.get(1, None), "diff_y": all_dataset_aug.get(2, None), "diff_z": all_dataset_aug.get(3, None)} diff_xyz = np.array([data["diff_x"], data["diff_y"], data["diff_z"]]) diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2])) ref_direction = np.array([1.01, 1.02, 1.03]) ref_sign = np.sign(np.dot(ref_direction, diff_xyz)) diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign) data["diff"] = diff.reshape((dim[0], dim[1], dim[2])) elif len(all_dataset) == 2: data = {"total": all_dataset[0], "diff": all_dataset[1]} data_aug = {"total": all_dataset_aug.get(0, None), "diff": all_dataset_aug.get(1, None)} else: data = {"total": all_dataset[0]} data_aug = {"total": all_dataset_aug.get(0, None)} return poscar, data, data_aug
Convenience method to parse a generic volumetric data file in the vasp like format. Used by subclasses for parsing file. Args: filename (str): Path of file to parse Returns: (poscar, data)
388,727
def recarraydifference(X, Y):
    if len(Y) > 0:
        Z = recarrayisin(X, Y)
        return X[np.invert(Z)]
    else:
        return X
Records of a numpy recarray (or ndarray with structured dtype) that do not appear in another. Fast routine for determining which records in numpy array `X` do not appear in numpy recarray `Y`. Record array version of func:`tabular.fast.arraydifference`. **Parameters** **X** : numpy recarray Numpy recarray to compare to numpy recarray `Y`. Return subset of `X` corresponding to elements not in `Y`. **Y** : numpy recarray Numpy recarray to which numpy recarray `X` is compared. Return subset of `X` corresponding to elements not in `Y`. **Returns** **Z** : numpy recarray Subset of `X` corresponding to elements not in `Y`. **See Also:** :func:`tabular.fast.arraydifference`, :func:`tabular.fast.recarrayisin`
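A small sketch with structured arrays; it assumes the recarrayisin helper used internally is importable alongside the function above, and the field names are invented for illustration.

import numpy as np

X = np.array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=[('id', int), ('tag', 'U1')])
Y = np.array([(2, 'b')], dtype=[('id', int), ('tag', 'U1')])
print(recarraydifference(X, Y))  # records of X not present in Y: (1, 'a') and (3, 'c')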
388,728
def feed_line(self, line): self.line += 1 pos = 0 while pos < len(line): loc_start = TextLocationSingle(self.filename, self.line, pos + 1) if self.state is State.NORMAL: item_re = RE_TOKEN thing = elif self.state is State.STRING: item_re = RE_STRING_ITEM thing = elif self.state is State.BINARRAY: item_re = RE_BINARRAY_ITEM[self.binarray_base] thing = else: assert 0 match = item_re.match(line, pos) if not match: raise ReadError(f) pos = match.end() loc_end = TextLocationSingle(self.filename, self.line, pos + 1) loc = loc_start - loc_end if match[] is not None: raise ReadError(f) if self.state is State.NORMAL: if match[] is not None: self.stack.append(StackEntryList(loc_start, [])) elif match[] is not None: if not self.stack: raise ReadError(f) top = self.stack.pop() if not isinstance(top, StackEntryList): top.raise_unclosed_error() yield from self._feed_node(top.items, top.start - loc_end) elif match[] is not None: value = Symbol(match[]) yield from self._feed_node(value, loc) elif match[] is not None: self.stack.append(StackEntryComment(loc)) elif match[] is not None: value = match[] == yield from self._feed_node(value, loc) elif match[] is not None: yield from self._feed_node(None, loc) elif match[] is not None: if match[] is not None: value = int(match[], 0) elif match[] is not None: value = ord(match[]) elif match[] is not None: value = ord(ESCAPE_TO_CHAR[match[]]) elif match[] is not None: value = int(match[], 16) if value not in range(0x110000): raise ReadError( f) else: assert 0 if match[] is not None: width = int(match[]) if value < 0: value += 1 << width if value not in range(1 << width): raise ReadError(f) value = BinWord(width, value) yield from self._feed_node(value, loc) elif match[] is not None: self.binarray_base = { : 2, : 8, None: 10, : 16, }[match[]] self.binarray_data = [] self.binarray_width = int(match[]) self.token_start = loc_start self.state = State.BINARRAY elif match[] is not None: self.state = State.STRING self.token_start = loc_start self.string_buffer = StringIO() if match[] is not None: self.binarray_width = int(match[]) else: self.binarray_width = None elif self.state is State.STRING: if match[] is not None: self.state = State.NORMAL value = self.string_buffer.getvalue() loc = self.token_start - loc_end if self.binarray_width is not None: vals = [ord(x) for x in value] for x in vals: if x not in range(1 << self.binarray_width): raise ReadError( f) value = BinArray(vals, width=self.binarray_width) yield from self._feed_node(value, loc) elif match[] is not None: self.string_buffer.write(match[]) elif match[] is not None: c = ESCAPE_TO_CHAR[match[]] self.string_buffer.write(c) elif match[] is not None: code = int(match[], 16) if code not in range(0x110000): raise ReadError( f) self.string_buffer.write(chr(code)) else: assert 0 elif self.state is State.BINARRAY: if match[] is not None: self.state = State.NORMAL value = BinArray(self.binarray_data, width=self.binarray_width) loc = self.token_start - loc_end yield from self._feed_node(value, loc) elif match[] is not None: value = int(match[], self.binarray_base) if value < 0: value += 1 << self.binarray_width if value not in range(1 << self.binarray_width): raise ReadError(f) self.binarray_data.append(value) else: assert 0
Feeds one line of input into the reader machine. This method is a generator that yields all top-level S-expressions that have been recognized on this line (including multi-line expressions whose last character is on this line).
388,729
def set_html(self, html, url=None):
    if url:
        self.conn.issue_command('SetHtml', html, url)  # command name assumed
    else:
        self.conn.issue_command('SetHtml', html)
Sets custom HTML in our Webkit session and allows to specify a fake URL. Scripts and CSS is dynamically fetched as if the HTML had been loaded from the given URL.
388,730
def root(reference_labels, estimated_labels):
    validate(reference_labels, estimated_labels)
    ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
    est_roots = encode_many(estimated_labels, False)[0]
    comparison_scores = (ref_roots == est_roots).astype(np.float)
    comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
    return comparison_scores
Compare chords according to roots. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> est_intervals, est_labels = mir_eval.util.adjust_intervals( ... est_intervals, est_labels, ref_intervals.min(), ... ref_intervals.max(), mir_eval.chord.NO_CHORD, ... mir_eval.chord.NO_CHORD) >>> (intervals, ... ref_labels, ... est_labels) = mir_eval.util.merge_labeled_intervals( ... ref_intervals, ref_labels, est_intervals, est_labels) >>> durations = mir_eval.util.intervals_to_durations(intervals) >>> comparisons = mir_eval.chord.root(ref_labels, est_labels) >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations) Parameters ---------- reference_labels : list, len=n Reference chord labels to score against. estimated_labels : list, len=n Estimated chord labels to score against. Returns ------- comparison_scores : np.ndarray, shape=(n,), dtype=float Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of gamut.
388,731
def get_kwargs(self, form, name):
    # Keyword names inferred from the accessor methods.
    kwargs = {
        'prefix': self.get_prefix(form, name),
        'initial': self.get_initial(form, name),
    }
    kwargs.update(self.default_kwargs)
    return kwargs
Return the keyword arguments that are used to instantiate the formset.
388,732
def read(fname):
    fpath = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(fpath, 'r', 'utf-8') as fhandle:  # mode and encoding assumed
        return fhandle.read().strip()
utility function to read and return file contents
388,733
def decrement(self, key, value=1):
    return self._memcache.decr(self._prefix + key, value)
Decrement the value of an item in the cache. :param key: The cache key :type key: str :param value: The decrement value :type value: int :rtype: int or bool
388,734
def _align_with_substrings(self, chains_to_skip = set()): for c in self.representative_chains: if c not in chains_to_skip: fasta_sequence = self.fasta[c] substring_matches = {} for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.iteritems()): uniparc_sequence = str(uniparc_sequence) idx = uniparc_sequence.find(fasta_sequence) if idx != -1: substring_matches[uniparc_id] = 0 elif len(fasta_sequence) > 30: idx = uniparc_sequence.find(fasta_sequence[5:-5]) if idx != -1: substring_matches[uniparc_id] = 5 else: idx = uniparc_sequence.find(fasta_sequence[7:-7]) if idx != -1: substring_matches[uniparc_id] = 7 elif len(fasta_sequence) > 15: idx = uniparc_sequence.find(fasta_sequence[3:-3]) if idx != -1: substring_matches[uniparc_id] = 3 self.substring_matches[c] = substring_matches colortext.pcyan( * 100) pprint.pprint(self.substring_matches) if self.restrict_to_uniparc_values: for c in self.representative_chains: if set(map(str, self.substring_matches[c].keys())).intersection(set(self.restrict_to_uniparc_values)) > 0: restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in self.substring_matches[c].keys() if str(k) in self.restrict_to_uniparc_values) if len(restricted_matches) != len(self.substring_matches[c]): removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches))) self.substring_matches[c] = restricted_matches for c_1, related_chains in self.equivalence_fiber.iteritems(): for c_2 in related_chains: self.substring_matches[c_2] = self.substring_matches[c_1]
Simple substring-based matching
388,735
async def create_vm(self, *, preset_name, image, flavor, security_groups=None, userdata=None, key_name=None, availability_zone=None, subnet=None): info = { : next(self._id_it), : preset_name, : [], : 0, : VmState.RUNNING, : flavor, : image, : {: }, : 1522753481, : [, , ] } logging.debug(, info) vm = Vm(self, **info) self._vms[vm.id] = vm logging.debug(, vm) return None
Dummy create_vm func.
388,736
def toString(self): slist = self.toList() sign = if slist[0] == else string = .join([ % v for v in slist[1:]]) return sign + string
Returns date as string.
388,737
def entry_modification_time(self):
    timestamp = self._fsntfs_attribute.get_entry_modification_time_as_integer()
    return dfdatetime_filetime.Filetime(timestamp=timestamp)
dfdatetime.Filetime: entry modification time or None if not set.
388,738
def create_container_service(access_token, subscription_id, resource_group, service_name, \ agent_count, agent_vm_size, agent_dns, master_dns, admin_user, location, public_key=None,\ master_count=3, orchestrator=, app_id=None, app_secret=None, admin_password=None, \ ostype=): endpoint = .join([get_rm_endpoint(), , subscription_id, , resource_group, , service_name, , ACS_API]) acs_body = {: location} properties = {: {: orchestrator}} properties[] = {: master_count, : master_dns} ap_profile = {: } ap_profile[] = agent_count ap_profile[] = agent_vm_size ap_profile[] = agent_dns properties[] = [ap_profile] if ostype == : linux_profile = {: admin_user} linux_profile[] = {: [{: public_key}]} properties[] = linux_profile else: windows_profile = {: admin_user, : admin_password} properties[] = windows_profile if orchestrator == : sp_profile = {: app_id} sp_profile[] = app_secret properties[] = sp_profile acs_body[] = properties body = json.dumps(acs_body) return do_put(endpoint, body, access_token)
Create a new container service - include app_id and app_secret if using Kubernetes. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. service_name (str): Name of container service. agent_count (int): The number of agent VMs. agent_vm_size (str): VM size of agents, e.g. Standard_D1_v2. agent_dns (str): A unique DNS string for the agent DNS. master_dns (str): A unique string for the master DNS. admin_user (str): Admin user name. location (str): Azure data center location, e.g. westus. public_key (str): RSA public key (utf-8). master_count (int): Number of master VMs. orchestrator (str): Container orchestrator. E.g. DCOS, Kubernetes. app_id (str): Application ID for Kubernetes. app_secret (str): Application secret for Kubernetes. admin_password (str): Admin user password. ostype (str): Operating system. Windows or Linux. Returns: HTTP response. Container service JSON model.
388,739
def is_course_complete(last_update):
    rv = False
    if last_update >= 0:
        delta = time.time() - last_update
        max_delta = total_seconds(datetime.timedelta(days=30))
        if delta > max_delta:
            rv = True
    return rv
Determine if the course is likely to have been terminated or not. We return True if the timestamp given by last_update is 30 days or older than today's date. Otherwise, we return False, since it is probably too soon to declare the course complete. The intended use case for this is to detect if a given course has not seen any update in the last 30 days or more.
388,740
def get_public_key(key, passphrase=None, asObj=False): * if isinstance(key, M2Crypto.X509.X509): rsa = key.get_pubkey().get_rsa() text = b else: text = _text_or_file(key) text = get_pem_entry(text) if text.startswith(b): if not asObj: return text bio = M2Crypto.BIO.MemoryBuffer() bio.write(text) rsa = M2Crypto.RSA.load_pub_key_bio(bio) bio = M2Crypto.BIO.MemoryBuffer() if text.startswith(b): cert = M2Crypto.X509.load_cert_string(text) rsa = cert.get_pubkey().get_rsa() if text.startswith(b): csr = M2Crypto.X509.load_request_string(text) rsa = csr.get_pubkey().get_rsa() if (text.startswith(b) or text.startswith(b)): rsa = M2Crypto.RSA.load_key_string( text, callback=_passphrase_callback(passphrase)) if asObj: evppubkey = M2Crypto.EVP.PKey() evppubkey.assign_rsa(rsa) return evppubkey rsa.save_pub_key_bio(bio) return bio.read_all()
Returns a string containing the public key in PEM format. key: A path or PEM encoded string containing a CSR, Certificate or Private Key from which a public key can be retrieved. CLI Example: .. code-block:: bash salt '*' x509.get_public_key /etc/pki/mycert.cer
388,741
def get_cached_source_variable(self, source_id, variable, default=None):
    source_id = int(source_id)
    try:
        return self._retrieve_cached_source_variable(source_id, variable)
    except UncachedVariable:
        return default
Get the cached value of a source variable. If the variable is not cached return the default value.
388,742
def remove_slug(path):
    if path.endswith('/'):
        path = path[:-1]
    if path.startswith('/'):
        path = path[1:]
    if "/" not in path or not path:
        return None
    parts = path.split("/")[:-1]
    return "/".join(parts)
Return the remaining part of the path >>> remove_slug('/test/some/function/') test/some
388,743
def persistent_timer(func):
    @functools.wraps(func)
    def timed_function(optimizer_instance, *args, **kwargs):
        start_time_path = "{}/.start_time".format(optimizer_instance.phase_output_path)
        try:
            with open(start_time_path) as f:
                start = float(f.read())
        except FileNotFoundError:
            start = time.time()
            with open(start_time_path, "w+") as f:
                f.write(str(start))

        result = func(optimizer_instance, *args, **kwargs)

        execution_time = str(dt.timedelta(seconds=time.time() - start))

        logger.info("{} took {} to run".format(
            optimizer_instance.phase_name,
            execution_time
        ))
        with open("{}/execution_time".format(optimizer_instance.phase_output_path), "w+") as f:
            f.write(execution_time)

        return result

    return timed_function
Times the execution of a function. If the process is stopped and restarted then timing is continued using saved files. Parameters ---------- func Some function to be timed Returns ------- timed_function The same function with a timer attached.
388,744
def start(self): yield from self._do_connect() _LOGGER.info(, self._host, self._port) status = yield from self.status() self.synchronize(status) self._on_server_connect()
Initiate server connection.
388,745
def close(self):
    if self._closeable:
        self._close()
    elif self._transaction:
        self._reset()
Close the tough connection. You are allowed to close a tough connection by default and it will not complain if you close it more than once. You can disallow closing connections by setting the closeable parameter to something false. In this case, closing tough connections will be silently ignored.
388,746
def load_or_create(cls, filename=None, no_input=False, create_new=False, **kwargs): parser = argparse.ArgumentParser() parser.add_argument(, action=) parser.add_argument(, action=) args = parser.parse_args() if args.no_input: print() no_input = True if args.create_new: print() create_new = True no_input = True def savefile_more_recent(): time_savefile = os.path.getmtime(filename) time_program = os.path.getmtime(sys.argv[0]) return time_savefile > time_program def load_pickle(): with open(filename, ) as of: statefile_version, data = pickle.load(of) if statefile_version != STATEFILE_VERSION: raise RuntimeError(f) return data def load(): print( % filename) obj_list, config = load_pickle() system = System(load_state=obj_list, filename=filename, **kwargs) return system def create(): print() config = None if filename: try: obj_list, config = load_pickle() except FileNotFoundError: config = None return cls(filename=filename, load_config=config, **kwargs) if filename and os.path.isfile(filename): if savefile_more_recent() and not create_new: return load() else: if no_input: print() return create() while True: answer = input() if answer == : return create() elif answer == : return load() else: return create()
Load system from a dump, if dump file exists, or create a new system if it does not exist.
388,747
def add_view_permissions(sender, verbosity, **kwargs): from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import Permission for content_type in ContentType.objects.all(): codename = "view_%s" % content_type.model _, created = Permission.objects \ .get_or_create(content_type=content_type, codename=codename, defaults={: % content_type.name}) if created and verbosity >= 1: print( % content_type.name)
This post_syncdb/post_migrate hook takes care of adding a view permission to all our content types.
388,748
def required_arguments(func):
    defaults = default_values_of(func)
    args = arguments_of(func)
    if defaults:
        args = args[:-len(defaults)]
    return args
Return all arguments of a function that do not have a default value.
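A small sketch of the expected behaviour, assuming the default_values_of and arguments_of helpers used above are importable; the sample function is invented for illustration.

def greet(name, punctuation='!', times=1):
    return (name + punctuation) * times

print(required_arguments(greet))  # -> ['name']: the only argument without a default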
388,749
def _read_buffer(self, data_in):
    while data_in:
        data_in, channel_id, frame_in = self._handle_amqp_frame(data_in)
        if frame_in is None:
            break
        self.heartbeat.register_read()
        if channel_id == 0:
            self._channel0.on_frame(frame_in)
        elif channel_id in self._channels:
            self._channels[channel_id].on_frame(frame_in)
    return data_in
Process the socket buffer, and direct the data to the appropriate channel. :rtype: bytes
388,750
def create_negotiate_message(self, domain_name=None, workstation=None):
    self.negotiate_message = NegotiateMessage(self.negotiate_flags, domain_name, workstation)
    return base64.b64encode(self.negotiate_message.get_data())
Create an NTLM NEGOTIATE_MESSAGE :param domain_name: The domain name of the user account we are authenticating with, default is None :param workstation: The workstation we are using to authenticate with, default is None :return: A base64 encoded string of the NEGOTIATE_MESSAGE
388,751
def iscomplex(polynomial):
    if isinstance(polynomial, (int, float)):
        return False
    if isinstance(polynomial, complex):
        return True
    polynomial = polynomial.expand()
    for monomial in polynomial.as_coefficients_dict():
        for variable in monomial.as_coeff_mul()[1]:
            if isinstance(variable, complex) or variable == I:
                return True
    return False
Returns whether the polynomial has complex coefficients :param polynomial: Polynomial of noncommutive variables. :type polynomial: :class:`sympy.core.expr.Expr`. :returns: bool -- whether there is a complex coefficient.
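An illustrative sketch with sympy expressions, assuming the symbols and imaginary unit I that the function above expects.

from sympy import symbols, I

x, y = symbols('x y')
print(iscomplex(2 * x * y + 3))  # False: all coefficients are real
print(iscomplex(x + I * y))      # True: one term carries the imaginary unit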
388,752
def download(self, id, attid): resp = self.service.get_id(self._base(id), attid, params={: }, stream=True) b = io.BytesIO() stream.stream_response_to_file(resp, path=b) resp.close() b.seek(0) return (b, self.service.filename(resp))
Download a device's attachment. :param id: Device ID as an int. :param attid: Attachment ID as an int. :rtype: tuple `(io.BytesIO, 'filename')`
388,753
def log(self, metrics_dict):
    if self.writer:
        self.write_to_file(metrics_dict)
    if self.verbose:
        self.print_to_screen(metrics_dict)
    self.reset()
Print calculated metrics and optionally write to file (json/tb)
388,754
def index_model(index_name, adapter): model = adapter.model log.info(.format(model.__name__)) qs = model.objects if hasattr(model.objects, ): qs = qs.visible() if adapter.exclude_fields: qs = qs.exclude(*adapter.exclude_fields) docs = iter_qs(qs, adapter) docs = iter_for_index(docs, index_name) for ok, info in streaming_bulk(es.client, docs, raise_on_error=False): if not ok: log.error(, model.__name__, info[][], info[][])
Index all objects of a given model
388,755
def conf(self): return self.env.get_template().render( metadata=self.metadata, package=self.package)
Generate the Sphinx `conf.py` configuration file Returns: (str): the contents of the `conf.py` file.
388,756
def grid(self, dimensions=None, **kwargs):
    dimensions = self._valid_dimensions(dimensions)
    if len(dimensions) == self.ndims:
        with item_check(False):
            return GridSpace(self, **kwargs).reindex(dimensions)
    return self.groupby(dimensions, container_type=GridSpace, **kwargs)
Group by supplied dimension(s) and lay out groups in grid Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a GridSpace. Args: dimensions: Dimension/str or list Dimension or list of dimensions to group by Returns: GridSpace with supplied dimensions
388,757
def computeRawAnomalyScore(activeColumns, prevPredictedColumns):
    nActiveColumns = len(activeColumns)
    if nActiveColumns > 0:
        # Count active columns that were predicted in the previous step,
        # then take the fraction of active columns that were NOT predicted.
        score = numpy.in1d(activeColumns, prevPredictedColumns).sum()
        score = (nActiveColumns - score) / float(nActiveColumns)
    else:
        score = 0.0
    return score
Computes the raw anomaly score. The raw anomaly score is the fraction of active columns not predicted. :param activeColumns: array of active column indices :param prevPredictedColumns: array of columns indices predicted in prev step :returns: anomaly score 0..1 (float)
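A worked example with plain numpy arrays of column indices; the numbers are invented for illustration.

import numpy

active = numpy.array([2, 3, 5, 7])
predicted = numpy.array([3, 5, 11])
# Two of the four active columns (2 and 7) were not predicted -> score 0.5
print(computeRawAnomalyScore(active, predicted))  # 0.5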
388,758
def write(models, out=None, base=None, logger=logging): assert out is not None if not isinstance(models, list): models = [models] for m in models: for link in m.match(): s, p, o = link[:3] if s == (base or ) + : continue if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p] if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o] if p == VERSA_TYPE_REL: p = RDF_TYPE_REL print(strconv(s), strconv(p), strconv(o), , file=out) return
models - one or more input Versa models from which output is generated.
388,759
def build_inside(input_method, input_args=None, substitutions=None): def process_keyvals(keyvals): keyvals = keyvals or [] processed_keyvals = {} for arg in keyvals: key, value = arg.split("=", 1) processed_keyvals[key] = value return processed_keyvals main = __name__.split(, 1)[0] log_encoding = get_logging_encoding(main) logger.info("log encoding: %s", log_encoding) if not input_method: raise RuntimeError("No input method specified!") logger.debug("getting build json from input %s", input_method) cleaned_input_args = process_keyvals(input_args) cleaned_input_args[] = process_keyvals(substitutions) input_runner = InputPluginsRunner([{: input_method, : cleaned_input_args}]) build_json = input_runner.run()[input_method] if isinstance(build_json, Exception): raise RuntimeError("Input plugin raised exception: {}".format(build_json)) logger.debug("build json: %s", build_json) if not build_json: raise RuntimeError("No valid build json!") if not isinstance(build_json, dict): raise RuntimeError("Input plugin did not return valid build json: {}".format(build_json)) dbw = DockerBuildWorkflow(**build_json) try: build_result = dbw.build_docker_image() except Exception as e: logger.error(, e) raise else: if not build_result or build_result.is_failed(): raise RuntimeError("no image built") else: logger.info("build has finished successfully \\o/")
use requested input plugin to load configuration and then initiate build
388,760
def start_system(components, bind_to, hooks={}): deps = build_deps_graph(components) started_components = start_components(components, deps, None) run_hooks(hooks, started_components) if type(bind_to) is str: master = started_components[bind_to] else: master = bind_to setattr(master, , started_components) return master
Start all components on component map.
388,761
def make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring=, scoring_cv=3, **kwargs): fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \ if use_baart else Fourier() pipeline = Pipeline([(, fourier), (, regressor)]) if use_baart: return pipeline else: params = {: list(range(fourier_degree[0], fourier_degree[1]+1))} return Selector(pipeline, params, scoring=scoring, cv=scoring_cv, n_jobs=selector_processes)
make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs) Makes a predictor object for use in :func:`get_lightcurve`. **Parameters** regressor : object with "fit" and "transform" methods, optional Regression object used for solving Fourier matrix (default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``). Selector : class with "fit" and "predict" methods, optional Model selection class used for finding the best fit (default :class:`sklearn.grid_search.GridSearchCV`). selector_processes : positive integer, optional Number of processes to use for *Selector* (default 1). use_baart : boolean, optional If True, ignores *Selector* and uses Baart's Criteria to find the Fourier degree, within the boundaries (default False). fourier_degree : 2-tuple, optional Tuple containing lower and upper bounds on Fourier degree, in that order (default (2, 25)). scoring : str, optional Scoring method to use for *Selector*. This parameter can be: * "r2", in which case use :math:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). **Returns** out : object with "fit" and "predict" methods The created predictor object.
388,762
def rlmb_long_stochastic_discrete_100steps():
    hparams = rlmb_long_stochastic_discrete()
    hparams.ppo_epoch_length = 100
    hparams.simulated_rollout_length = 100
    hparams.simulated_batch_size = 8
    return hparams
Long setting with stochastic discrete model, changed ppo steps.
388,763
def _process_genes(self, limit=None): LOG.info("Processing genes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 family = Family(graph) geno = Genotype(graph) raw = .join((self.rawdir, self.files[][])) with open(raw, , encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter=, quotechar=) for row in filereader: line_counter += 1 (gene_id, gene_name) = row gene_id = +gene_id.strip() gene_stuff = re.split(, gene_name) symbollist = re.split(r, gene_stuff[0]) first_symbol = symbollist[0].strip() if gene_id not in self.label_hash: self.label_hash[gene_id] = first_symbol if self.test_mode and gene_id not in self.test_ids[]: continue geno.addGene(gene_id, first_symbol) if len(gene_stuff) > 1: description = gene_stuff[1].strip() model.addDefinition(gene_id, description) for i in enumerate(symbollist, start=1): model.addSynonym(gene_id, i[1].strip()) if len(gene_stuff) > 2: ko_part = gene_stuff[2] ko_match = re.search(r, ko_part) if ko_match is not None and len(ko_match.groups()) == 1: ko = +ko_match.group(1) family.addMemberOf(gene_id, ko) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with genes") return
This method processes the KEGG gene IDs. The label for the gene is pulled as the first symbol in the list of gene symbols; the rest are added as synonyms. The long-form of the gene name is added as a definition. This is hardcoded to just process human genes. Triples created: <gene_id> is a SO:gene <gene_id> rdfs:label <gene_name> :param limit: :return:
388,764
def tear_down(self):
    super(ReceiverController, self).tear_down()
    self.status = None
    self.launch_failure = None
    self.app_to_launch = None
    self.app_launch_event.clear()
    self._status_listeners[:] = []
Called when controller is destroyed.
388,765
def namelist_handle(tokens):
    if len(tokens) == 1:
        return tokens[0]
    elif len(tokens) == 2:
        return tokens[0] + "\n" + tokens[0] + " = " + tokens[1]
    else:
        raise CoconutInternalException("invalid in-line nonlocal / global tokens", tokens)
Process inline nonlocal and global statements.
388,766
def parse_rules(data, chain):
    rules = []
    for line in data.splitlines(True):
        m = re_rule.match(line)
        if m and m.group(3) == chain:
            rule = parse_rule(m.group(4))
            rule.packets = int(m.group(1))
            rule.bytes = int(m.group(2))
            rules.append(rule)
    return rules
Parse the rules for the specified chain.
388,767
def execd_module_paths(execd_dir=None):
    if not execd_dir:
        execd_dir = default_execd_dir()

    if not os.path.exists(execd_dir):
        return

    for subpath in os.listdir(execd_dir):
        module = os.path.join(execd_dir, subpath)
        if os.path.isdir(module):
            yield module
Generate a list of full paths to modules within execd_dir.
388,768
def _render_expression(self, check): expressions = [] args = [] skeys = set(check.keys()) skeys.difference_update(set(self._keys)) skeys.difference_update(set([, ])) if skeys: raise KeyError("Illegal testing key(s): %s"%skeys) for name,sub_check in check.iteritems(): if isinstance(sub_check, dict): for test,value in sub_check.iteritems(): try: op = operators[test] except KeyError: raise KeyError("Unsupported operator: %r"%test) if isinstance(op, tuple): op, join = op if value is None and op in null_operators: expr = "%s %s" % (name, null_operators[op]) else: expr = "%s %s ?"%(name, op) if isinstance(value, (tuple,list)): if op in null_operators and any([v is None for v in value]): if sub_check is None: expressions.append("%s IS NULL" % name) else: expressions.append("%s = ?"%name) args.append(sub_check) expr = " AND ".join(expressions) return expr, args
Turn a mongodb-style search dict into an SQL query.
388,769
def _validate(config):
    for mandatory_key in _mandatory_keys:
        if mandatory_key not in config:
            raise KeyError(mandatory_key)
    for key in config.keys():
        if key not in _mandatory_keys and key not in _optional_keys:
            raise SyntaxError(key)
        if not isinstance(config[key], _default_config[key].__class__):
            raise ValueError(key)
Config validation Raises: KeyError on missing mandatory key SyntaxError on invalid key ValueError on invalid value for key :param config: {dict} config to validate :return: None
388,770
def check_has_docstring(self, api):
    if not api.__doc__:
        msg = 'API class {} must have a docstring.'  # message text approximated
        return [msg.format(api.__name__)]
An API class must have a docstring.
388,771
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin): classpath = self._nailgun_classpath + classpath new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version) with self._NAILGUN_SPAWN_LOCK: running, updated = self._check_nailgun_state(new_fingerprint) if running and updated: logger.debug( .format(server=self._identity)) self.terminate() if (not running) or (running and updated): return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin) return self._create_ngclient(self.socket, stdout, stderr, stdin)
This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles creation of the running nailgun server as well as creation of the client.
388,772
def to_dict(self):
    return dict(
        (attr, getattr(self, attr))
        for attr in self.ATTRIBUTES
        if hasattr(self, attr)
    )
Return a dict that can be serialised to JSON and sent to UpCloud's API.
388,773
async def _handle_container_timeout(self, container_id, timeout): try: docker_stats = await self._docker_interface.get_stats(container_id) source = AsyncIteratorWrapper(docker_stats) nano_timeout = timeout * (10 ** 9) async for upd in source: if upd is None: await self._kill_it_with_fire(container_id) self._logger.debug("%i", upd[][][]) if upd[][][] > nano_timeout: self._logger.info("Killing container %s as it used %i CPU seconds (max was %i)", container_id, int(upd[][][] / (10 ** 9)), timeout) await self._kill_it_with_fire(container_id) return except asyncio.CancelledError: pass except: self._logger.exception("Exception in _handle_container_timeout")
Check timeout with docker stats :param container_id: :param timeout: in seconds (cpu time)
388,774
def next_permutation(tab):
    n = len(tab)
    pivot = None  # find the last i such that tab[i] < tab[i + 1]
    for i in range(n - 1):
        if tab[i] < tab[i + 1]:
            pivot = i
    if pivot is None:  # tab is already in decreasing order
        return False
    for i in range(pivot + 1, n):
        if tab[i] > tab[pivot]:
            swap = i
    tab[swap], tab[pivot] = tab[pivot], tab[swap]
    i = pivot + 1
    j = n - 1
    while i < j:  # reverse the suffix after the pivot
        tab[i], tab[j] = tab[j], tab[i]
        i += 1
        j -= 1
    return True
find the next permutation of tab in the lexicographical order :param tab: table with n elements from an ordered set :modifies: table to next permutation :returns: False if permutation is already lexicographical maximal :complexity: O(n)
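An illustrative loop over all permutations of a small list; the table is modified in place and the function returns False once the lexicographically last arrangement is reached.

perm = [1, 2, 3]
while True:
    print(perm)
    if not next_permutation(perm):
        break
# prints [1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]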
388,775
def _filter_commands(ctx, commands=None):
    lookup = getattr(ctx.command, 'commands', {})  # attribute name and separator below assumed
    if not lookup and isinstance(ctx.command, click.MultiCommand):
        lookup = _get_lazyload_commands(ctx.command)

    if commands is None:
        return sorted(lookup.values(), key=lambda item: item.name)

    names = [name.strip() for name in commands.split(',')]
    return [lookup[name] for name in names if name in lookup]
Return list of used commands.
388,776
def set_scene_velocity(self, scene_id, velocity):
    if scene_id not in self.state.scenes:
        err_msg = "Requested to set velocity on scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(velocity=velocity)
    sequence_number = self.zmq_publisher.publish_scene_velocity(scene_id, velocity)
    logging.debug("set velocity on scene {sceneNum}".format(sceneNum=scene_id))
    if scene_id == self.state.activeSceneId:
        self.state.activeAnimation.set_velocity(velocity)
        self._do_next_frame()
    return (True, sequence_number, "OK")
reconfigure a scene by scene ID
388,777
def _has_local_storage(self, pod=None):
    for vol in pod.volumes:
        if vol.emptyDir is not None:
            return True
    return False
Determines if a K8sPod has any local storage susceptible to be lost. :param pod: The K8sPod we're interested in. :return: a boolean.
388,778
def parse_route_name_and_version(route_repr):
    if ':' in route_repr:  # separator character assumed
        route_name, version = route_repr.split(':', 1)
        try:
            version = int(version)
        except ValueError:
            # Error message text approximated; the original literal was lost.
            raise ValueError('Invalid route representation: {}'.format(route_repr))
    else:
        route_name = route_repr
        version = 1
    return route_name, version
Parse a route representation string and return the route name and version number. :param route_repr: Route representation string. :return: A tuple containing route name and version number.
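A usage sketch, assuming the ':' name/version separator restored in the function above; the route names are invented for illustration.

print(parse_route_name_and_version("get_metadata:2"))  # -> ('get_metadata', 2)
print(parse_route_name_and_version("upload"))          # -> ('upload', 1)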
388,779
def _execute_hooks(self, element):
    if self.hooks and self.finalize_hooks:
        self.param.warning(
            "Supply either hooks or finalize_hooks not both, "
            "using hooks and ignoring finalize_hooks.")
    hooks = self.hooks or self.finalize_hooks
    for hook in hooks:
        try:
            hook(self, element)
        except Exception as e:
            self.param.warning("Plotting hook %r could not be "
                               "applied:\n\n %s" % (hook, e))
Executes finalize hooks
388,780
def append(self, other, ignore_index=False):
    if not isinstance(other, self.__class__):
        raise ValueError()
    if type(ignore_index) is bool:
        new_frame = self._frame.append(other._frame,
                                       ignore_index=ignore_index,
                                       verify_integrity=True)
    else:
        new_frame = self._frame.append(other._frame,
                                       ignore_index=True,
                                       verify_integrity=True)
        if type(ignore_index) is int:
            new_frame.index = range(ignore_index, ignore_index + len(new_frame))
        else:
            new_frame.index = ignore_index
    return self.__class__(new_frame)
Append rows of `other` to the end of this frame, returning a new object. Wrapper around the :meth:`pandas.DataFrame.append` method. Args: other (Cartesian): ignore_index (sequence, bool, int): If it is a boolean, it behaves like in the description of :meth:`pandas.DataFrame.append`. If it is a sequence, it becomes the new index. If it is an integer, ``range(ignore_index, ignore_index + len(new))`` becomes the new index. Returns: Cartesian:
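A small sketch of the three ignore_index behaviours in terms of the underlying pandas frames (assumes a pandas version that still provides DataFrame.append):

import pandas as pd

a = pd.DataFrame({'atom': ['C', 'H']})
b = pd.DataFrame({'atom': ['O']})

new = a.append(b, ignore_index=True)   # bool: fresh 0..n-1 index
new.index = range(10, 10 + len(new))   # int 10: index becomes 10, 11, 12
new.index = ['i', 'j', 'k']            # sequence: used verbatim as the new index
print(new)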
388,781
def _filter_nodes(superclass, all_nodes=_all_nodes): node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) return frozenset(node_names)
Filter out AST nodes that are subclasses of ``superclass``.
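The same filtering applied to the stdlib ast module, as a self-contained sketch (the original code builds _all_nodes from its own parser's node classes):

import ast

all_nodes = [obj for obj in vars(ast).values()
             if isinstance(obj, type) and issubclass(obj, ast.AST)]

def filter_nodes(superclass, all_nodes=all_nodes):
    return frozenset(node.__name__ for node in all_nodes
                     if issubclass(node, superclass))

print(sorted(filter_nodes(ast.expr))[:5])  # e.g. ['Attribute', 'Await', 'BinOp', 'BoolOp', 'Call']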
388,782
def find_side(ls, side):
    minx, miny, maxx, maxy = ls.bounds
    points = {'left': [(minx, miny), (minx, maxy)],
              'right': [(maxx, miny), (maxx, maxy)],
              'bottom': [(minx, miny), (maxx, miny)],
              'top': [(minx, maxy), (maxx, maxy)]}
    return sgeom.LineString(points[side])
Given a shapely LineString which is assumed to be rectangular, return the line corresponding to a given side of the rectangle.
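A quick usage sketch with shapely (the 2x1 box is a made-up rectangle; find_side above must be in scope):

import shapely.geometry as sgeom

box = sgeom.box(0, 0, 2, 1)                 # rectangle from (0, 0) to (2, 1)
ls = sgeom.LineString(box.exterior.coords)
print(list(find_side(ls, 'left').coords))   # [(0.0, 0.0), (0.0, 1.0)]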
388,783
def _run_init_queries(self): for obj in (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir): self._db.create_table_from_object(obj())
Initialization queries
388,784
def occurrence_halved_fingerprint( word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG ): return OccurrenceHalved().fingerprint(word, n_bits, most_common)
Return the occurrence halved fingerprint. This is a wrapper for :py:meth:`OccurrenceHalved.fingerprint`. Parameters ---------- word : str The word to fingerprint n_bits : int Number of bits in the fingerprint returned most_common : list The most common tokens in the target language, ordered by frequency Returns ------- int The occurrence halved fingerprint Examples -------- >>> bin(occurrence_halved_fingerprint('hat')) '0b1010000000010' >>> bin(occurrence_halved_fingerprint('niall')) '0b10010100000' >>> bin(occurrence_halved_fingerprint('colin')) '0b1001010000' >>> bin(occurrence_halved_fingerprint('atcg')) '0b10100000000000' >>> bin(occurrence_halved_fingerprint('entreatment')) '0b1111010000110000'
388,785
def power_corr(r=None, n=None, power=None, alpha=0.05, tail='two-sided'):
    n_none = sum([v is None for v in [r, n, power, alpha]])
    if n_none != 1:
        raise ValueError('Exactly one of r, n, power and alpha must be None.')

    # Safety checks
    if r is not None:
        assert -1 <= r <= 1
        r = abs(r)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1
    if n is not None:
        assert n > 4

    if tail == 'two-sided':
        def func(r, n, power, alpha):
            dof = n - 2
            ttt = stats.t.ppf(1 - alpha / 2, dof)
            rc = np.sqrt(ttt**2 / (ttt**2 + dof))
            zr = np.arctanh(r) + r / (2 * (n - 1))
            zrc = np.arctanh(rc)
            power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) + \
                stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3))
            return power
    else:
        def func(r, n, power, alpha):
            dof = n - 2
            ttt = stats.t.ppf(1 - alpha, dof)
            rc = np.sqrt(ttt**2 / (ttt**2 + dof))
            zr = np.arctanh(r) + r / (2 * (n - 1))
            zrc = np.arctanh(rc)
            power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3))
            return power

    if power is None and n is not None and r is not None:
        # Compute achieved power
        return func(r, n, power=None, alpha=alpha)

    elif n is None and power is not None and r is not None:
        # Compute required sample size
        def _eval_n(n, r, power, alpha):
            return func(r, n, power, alpha) - power

        try:
            return brenth(_eval_n, 4 + 1e-10, 1e+09, args=(r, power, alpha))
        except ValueError:
            return np.nan

    elif r is None and power is not None and n is not None:
        # Compute achieved correlation coefficient
        def _eval_r(r, n, power, alpha):
            return func(r, n, power, alpha) - power

        try:
            return brenth(_eval_r, 1e-10, 1 - 1e-10, args=(n, power, alpha))
        except ValueError:
            return np.nan

    else:
        # Compute achieved significance level
        def _eval_alpha(alpha, r, n, power):
            return func(r, n, power, alpha) - power

        try:
            return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(r, n, power))
        except ValueError:
            return np.nan
Evaluate power, sample size, correlation coefficient or significance level of a correlation test. Parameters ---------- r : float Correlation coefficient. n : int Number of observations (sample size). power : float Test power (= 1 - type II error). alpha : float Significance level (type I error probability). The default is 0.05. tail : str Indicates whether the test is "two-sided" or "one-sided". Notes ----- Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must be passed as None, and that parameter is determined from the others. Notice that ``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to compute it. :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e. sample size, effect size, or significance level). If the solving fails, a nan value is returned. This function is a mere Python translation of the original `pwr.r.test` function implemented in the `pwr` R package. All credit goes to the author, Stephane Champely. References ---------- .. [1] Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum. .. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf Examples -------- 1. Compute achieved power given ``r``, ``n`` and ``alpha`` >>> from pingouin import power_corr >>> print('power: %.4f' % power_corr(r=0.5, n=20)) power: 0.6379 2. Compute required sample size given ``r``, ``power`` and ``alpha`` >>> print('n: %.4f' % power_corr(r=0.5, power=0.80, ... tail='one-sided')) n: 22.6091 3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level >>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05)) r: 0.5822 4. Compute achieved alpha level given ``r``, ``n`` and ``power`` >>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80, ... alpha=None)) alpha: 0.1377
388,786
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
    if (typeName, schemaVersion) in _legacyTypes:
        return _legacyTypes[typeName, schemaVersion]
    if dummyBases:
        realBases = [declareLegacyItem(*A) for A in dummyBases]
    else:
        realBases = (Item,)
    attributes = attributes.copy()
    attributes['__module__'] = 'axiom.dummy_items'  # placeholder module name
    attributes['__legacy__'] = True
    attributes['typeName'] = typeName
    attributes['schemaVersion'] = schemaVersion
    result = type(str('DummyItem_%s_v%d' % (typeName, schemaVersion)),  # placeholder class-name format
                  realBases, attributes)
    assert result is not None, 'Failed to create dummy type for %r' % (type,)
    _legacyTypes[(typeName, schemaVersion)] = result
    return result
Generate a dummy subclass of Item that will have the given attributes, and the base Item methods, but no methods of its own. This is for use with upgrading. @param typeName: a string, the Axiom TypeName to have attributes for. @param schemaVersion: an int, the (old) version of the schema this is a proxy for. @param attributes: a dict mapping {columnName: attr instance} describing the schema of C{typeName} at C{schemaVersion}. @param dummyBases: a sequence of 4-tuples of (baseTypeName, baseSchemaVersion, baseAttributes, baseBases) representing the dummy bases of this legacy class.
388,787
def get_rc_creds():
    config = get_config()
    try:
        return (
            config.get(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT),
            config.get(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN),
        )
    except:
        return ('', '')
Reads ~/.rightscalerc and returns API endpoint and refresh token. Always returns a tuple of strings even if the file is empty - in which case, returns ``('', '')``.
388,788
def get_bucket_region(self, bucket) -> str:
    region = self.s3_client.get_bucket_location(Bucket=bucket)["LocationConstraint"]
    return "us-east-1" if region is None else region
Get region associated with a specified bucket name. :param bucket: the bucket to be checked. :return: region, Note that underlying AWS API returns None for default US-East-1, I'm replacing that with us-east-1.
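The same default-region quirk can be seen with boto3 directly (the bucket name is a placeholder):

import boto3

s3 = boto3.client('s3')
region = s3.get_bucket_location(Bucket='my-example-bucket')['LocationConstraint']
print(region or 'us-east-1')  # buckets in the default region report None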
388,789
def convert_attrs_to_uppercase(obj: Any, attrs: Iterable[str]) -> None: for a in attrs: value = getattr(obj, a) if value is None: continue setattr(obj, a, value.upper())
Converts the specified attributes of an object to upper case, modifying the object in place.
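Example usage with a throwaway object (assumes the function above is in scope):

from types import SimpleNamespace

obj = SimpleNamespace(code='abc', city='berlin', note=None)
convert_attrs_to_uppercase(obj, ['code', 'city', 'note'])  # None attributes are skipped
print(obj.code, obj.city, obj.note)  # ABC BERLIN None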
388,790
def calc_qdga2_v1(self): der = self.parameters.derived.fastaccess old = self.sequences.states.fastaccess_old new = self.sequences.states.fastaccess_new if der.kd2 <= 0.: new.qdga2 = new.qdgz2 elif der.kd2 > 1e200: new.qdga2 = old.qdga2+new.qdgz2-old.qdgz2 else: d_temp = (1.-modelutils.exp(-1./der.kd2)) new.qdga2 = (old.qdga2 + (old.qdgz2-old.qdga2)*d_temp + (new.qdgz2-old.qdgz2)*(1.-der.kd2*d_temp))
Perform the runoff concentration calculation for "fast" direct runoff. The working equation is the analytical solution of the linear storage equation under the assumption of constant change in inflow during the simulation time step. Required derived parameter: |KD2| Required state sequence: |QDGZ2| Calculated state sequence: |QDGA2| Basic equation: :math:`QDGA2_{neu} = QDGA2_{alt} + (QDGZ2_{alt}-QDGA2_{alt}) \\cdot (1-exp(-KD2^{-1})) + (QDGZ2_{neu}-QDGZ2_{alt}) \\cdot (1-KD2\\cdot(1-exp(-KD2^{-1})))` Examples: A normal test case: >>> from hydpy.models.lland import * >>> parameterstep() >>> derived.kd2(0.1) >>> states.qdgz2.old = 2.0 >>> states.qdgz2.new = 4.0 >>> states.qdga2.old = 3.0 >>> model.calc_qdga2_v1() >>> states.qdga2 qdga2(3.800054) First extreme test case (zero division is circumvented): >>> derived.kd2(0.0) >>> model.calc_qdga2_v1() >>> states.qdga2 qdga2(4.0) Second extreme test case (numerical overflow is circumvented): >>> derived.kd2(1e500) >>> model.calc_qdga2_v1() >>> states.qdga2 qdga2(5.0)
388,791
def walk(p, mode='all', **kw):
    for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
        if mode in ('all', 'dirs'):
            for dirname in dirnames:
                yield Path(dirpath).joinpath(dirname)
        if mode in ('all', 'files'):
            for fname in filenames:
                yield Path(dirpath).joinpath(fname)
Wrapper for `os.walk`, yielding `Path` objects. :param p: root of the directory tree to walk. :param mode: 'all|dirs|files', defaulting to 'all'. :param kw: Keyword arguments are passed to `os.walk`. :return: Generator for the requested Path objects.
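Typical usage, assuming walk and its as_posix helper are importable:

from pathlib import Path

for path in walk(Path('.'), mode='files'):  # yields pathlib.Path objects for every file
    print(path)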
388,792
def _get_wms_wcs_url_parameters(request, date):
    # OGC parameter names reconstructed from the request fields
    params = {
        'BBOX': str(request.bbox.reverse()) if request.bbox.crs is CRS.WGS84 else str(request.bbox),
        'FORMAT': MimeType.get_string(request.image_format),
        'CRS': CRS.ogc_string(request.bbox.crs),
    }

    if date is not None:
        start_date = date if request.time_difference < datetime.timedelta(
            seconds=0) else date - request.time_difference
        end_date = date if request.time_difference < datetime.timedelta(
            seconds=0) else date + request.time_difference
        # OGC TIME expects an ISO-8601 interval "<start>/<end>"
        params['TIME'] = '{}/{}'.format(start_date.isoformat(), end_date.isoformat())

    return params
Returns parameters common dictionary for WMS and WCS request. :param request: OGC-type request with specified bounding box, cloud coverage for specific product. :type request: OgcRequest or GeopediaRequest :param date: acquisition date or None :type date: datetime.datetime or None :return: dictionary with parameters :rtype: dict
388,793
async def on_raw_731(self, message):
    # Targets arrive as a single comma-separated parameter.
    for nick in message.params[1].split(','):
        self._destroy_user(nick, monitor_override=True)
        await self.on_user_offline(nick)
Someone we are monitoring got offline.
388,794
def tree(node, formatter=None, prefix=None, postfix=None, _depth=1):
    current = 0
    length = len(node.keys())
    # Box-drawing connectors; the exact glyphs are reconstructions.
    tee_joint = '├── '
    elbow_joint = '└── '
    for key, value in node.iteritems():
        current += 1
        k = formatter(key) if formatter else key
        pre = prefix(key) if prefix else ''
        post = postfix(key) if postfix else ''
        space = elbow_joint if current == length else tee_joint
        yield '{space}{prefix}{key}{postfix}'.format(space=space, key=k, prefix=pre, postfix=post)
        if value:
            for e in tree(value, formatter=formatter, prefix=prefix,
                          postfix=postfix, _depth=_depth + 1):
                yield ('│   ' if current != length else '    ') + e
Print a tree. Sometimes it's useful to print datastructures as a tree. This function prints out a pretty tree with root `node`. A tree is represented as a :class:`dict`, whose keys are node names and values are :class:`dict` objects for sub-trees and :class:`None` for terminals. :param dict node: The root of the tree to print. :param callable formatter: A callable that takes a single argument, the key, that formats the key in the tree. :param callable prefix: A callable that takes a single argument, the key, that adds any additional text before the formatted key. :param callable postfix: A callable that takes a single argument, the key, that adds any additional text after the formatted key.
388,795
def input_yn(conf_mess):
    ui_erase_ln()
    ui_print(conf_mess)
    with term.cbreak():
        input_flush()
        val = input_by_key()
    return bool(val.lower() == 'y')
Print Confirmation Message and Get Y/N response from user.
388,796
def getParameters(self, postalAddress):
    address = u''
    if postalAddress is not None:
        address = postalAddress.address
    return [
        liveform.Parameter('address', liveform.TEXT_INPUT, unicode,
                           'Postal Address',  # label text is a reconstruction
                           default=address)]
Return a C{list} of one L{LiveForm} parameter for editing a L{PostalAddress}. @type postalAddress: L{PostalAddress} or C{NoneType} @param postalAddress: If not C{None}, an existing contact item from which to get the postal address default value. @rtype: C{list} @return: The parameters necessary for specifying a postal address.
388,797
def _nonmatch_class_pos(self): if self.kernel.classes_.shape[0] != 2: raise ValueError("Number of classes is {}, expected 2.".format( self.kernel.classes_.shape[0])) return 0
Return the position of the non-match class.
388,798
def maybe_convert_values(self, identifier: Identifier, data: Dict[str, Any], ) -> Dict[str, Any]: raise NotImplementedError
Takes a dictionary of raw values for a specific identifier, as parsed from the YAML file, and depending upon the type of db column the data is meant for, decides what to do with the value (eg leave it alone, convert a string to a date/time instance, or convert identifiers to model instances by calling :meth:`self.loader.convert_identifiers`) :param identifier: An object with :attr:`class_name` and :attr:`key` attributes :param data: A dictionary keyed by column name, with values being the raw values as parsed from the YAML :return: A dictionary keyed by column name, with values being the converted values meant to be set on the model instance
388,799
def toxml(self):
    # Element and attribute names are reconstructions based on this object's fields.
    return '<Parameter name="{0}"'.format(self.name) +\
        (' dimension="{0}"'.format(self.dimension) if self.dimension else '') +\
        (' value="{0}"'.format(self.default_value) if self.default_value else '') +\
        '/>'
Exports this object into a LEMS XML object