code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def save_data(self, session, exp_id, content):
    """Save a result for the participant identified by the current session.

    Obtains the subject id and token from the session, validates the token
    (in headless mode), and commits a new Result row linked to the
    Participant.

    NOTE(review): string literals were stripped from this block; the session
    keys, log messages and token-suffix markers below are reconstructed from
    the expfactory project and should be confirmed.
    """
    # In-function import avoids a circular dependency at module load time.
    from expfactory.database.models import (Participant, Result)
    subid = session.get("subid")
    token = session.get("token")
    self.logger.info("Saving data for subid %s" % subid)
    if subid is not None:
        p = Participant.query.filter(Participant.id == subid).first()
        # In headless mode the participant's token must match the session's.
        if self.headless and p.token != token:
            self.logger.warning("%s attempted save with mismatched token [%s], skipping" % (p.id, token))
        # A token already finalized/revoked can no longer save data.
        elif self.headless and p.token.endswith(("finished", "revoked")):
            self.logger.warning("%s attempted save with expired token [%s], skipping" % (p.id, token))
        else:
            # Experiments may nest the payload under a "data" key.
            if "data" in content:
                content = content["data"]
            result = Result(data=content, exp_id=exp_id, participant_id=p.id)
            self.session.add(result)
            p.results.append(result)
            self.session.commit()
            self.logger.info("Save [participant] %s [result] %s" % (p, result))
save data will obtain the current subid from the session, and save it depending on the database type. Currently we just support flat files
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data, headers=("#",)):
    """Limit CNVkit input to calls with support from another caller.

    get_coords is a function that returns (chrom, start, end) from a line of
    input_file, allowing handling of multiple input file types.

    NOTE(review): the default for ``headers`` was truncated in the source;
    ``("#",)`` is the conventional comment-header reconstruction — confirm.
    """
    support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name))
                     for c in convert.SUBSET_BY_SUPPORT["cnvkit"]]
    support_files = [(c, f) for (c, f) in support_files
                     if f and vcfutils.vcf_has_variants(f)]
    # With no usable support files, pass the input through unchanged.
    if len(support_files) == 0:
        return input_file
    out_file = os.path.join(work_dir, "%s-havesupport%s" %
                            utils.splitext_plus(os.path.basename(input_file)))
    if not utils.file_uptodate(out_file, input_file):
        input_bed = _input_to_bed(input_file, work_dir, get_coords, headers)
        pass_coords = set([])
        with file_transaction(data, out_file) as tx_out_file:
            support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files])
            tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0]
            # Require 50% reciprocal overlap with at least one support call.
            cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}"
            do.run(cmd.format(**locals()), "Intersect CNVs with support files")
            for r in pybedtools.BedTool(tmp_cmp_bed):
                pass_coords.add((str(r.chrom), str(r.start), str(r.stop)))
            with open(input_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    for line in in_handle:
                        passes = True
                        # Header lines pass through; data lines must intersect.
                        if not line.startswith(headers):
                            passes = get_coords(line) in pass_coords
                        if passes:
                            out_handle.write(line)
    return out_file
Limit CNVkit input to calls with support from another caller. get_coords is a function that return chrom, start, end from a line of the input_file, allowing handling of multiple input file types.
def add_special(self, name):
    """Register a special name like `loop`.

    The name is recorded as declared and removed from the undeclared set
    if it was pending there.
    """
    self.declared.add(name)
    self.undeclared.discard(name)
Register a special name like `loop`.
def enable_i2c(self):
    """Return True when the hardware I2C interface is currently enabled.

    Queries the interface configuration and reports whether it is one of
    the two modes that include I2C.
    """
    current = self._interface_configuration(CONFIG_QUERY)
    return current in (CONFIG_GPIO_I2C, CONFIG_SPI_I2C)
Set this to `True` to enable the hardware I2C interface. If set to `False` the hardware interface will be disabled and its pins (SDA and SCL) can be used as GPIOs.
def get_user_data_iobject(user=None, group=None, data_kind=DINGOS_USER_DATA_TYPE_NAME):
    """Return the stored settings InfoObject for a user/group, or None.

    Anonymous (or missing) users fall back to the default, user-less
    settings. Returns the latest revision of the matching UserData's
    identifier, or None when no stored settings exist.
    """
    logger.debug("Get user settings called")
    # BUG FIX: the original called user.is_authenticated() unconditionally,
    # which raised AttributeError for the default user=None.
    if user is not None and not user.is_authenticated():
        user = None
    try:
        user_config = UserData.objects.get(user=user, group=group, data_kind=data_kind)
        return user_config.identifier.latest
    except Exception:
        # Best-effort lookup: any failure (no matching row, missing
        # identifier) means the caller should use default settings.
        return None
Returns either stored settings of a given user or default settings. This behavior reflects the need for views to have some settings at hand when running. The settings are returned as dict object.
def cached_idxs(method):
    """Decorator caching a method's result per positional-argument tuple.

    The result is stored on the instance under a name derived from the
    method name and the stringified args, alongside a boolean "is cached"
    flag attribute.

    NOTE(review): the prefix/separator string literals were stripped from
    the source; the '_' based names below are a reconstruction.
    """
    def method_wrapper(self, *args, **kwargs):
        tail = "_".join(str(idx) for idx in args)
        _cache_attr_name = "_cached_" + method.__name__ + "_" + tail
        _bool_attr_name = "_is_cached_" + method.__name__ + "_" + tail
        # Default False so the first call works even when the flag
        # attribute was never initialized on the instance.
        is_cached = getattr(self, _bool_attr_name, False)
        if not is_cached:
            result = method(self, *args, **kwargs)
            setattr(self, _cache_attr_name, result)
            setattr(self, _bool_attr_name, True)
        return getattr(self, _cache_attr_name)
    return method_wrapper
this function is used as a decorator for caching
def indent(instr, nspaces=4, ntabs=0, flatten=False):
    """Indent a string by ntabs tab characters plus nspaces spaces.

    Parameters
    ----------
    instr : str
        The string to be indented (None is passed through as None).
    nspaces : int (default: 4)
        Number of spaces to indent by.
    ntabs : int (default: 0)
        Number of tabs to indent by.
    flatten : bool (default: False)
        If True, existing leading whitespace is replaced so all lines get
        the same indentation; if False, indentation is strictly added.

    Returns
    -------
    str : the indented string.

    NOTE(review): the indent-string and regex literals were stripped from
    the source; the values below follow the IPython original.
    """
    if instr is None:
        return
    ind = "\t" * ntabs + " " * nspaces
    if flatten:
        # Swallow any existing leading whitespace so all lines align.
        pat = re.compile(r"^\s*", re.MULTILINE)
    else:
        pat = re.compile(r"^", re.MULTILINE)
    outstr = re.sub(pat, ind, instr)
    # Do not indent the empty tail produced by a trailing newline.
    if outstr.endswith(os.linesep + ind):
        return outstr[:-len(ind)]
    else:
        return outstr
Indent a string a given number of spaces or tabstops. indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces. Parameters ---------- instr : basestring The string to be indented. nspaces : int (default: 4) The number of spaces to be indented. ntabs : int (default: 0) The number of tabs to be indented. flatten : bool (default: False) Whether to scrub existing indentation. If True, all lines will be aligned to the same indentation. If False, existing indentation will be strictly increased. Returns ------- str|unicode : string indented by ntabs and nspaces.
def to_lonlat(xtile, ytile, zoom):
    """Return (longitude, latitude) for a slippy-map tile x/y/zoom.

    See the OpenStreetMap slippy-map tilename documentation for the
    inverse Mercator formulas used here.

    Arguments:
        xtile - x tile location as int or float
        ytile - y tile location as int or float
        zoom - zoom level as int or float
    """
    n = 2.0 ** zoom
    lon = xtile / n * 360.0 - 180.0
    try:
        lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
    except OverflowError:
        # math.sinh overflows for extreme arguments; the original raised a
        # bare ValueError() — include the offending zoom in the message.
        raise ValueError("Invalid zoom level: %s" % zoom)
    lat = math.degrees(lat_rad)
    return lon, lat
Returns a tuple of (longitude, latitude) from a map tile xyz coordinate. See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2 Arguments: xtile - x tile location as int or float ytile - y tile location as int or float zoom - zoom level as int or float
def _connect(self):
    """Connect to PostgreSQL, reusing a pooled connection when possible.

    :rtype: psycopg2.extensions.connection
    :raises: pool.NoIdleConnectionsError
    """
    try:
        connection = self._pool_manager.get(self.pid, self)
        LOGGER.debug("Re-using connection for %s", self.pid)
    except pool.NoIdleConnectionsError:
        # Nothing idle in the pool: create a fresh connection unless the
        # pool has already hit its size limit.
        if self._pool_manager.is_full(self.pid):
            raise
        kwargs = utils.uri_to_kwargs(self._uri)
        LOGGER.debug("Creating a new connection for %s", self.pid)
        connection = self._psycopg2_connect(kwargs)
        self._pool_manager.add(self.pid, connection)
    self._pool_manager.lock(self.pid, connection, self)
    # NOTE(review): the PyPy reset before type registration is preserved
    # from the original; presumably needed for reused connections there.
    if utils.PYPY:
        connection.reset()
    self._register_unicode(connection)
    self._register_uuid(connection)
    return connection
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
def _factory(slice_, axis, weighted):
    """Return the PairwiseSignificance subclass matching the slice's
    first dimension type (MR-subvariable vs categorical)."""
    cls = (
        _MrXCatPairwiseSignificance
        if slice_.dim_types[0] == DT.MR_SUBVAR
        else _CatXCatPairwiseSignificance
    )
    return cls(slice_, axis, weighted)
return subclass for PairwiseSignificance, based on slice dimension types.
def write_fits(data, header, file_name):
    """Combine data and a FITS header and write them to ``file_name``.

    Parameters
    ----------
    data : numpy.ndarray
        The data to be written.
    header : astropy.io.fits.Header
        The header for the FITS file.
    file_name : str
        The file to write.

    Returns
    -------
    None
    """
    primary = fits.PrimaryHDU(data)
    primary.header = header
    fits.HDUList([primary]).writeto(file_name, overwrite=True)
    logging.info("Wrote {0}".format(file_name))
    return
Combine data and a fits header to write a fits file. Parameters ---------- data : numpy.ndarray The data to be written. header : astropy.io.fits.hduheader The header for the fits file. file_name : string The file to write Returns ------- None
def update_channels(self):
    """Scan RC option and servo function parameters to learn which
    channels provide input/output roles."""
    # Defaults used when no matching option/function is configured.
    self.interlock_channel = -1
    self.override_channel = -1
    self.zero_I_channel = -1
    self.no_vtol_channel = -1
    self.rsc_out_channel = 9
    self.fwd_thr_channel = 10
    for channel in range(1, 16):
        option = self.get_mav_param("RC%u_OPTION" % channel, 0)
        if option == 32:
            self.interlock_channel = channel
        elif option == 63:
            self.override_channel = channel
        elif option == 64:
            self.zero_I_channel = channel
        elif option == 65:
            # NOTE(review): both option 63 and 65 assign override_channel
            # in the original; preserved as-is — confirm 65 is intended.
            self.override_channel = channel
        elif option == 66:
            self.no_vtol_channel = channel
        function = self.get_mav_param("SERVO%u_FUNCTION" % channel, 0)
        if function == 32:
            self.rsc_out_channel = channel
        if function == 70:
            self.fwd_thr_channel = channel
update which channels provide input
def declination_spencer71(dayofyear):
    """Solar declination in radians per Spencer (1971).

    .. warning:: Return units are radians, not degrees.

    Parameters
    ----------
    dayofyear : numeric

    Returns
    -------
    declination (radians) : numeric
        Angular position of the sun at solar noon relative to the plane of
        the equator, approximately within +/-23.45 degrees.
    """
    day_angle = _calculate_simple_day_angle(dayofyear)
    # Third-order truncated Fourier series fit of declination over the year.
    return (
        0.006918 -
        0.399912 * np.cos(day_angle) + 0.070257 * np.sin(day_angle) -
        0.006758 * np.cos(2. * day_angle) + 0.000907 * np.sin(2. * day_angle) -
        0.002697 * np.cos(3. * day_angle) + 0.00148 * np.sin(3. * day_angle)
    )
Solar declination from Duffie & Beckman [1] and attributed to Spencer (1971) and Iqbal (1983). .. warning:: Return units are radians, not degrees. Parameters ---------- dayofyear : numeric Returns ------- declination (radians) : numeric Angular position of the sun at solar noon relative to the plane of the equator, approximately between +/-23.45 (degrees). References ---------- [1] J. A. Duffie and W. A. Beckman, "Solar Engineering of Thermal Processes, 3rd Edition" pp. 13-14, J. Wiley and Sons, New York (2006) [2] J. W. Spencer, "Fourier series representation of the position of the sun" in Search 2 (5), p. 172 (1971) [3] Daryl R. Myers, "Solar Radiation: Practical Modeling for Renewable Energy Applications", p. 4 CRC Press (2013) See Also -------- declination_cooper69
def pull_3year(self):
    """Return all events from the Penn iCal term calendars, sorted by start.

    Fetches the fall/summer/spring term .ics files for the current year and
    parses each VEVENT into a dict with 'name', 'start' and 'end' keys.

    NOTE(review): the strptime/strftime formats and dict-key literals were
    stripped from the source; '%Y%m%d' / '%Y-%m-%d' and the
    'start'/'end'/'name' keys are reconstructed — confirm against the feed.
    """
    events = []
    for term in ["fall", "summer", "spring"]:
        url = "{}{}{}term.ics".format(BASE_URL, datetime.datetime.now().year, term)
        resp = requests.get(url)
        resp.raise_for_status()
        r = resp.text
        lines = r.split("\n")
        d = {}
        for line in lines:
            if line == "BEGIN:VEVENT":
                d = {}
            elif line.startswith("DTSTART"):
                raw_date = line.split(":")[1]
                start_date = datetime.datetime.strptime(raw_date, "%Y%m%d").date()
                d["start"] = start_date.strftime("%Y-%m-%d")
            elif line.startswith("DTEND"):
                raw_date = line.split(":")[1]
                end_date = datetime.datetime.strptime(raw_date, "%Y%m%d").date()
                d["end"] = end_date.strftime("%Y-%m-%d")
            elif line.startswith("SUMMARY"):
                name = line.split(":")[1]
                d["name"] = str(name).strip()
            elif line == "END:VEVENT":
                events.append(d)
    # Chronological order by start date (ISO strings sort correctly).
    events.sort(key=lambda ev: ev["start"])
    return events
Returns a list (in JSON format) containing all the events from the Penn iCal Calendar. List contains events in chronological order. Each element of the list is a dictionary, containing: - Name of the event 'name' - Start date 'start' - End date 'end'
def validateOneElement(self, doc, elem):
    """Try to validate a single element and its attributes.

    Performs the XML-1.0 [VC: Element Valid] and [VC: Required Attribute]
    checks; ID/IDREF checking is done separately.
    """
    doc__o = None if doc is None else doc._o
    elem__o = None if elem is None else elem._o
    return libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o)
Try to validate a single element and its attributes; basically it performs the following checks as described by the XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC: Required Attribute ] Then call xmlValidateOneAttribute() for each attribute present. The ID/IDREF checks are done separately
def idle_task(self):
    """Called in idle time: forward any received DGPS data over MAVLink."""
    try:
        data = self.port.recv(200)
    except socket.error as e:
        # Non-blocking socket with nothing pending is the normal case.
        if e.errno in [errno.EAGAIN, errno.EWOULDBLOCK]:
            return
        raise
    if len(data) > 110:
        # GPS_INJECT_DATA carries at most 110 bytes of payload.
        print("DGPS data too large: %u bytes" % len(data))
        return
    try:
        self.master.mav.gps_inject_data_send(
            self.target_system,
            self.target_component,
            len(data),
            # Pad to the fixed 110-byte field; the original pad byte literal
            # was stripped — NUL padding reconstructed from MAVProxy.
            bytearray(data.ljust(110, b"\0")))
    except Exception as e:
        # BUG FIX: the original `except Exception(e)` instantiated an
        # exception instead of binding the caught one (NameError at runtime).
        print("DGPS Failed:", e)
called in idle time
def validate(self, size):
    """Ensure the dimension size matches the number of bands in the scale.

    Raises:
        ValueError: when the dimension size and number of bands differ.

    NOTE(review): the original message-template literal was stripped; the
    text below is a reconstruction using the same format(**locals()) call.
    """
    msg = "Dimension size {size} does not match the number of scale bands"
    if size != len(self.scale):
        raise ValueError(msg.format(**locals()))
Ensure that the size of the dimension matches the number of bands in the scale Raises: ValueError: when the dimension size and number of bands don't match
def _initiate_starttls(self, **kwargs):
    """Initiate the STARTTLS handshake over the socket.

    Wraps the plain socket in SSL with the handshake deferred, then starts
    driving the handshake incrementally via _continue_tls_handshake().
    """
    if self._tls_state == "connected":
        raise RuntimeError("Already TLS-connected")
    # Defer the handshake so it can be driven step-by-step (non-blocking).
    kwargs["do_handshake_on_connect"] = False
    logger.debug("Wrapping the socket into ssl")
    # NOTE(review): ssl.wrap_socket is deprecated and removed in
    # Python 3.12; migrating to SSLContext.wrap_socket should be considered.
    self._socket = ssl.wrap_socket(self._socket, **kwargs)
    self._set_state("tls-handshake")
    self._continue_tls_handshake()
Initiate starttls handshake over the socket.
def next(self):
    """Return the next DataBatch and advance the iterator position.

    NOTE(review): the image-file extension literal was stripped from the
    source; ".jpg" is a reconstruction — confirm against the dataset layout.
    NOTE(review): pad and self.cur use the last loop index `i` rather than
    i+1, preserved from the original — confirm the off-by-one is intended.
    """
    batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3))
    i = self.cur
    for i in range(self.cur, min(len(self.list), self.cur + self.batch_size)):
        str_img = open(self.root + self.list[i] + ".jpg").read()
        img = imdecode(str_img, 1)
        img, _ = random_crop(img, self.size)
        batch[i - self.cur] = img
    # NHWC -> NCHW for the network input.
    batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2))
    ret = mx.io.DataBatch(data=[batch],
                          label=[],
                          pad=self.batch_size - (i - self.cur),
                          index=None)
    self.cur = i
    return ret
Move iterator position forward
def add_key(self):
    """Add the configured ssh public key to GitLab if not already present.

    Returns the public key string when it was added, otherwise None.

    NOTE(review): the URL/field string literals were stripped from the
    source; the 'url'/'token' config keys, the 'key'/'title' fields and the
    key title are reconstructed from typical GitLab API usage — confirm.
    """
    try:
        with open(self.args.ssh_public_key) as f:
            public_key = f.read().strip()
    except Exception:
        # Missing/unreadable key file: nothing to upload.
        log.debug("No key found in {}".format(self.args.ssh_public_key))
        return None
    g = self.gitlab
    url = g['url'] + "/user/keys"
    query = {'private_token': g['token']}
    keys = requests.get(url, params=query).json()
    log.debug("looking for the public key in " + str(keys))
    if (list(filter(lambda key: key['key'] == public_key, keys))):
        log.debug(self.args.ssh_public_key + " already exists")
        return None
    else:
        name = 'git-repo'
        log.info("add " + name + " ssh public key from " + self.args.ssh_public_key)
        query['title'] = name
        query['key'] = public_key
        result = requests.post(url, query)
        if result.status_code != requests.codes.created:
            log.warn("failed to add the ssh key {}".format(self.args.ssh_public_key))
        return public_key
Add ssh key to gitlab if necessary
def _get_perspective_coeffs(startpoints, endpoints): matrix = [] for p1, p2 in zip(endpoints, startpoints): matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) A = torch.tensor(matrix, dtype=torch.float) B = torch.tensor(startpoints, dtype=torch.float).view(8) res = torch.gels(B, A)[0] return res.squeeze_(1).tolist()
Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. In Perspective Transform each pixel (x, y) in the original image gets transformed as, (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) Args: List containing [top-left, top-right, bottom-right, bottom-left] of the original image, List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image Returns: octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
def process_frames_mouth(self, frames):
    """Preprocess from frames using the mouth detector: store the frames
    as both face and mouth arrays, then hand them to set_data."""
    self.face = np.array(frames)
    # A second, independent array copy for the mouth, as in the original.
    self.mouth = np.array(frames)
    self.set_data(frames)
Preprocess from frames using mouth detector
def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
    """Normalize a dict (in place) for API production and return it.

    - int_keys: present, non-None values are coerced to int.
    - date_keys: ISO strings / datetimes are converted to isoformat;
      None-valued date keys are removed.
    - bool_keys: accepted for interface compatibility (unused here).
    Any remaining None-valued entries are dropped.
    """
    if int_keys:
        for in_key in int_keys:
            if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
                in_dict[in_key] = int(in_dict[in_key])
    if date_keys:
        for in_key in date_keys:
            if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
                _from = in_dict[in_key]
                # BUG FIX: `basestring` does not exist on Python 3.
                if isinstance(_from, str):
                    dtime = parse_datetime(_from)
                elif isinstance(_from, datetime):
                    dtime = _from
                in_dict[in_key] = dtime.isoformat()
            elif (in_key in in_dict) and in_dict.get(in_key, None) is None:
                del in_dict[in_key]
    # BUG FIX: deleting while iterating a live .items() view raises
    # RuntimeError on Python 3 — iterate over a snapshot instead.
    for k, v in list(in_dict.items()):
        if v is None:
            del in_dict[k]
    return in_dict
Extends a given object for API Production.
def load_settings(self, daemon_config, context, origin=None):
    """Wire the pollers to the X-Ray recorder's context manager and daemon.

    The pollers respect a customer-specified xray client for polling
    sampling rules/targets, otherwise they use the same daemon address as
    the emitter.
    """
    connector = self._connector
    connector.setup_xray_client(
        ip=daemon_config.tcp_ip,
        port=daemon_config.tcp_port,
        client=self.xray_client,
    )
    connector.context = context
    self._origin = origin
The pollers have dependency on the context manager of the X-Ray recorder. They will respect the customer specified xray client to poll sampling rules/targets. Otherwise they falls back to use the same X-Ray daemon as the emitter.
def shepp_logan_ellipsoids(ndim, modified=False):
    """Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions.

    Parameters
    ----------
    ndim : {2, 3}
        Dimension of the space the ellipsoids should be in.
    modified : bool, optional
        True for the modified phantom with amplified contrast.
    """
    if ndim == 2:
        ellipsoids = _shepp_logan_ellipse_2d()
    elif ndim == 3:
        ellipsoids = _shepp_logan_ellipsoids_3d()
    else:
        # The original raised a bare ValueError(); include the bad value.
        raise ValueError('dimension not 2 or 3, got {}'.format(ndim))
    if modified:
        _modified_shepp_logan_ellipsoids(ellipsoids)
    return ellipsoids
Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- ndim : {2, 3} Dimension of the space the ellipsoids should be in. modified : bool, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. See Also -------- odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms shepp_logan : Create a phantom with these ellipsoids References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
def GetCalendarFieldValuesTuple(self):
    """Return the calendar.txt field values, or None when this
    ServicePeriod has no start/end date and should not appear in
    calendar.txt."""
    if not (self.start_date and self.end_date):
        return None
    return [getattr(self, field) for field in self._FIELD_NAMES]
Return the tuple of calendar.txt values or None if this ServicePeriod should not be in calendar.txt .
def get_version(package):
    """Return the package version from ``__version__`` in ``__init__.py``.

    NOTE(review): the filename/encoding/regex literals were stripped from
    the source (the regex was left unbalanced); the values below are the
    conventional reconstruction of this well-known setup.py helper.
    """
    init_py = codecs.open(os.path.join(package, "__init__.py"), encoding="utf-8").read()
    return re.search(r"^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
Return package version as listed in `__version__` in `__init__.py`.
def setOverlayTexture(self, ulOverlayHandle):
    """Texture to draw for the overlay.

    Can only be called by the overlay's creator or renderer process (see
    SetOverlayRenderingPid). OpenGL dirty state: glBindTexture.
    """
    fn = self.function_table.setOverlayTexture
    texture = Texture_t()
    error = fn(ulOverlayHandle, byref(texture))
    return error, texture
Texture to draw for the overlay. This function can only be called by the overlay's creator or renderer process (see SetOverlayRenderingPid) . * OpenGL dirty state: glBindTexture
def status(self):
    """Check the status of the network and the peers.

    :return: dict with the overall network height and per-peer status

    NOTE(review): every URL/dict-key string literal was stripped from this
    block; the 'http://{}:4001' peer format, '/api/peers' endpoint and the
    'height'/'ip'/'status'/'version'/'delay' keys are reconstructed from
    typical Ark node APIs and must be confirmed.
    """
    peer = random.choice(self.PEERS)
    formatted_peer = 'http://{}:4001'.format(peer)
    peerdata = requests.get(url=formatted_peer + '/api/peers').json()['peers']
    peers_status = {}
    networkheight = max([x['height'] for x in peerdata])
    for i in peerdata:
        # Only report on peers we are configured to use.
        if 'http://{}:4001'.format(i['ip']) in self.PEERS:
            peers_status.update({i['ip']: {
                'height': i['height'],
                'status': i['status'],
                'version': i['version'],
                'delay': i['delay'],
            }})
    return {
        'network_height': networkheight,
        'peer_status': peers_status,
    }
check the status of the network and the peers :return: network_height, peer_status
def event_listeners(self):
    """Return copies of all registered event listener lists as a 4-tuple:
    (command, server heartbeat, server, topology)."""
    return (
        list(self.__command_listeners),
        list(self.__server_heartbeat_listeners),
        list(self.__server_listeners),
        list(self.__topology_listeners),
    )
List of registered event listeners.
def transpose(self, name=None, output_channels=None, kernel_shapes=None,
              strides=None, paddings=None, activation=None,
              activate_final=None, normalization_ctor=None,
              normalization_kwargs=None, normalize_final=None,
              initializers=None, partitioners=None, regularizers=None,
              use_batch_norm=None, use_bias=None, batch_norm_config=None,
              data_format=None, custom_getter=None):
    """Returns a transposed version of this convolutional network.

    Arguments left as None default to (the reversal of) the corresponding
    attribute of this network.

    Returns:
        Matching `ConvNet2DTranspose` module.

    Raises:
        ValueError: if the given data_format is not "NHWC" or "NCHW".
        NotImplementedError: if the convolutions are dilated.

    NOTE(review): `use_batch_norm` and `batch_norm_config` are accepted but
    not forwarded to self._transpose below — confirm that is intentional.
    """
    # Transposing dilated convolutions is not supported.
    for rate in self._rates:
        if rate != 1:
            raise NotImplementedError("Transpose dilated convolutions "
                                      "are not supported")
    output_shapes = []
    if data_format is None:
        data_format = self._data_format
    # Select the slice of input_shape holding the spatial (H, W) dims.
    if data_format == DATA_FORMAT_NHWC:
        start_dim, end_dim = 1, -1
    elif data_format == DATA_FORMAT_NCHW:
        start_dim, end_dim = 2, 4
    else:
        raise ValueError("Invalid data_format {:s}. Allowed formats "
                         "{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
    if custom_getter is None and self._custom_getter is not None:
        tf.logging.warning(
            "This convnet was constructed with a custom getter, but the "
            "`transpose` method was not given any. The transposed ConvNet will "
            "not be using any custom_getter.")
    # Output shapes are resolved lazily once inputs are known; `l=layer`
    # binds each layer at definition time (late-binding closure pitfall).
    for layer in reversed(self._layers):
        output_shapes.append(lambda l=layer: l.input_shape[start_dim:end_dim])
    transpose_constructor = functools.partial(ConvNet2DTranspose,
                                              output_shapes=output_shapes,
                                              custom_getter=custom_getter)
    return self._transpose(
        transpose_constructor=transpose_constructor,
        name=name,
        output_channels=output_channels,
        kernel_shapes=kernel_shapes,
        strides=strides,
        paddings=paddings,
        activation=activation,
        activate_final=activate_final,
        normalization_ctor=normalization_ctor,
        normalization_kwargs=normalization_kwargs,
        normalize_final=normalize_final,
        initializers=initializers,
        partitioners=partitioners,
        regularizers=regularizers,
        use_bias=use_bias,
        data_format=data_format)
Returns transposed version of this network. Args: name: Optional string specifying the name of the transposed module. The default name is constructed by appending "_transpose" to `self.module_name`. output_channels: Optional iterable of numbers of output channels. kernel_shapes: Optional iterable of kernel sizes. The default value is constructed by reversing `self.kernel_shapes`. strides: Optional iterable of kernel strides. The default value is constructed by reversing `self.strides`. paddings: Optional iterable of padding options, either `snt.SAME` or `snt.VALID`; The default value is constructed by reversing `self.paddings`. activation: Optional activation op. Default value is `self.activation`. activate_final: Optional boolean determining if the activation and batch normalization, if turned on, are applied to the final layer. normalization_ctor: Constructor to return a callable which will perform normalization at each layer. Defaults to None / no normalization. Examples of what could go here: `snt.BatchNormV2`, `snt.LayerNorm`. If a string is provided, importlib is used to convert the string to a callable, so either `snt.LayerNorm` or `"snt.LayerNorm"` can be provided. normalization_kwargs: kwargs to be provided to `normalization_ctor` when it is called. normalize_final: Whether to apply normalization after the final conv layer. Default is to take the value of activate_final. initializers: Optional dict containing ops to initialize the filters of the whole network (with key 'w') or biases (with key 'b'). The default value is `self.initializers`. partitioners: Optional dict containing partitioners to partition weights (with key 'w') or biases (with key 'b'). The default value is `self.partitioners`. regularizers: Optional dict containing regularizers for the filters of the whole network (with key 'w') or biases (with key 'b'). The default is `self.regularizers`. use_batch_norm: Optional boolean determining if batch normalization is applied after convolution. 
The default value is `self.use_batch_norm`. use_bias: Optional boolean or iterable of booleans determining whether to include bias parameters in the convolutional layers. Default is constructed by reversing `self.use_bias`. batch_norm_config: Optional mapping of additional configuration for the `snt.BatchNorm` modules. Default is `self.batch_norm_config`. data_format: Optional string, one of "NCHW" or "NHWC". Specifies whether the channel dimension of the input and output is the last dimension. Default is `self._data_format`. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. Returns: Matching `ConvNet2DTranspose` module. Raises: ValueError: If output_channels is specified and its length does not match the number of layers. ValueError: If the given data_format is not a supported format ("NHWC" or "NCHW"). NotImplementedError: If the convolutions are dilated.
def generate_ssh_key():
    """Generate an SSH deploy key pair.

    Returns:
        Tuple ``(private_key, public_key)`` of byte strings; the private
        key is unencrypted PEM/PKCS8, the public key is OpenSSH format.
    """
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=4096,
        backend=default_backend(),
    )
    private_bytes = key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.PKCS8,
        serialization.NoEncryption(),
    )
    public_bytes = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH,
    )
    return private_bytes, public_bytes
Generates an SSH deploy public and private key. Returns (private key, public key), a tuple of byte strings.
def create_api_stage(restApiId, stageName, deploymentId, description='',
                     cacheClusterEnabled=False, cacheClusterSize='0.5',
                     variables=None,
                     region=None, key=None, keyid=None, profile=None):
    """Create a new API stage for a given restApiId and deploymentId.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
            description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' \\
            variables='{"name": "value"}'

    NOTE(review): the default-value and result-key string literals were
    stripped from the source; the ''/'0.5' defaults and the
    'created'/'stage'/'error' keys follow the salt boto module convention —
    confirm.
    """
    try:
        variables = dict() if variables is None else variables
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        stage = conn.create_stage(restApiId=restApiId, stageName=stageName,
                                  deploymentId=deploymentId,
                                  description=description,
                                  cacheClusterEnabled=cacheClusterEnabled,
                                  cacheClusterSize=cacheClusterSize,
                                  variables=variables)
        return {'created': True, 'stage': _convert_datetime_str(stage)}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
Creates a new API stage for a given restApiId and deploymentId. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\ description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
def transform(self, jam):
    """Apply the sequence of transformations to a single jam object.

    Parameters
    ----------
    jam : jams.JAMS
        The jam object to transform.

    Yields
    ------
    jam_out : jams.JAMS
        The jam objects produced by the transformation sequence.
    """
    yield from self.__recursive_transform(jam, self.steps)
Apply the sequence of transformations to a single jam object. Parameters ---------- jam : jams.JAMS The jam object to transform Yields ------ jam_out : jams.JAMS The jam objects produced by the transformation sequence
def power_corr(r=None, n=None, power=None, alpha=0.05, tail='two-sided'):
    """Evaluate power, sample size, correlation or alpha of a correlation test.

    Exactly ONE of ``r``, ``n``, ``power`` and ``alpha`` must be None; that
    parameter is solved for from the others via scipy.optimize.brenth
    (NaN is returned when solving fails).

    Parameters
    ----------
    r : float
        Correlation coefficient.
    n : int
        Number of observations (sample size).
    power : float
        Test power (= 1 - type II error).
    alpha : float
        Significance level (default 0.05; pass None explicitly to solve it).
    tail : str
        'two-sided' or 'one-sided'.

    NOTE(review): the default for ``tail`` and the error message were
    stripped from the source; 'two-sided' follows the pingouin original.
    """
    # Exactly one parameter must be left unspecified.
    n_none = sum([v is None for v in [r, n, power, alpha]])
    if n_none != 1:
        raise ValueError("Exactly one of r, n, power and alpha must be None.")
    # Sanity checks on provided values.
    if r is not None:
        assert -1 <= r <= 1
        r = abs(r)
    if alpha is not None:
        assert 0 < alpha <= 1
    if power is not None:
        assert 0 < power <= 1
    if n is not None:
        assert n > 4

    # Power function based on the Fisher z-transform approximation.
    if tail == 'two-sided':
        def func(r, n, power, alpha):
            dof = n - 2
            ttt = stats.t.ppf(1 - alpha / 2, dof)
            rc = np.sqrt(ttt**2 / (ttt**2 + dof))
            zr = np.arctanh(r) + r / (2 * (n - 1))
            zrc = np.arctanh(rc)
            power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3)) + \
                stats.norm.cdf((-zr - zrc) * np.sqrt(n - 3))
            return power
    else:
        def func(r, n, power, alpha):
            dof = n - 2
            ttt = stats.t.ppf(1 - alpha, dof)
            rc = np.sqrt(ttt**2 / (ttt**2 + dof))
            zr = np.arctanh(r) + r / (2 * (n - 1))
            zrc = np.arctanh(rc)
            power = stats.norm.cdf((zr - zrc) * np.sqrt(n - 3))
            return power

    if power is None and n is not None and r is not None:
        # Achieved power can be computed directly.
        return func(r, n, power=None, alpha=alpha)
    elif n is None and power is not None and r is not None:
        # Solve for the required sample size.
        def _eval_n(n, r, power, alpha):
            return func(r, n, power, alpha) - power
        try:
            return brenth(_eval_n, 4 + 1e-10, 1e+09, args=(r, power, alpha))
        except ValueError:
            return np.nan
    elif r is None and power is not None and n is not None:
        # Solve for the detectable correlation coefficient.
        def _eval_r(r, n, power, alpha):
            return func(r, n, power, alpha) - power
        try:
            return brenth(_eval_r, 1e-10, 1 - 1e-10, args=(n, power, alpha))
        except ValueError:
            return np.nan
    else:
        # Solve for the achieved significance level.
        def _eval_alpha(alpha, r, n, power):
            return func(r, n, power, alpha) - power
        try:
            return brenth(_eval_alpha, 1e-10, 1 - 1e-10, args=(r, n, power))
        except ValueError:
            return np.nan
Evaluate power, sample size, correlation coefficient or significance level of a correlation test. Parameters ---------- r : float Correlation coefficient. n : int Number of observations (sample size). power : float Test power (= 1 - type II error). alpha : float Significance level (type I error probability). The default is 0.05. tail : str Indicates whether the test is "two-sided" or "one-sided". Notes ----- Exactly ONE of the parameters ``r``, ``n``, ``power`` and ``alpha`` must be passed as None, and that parameter is determined from the others. Notice that ``alpha`` has a default value of 0.05 so None must be explicitly passed if you want to compute it. :py:func:`scipy.optimize.brenth` is used to solve power equations for other variables (i.e. sample size, effect size, or significance level). If the solving fails, a nan value is returned. This function is a mere Python translation of the original `pwr.r.test` function implemented in the `pwr` R package. All credit goes to the author, Stephane Champely. References ---------- .. [1] Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum. .. [2] https://cran.r-project.org/web/packages/pwr/pwr.pdf Examples -------- 1. Compute achieved power given ``r``, ``n`` and ``alpha`` >>> from pingouin import power_corr >>> print('power: %.4f' % power_corr(r=0.5, n=20)) power: 0.6379 2. Compute required sample size given ``r``, ``power`` and ``alpha`` >>> print('n: %.4f' % power_corr(r=0.5, power=0.80, ... tail='one-sided')) n: 22.6091 3. Compute achieved ``r`` given ``n``, ``power`` and ``alpha`` level >>> print('r: %.4f' % power_corr(n=20, power=0.80, alpha=0.05)) r: 0.5822 4. Compute achieved alpha level given ``r``, ``n`` and ``power`` >>> print('alpha: %.4f' % power_corr(r=0.5, n=20, power=0.80, ... alpha=None)) alpha: 0.1377
def data(self, **query):
    """Query the API for Data objects and resolve referenced annotations.

    Returns a list of GenData objects, kept in self.cache so repeated
    queries update rather than duplicate them.

    NOTE(review): cache/dict-key string literals were stripped from the
    source; 'data', 'objects', 'id', 'type', 'value' and the 'data:'
    prefix are reconstructed from the genesis client and must be confirmed.
    """
    objects = self.cache['data']
    data = self.api.data.get(**query)['objects']
    data_objects = []
    for d in data:
        _id = d['id']
        if _id in objects:
            objects[_id].update(d)
        else:
            objects[_id] = GenData(d, self)
        data_objects.append(objects[_id])
    count = 0  # BUG FIX: the original incremented `count` without initializing it.
    for d in data_objects:
        count += 1
        # Repeatedly inline annotations that reference other data objects
        # until no references remain.
        while True:
            ref_annotation = {}
            remove_annotation = []
            for path, ann in d.annotation.items():
                if ann['type'].startswith('data:'):
                    _id = ann['value']
                    if _id not in objects:
                        try:
                            d_tmp = self.api.data(_id).get()
                        except slumber.exceptions.HttpClientError as ex:
                            # A dangling reference (404) is skipped; any
                            # other HTTP error propagates.
                            if ex.response.status_code == 404:
                                continue
                            else:
                                raise ex
                        objects[_id] = GenData(d_tmp, self)
                    annotation = objects[_id].annotation
                    ref_annotation.update({path + '.' + k: v for k, v in annotation.items()})
                    remove_annotation.append(path)
            if ref_annotation:
                d.annotation.update(ref_annotation)
                for path in remove_annotation:
                    del d.annotation[path]
            else:
                break
    return data_objects
Query for Data object annotation.
def file_signature(filename):
    """Return the SHA-1 hex digest of *filename*, or None if it is not a
    regular file."""
    # isfile() already implies existence; the original's additional
    # os.path.exists check was redundant and has been removed.
    if not os.path.isfile(filename):
        return None
    sig = hashlib.sha1()
    # Read in fixed-size chunks so large files are not loaded whole into
    # memory; the context manager also closes the handle (the original
    # leaked it on this path).
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            sig.update(chunk)
    return sig.hexdigest()
Return a signature for a file.
def prepare_shell_data(self, shells, key, entry):
    """Prepare one shell or docker task, appending task dicts to *shells*.

    NOTE(review): the key/field string literals were stripped from the
    source; the 'with' key, 'python' type and all task-dict keys below are
    reconstructed from the spline pipeline tool and must be confirmed.
    """
    if self.can_process_shell(entry):
        if key in ['python']:
            entry['type'] = key
        # A string 'with' entry is a template: render it, then evaluate the
        # result as a Python literal list.
        if 'with' in entry and isinstance(entry['with'], str):
            rendered_with = ast.literal_eval(render(entry['with'],
                                                    variables=self.pipeline.variables,
                                                    model=self.pipeline.model,
                                                    env=self.get_merged_env(include_os=True)))
        elif 'with' in entry:
            rendered_with = entry['with']
        else:
            rendered_with = ['']
        for item in rendered_with:
            shells.append({
                'id': self.next_task_id,
                'creator': key,
                'entry': entry,
                'model': self.pipeline.model,
                'env': self.get_merged_env(),
                'item': item,
                'dry_run': self.pipeline.options.dry_run,
                'debug': self.pipeline.options.debug,
                'strict': self.pipeline.options.strict,
                'variables': self.pipeline.variables,
                'temporary_scripts_path': self.pipeline.options.temporary_scripts_path})
            self.next_task_id += 1
Prepare one shell or docker task.
def ws_connect(message):
    """Channels connection setup: register the client in its language Group.

    NOTE(review): string literals were stripped from the source; the
    'path' key, the group-name format and the 'language' session key are
    reconstructed and must be confirmed against the routing configuration.
    """
    prefix, language = message['path'].strip('/').split('/')
    gr = Group('liveblog.{}'.format(language))
    gr.add(message.reply_channel)
    # Remember the language so disconnect can remove from the same group.
    message.channel_session['language'] = language
    message.reply_channel.send({"accept": True})
Channels connection setup. Register the current client on the related Group according to the language
def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
    """Add to *graph* the edges between its nodes that exist in *universe*
    and pass the given edge predicates.

    :param universe: The full graph
    :param graph: A sub-graph to find the upstream information
    :param edge_predicates: Optional list of edge filter functions
        (graph, node, node, key, data) -> bool
    """
    edge_filter = and_edge_predicates(edge_predicates)
    for u, v in itt.product(graph, repeat=2):
        if graph.has_edge(u, v) or not universe.has_edge(u, v):
            continue
        # Group the universe's parallel edges by relation type.
        rs = defaultdict(list)
        for key, data in universe[u][v].items():
            if not edge_filter(universe, u, v, key):
                continue
            rs[data[RELATION]].append((key, data))
        if 1 == len(rs):
            relation = list(rs)[0]
            for key, data in rs[relation]:
                graph.add_edge(u, v, key=key, **data)
        else:
            # The original log call lost its message literal; only add an
            # edge when its relation is unambiguous.
            log.debug('skipping edges between %s and %s with ambiguous relations', u, v)
Edges between entities in the sub-graph that pass the given filters. :param universe: The full graph :param graph: A sub-graph to find the upstream information :param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
def dump(
    state, host, remote_filename, database=None,
    mysql_user=None, mysql_password=None,
    mysql_host=None, mysql_port=None,
):
    """Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.

    + database: name of the database to dump
    + remote_filename: name of the file to dump the SQL to
    + mysql_*: global module arguments, see above

    NOTE(review): the executable and command-format literals were stripped
    from the source; 'mysqldump' and the shell redirection are
    reconstructed from the pyinfra mysql module.
    """
    yield '{0} > {1}'.format(make_mysql_command(
        executable='mysqldump',
        database=database,
        user=mysql_user,
        password=mysql_password,
        host=mysql_host,
        port=mysql_port,
    ), remote_filename)
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``. + database: name of the database to dump + remote_filename: name of the file to dump the SQL to + mysql_*: global module arguments, see above
def heartbeat(self):
    """Scheduler heartbeat override that ends the metrics test when done.

    Counts successful TaskInstances for the test DAGs; when every expected
    instance has succeeded — or the maximum runtime has elapsed — prints
    the collected stats, pauses the DAGs and exits the process.
    """
    super(SchedulerMetricsJob, self).heartbeat()
    session = settings.Session()
    TI = TaskInstance
    successful_tis = (
        session
        .query(TI)
        .filter(TI.dag_id.in_(DAG_IDS))
        .filter(TI.state.in_([State.SUCCESS]))
        .all()
    )
    session.commit()
    dagbag = DagBag(SUBDIR)
    dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
    # Expected count: one task instance per task per elapsed day since that
    # task's start_date.
    num_task_instances = sum([(timezone.utcnow() - task.start_date).days
                              for dag in dags for task in dag.tasks])
    if (len(successful_tis) == num_task_instances or
            (timezone.utcnow() - self.start_date).total_seconds() > MAX_RUNTIME_SECS):
        if len(successful_tis) == num_task_instances:
            self.log.info("All tasks processed! Printing stats.")
        else:
            self.log.info("Test timeout reached. Printing available stats.")
        self.print_stats()
        set_dags_paused_state(True)
        # Terminate the whole process: this job exists only for the test.
        sys.exit()
Override the scheduler heartbeat to determine when the test is complete
def create(self, create_missing=None):
    """Create the entity, then re-read it for a complete attribute set.

    Works around Bugzilla #1216236, where the create response payload is
    incomplete, by fetching the freshly-created Location by its id.

    NOTE(review): the response-key literal was stripped from the source;
    'id' is the conventional key and should be confirmed.
    """
    attrs = self.create_json(create_missing)
    return Location(self._server_config, id=attrs['id']).read()
Manually fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1216236 <https://bugzilla.redhat.com/show_bug.cgi?id=1216236>`_.
def search_asn(self, auth, query, search_options=None): if search_options is None: search_options = {} if not in search_options: search_options[] = 50 else: try: search_options[] = int(search_options[]) except (ValueError, TypeError): raise NipapValueError( + max_result) if not in search_options: search_options[] = 0 else: try: search_options[] = int(search_options[]) except (ValueError, TypeError): raise NipapValueError( + offset) self._logger.debug( % unicode(search_options)) opt = None sql = if query != {}: where, opt = self._expand_asn_query(query) sql += " WHERE " + where sql += " ORDER BY asn LIMIT " + unicode(search_options[]) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) return { : search_options, : result }
Search ASNs for entries matching 'query' * `auth` [BaseAuth] AAA options. * `query` [dict_to_sql] How the search should be performed. * `search_options` [options_dict] Search options, see below. Returns a list of dicts. The `query` argument passed to this function is designed to be able to specify how quite advanced search operations should be performed in a generic format. It is internally expanded to a SQL WHERE-clause. The `query` is a dict with three elements, where one specifies the operation to perform and the two other specifies its arguments. The arguments can themselves be `query` dicts, to build more complex queries. The :attr:`operator` key specifies what operator should be used for the comparison. Currently the following operators are supported: * :data:`and` - Logical AND * :data:`or` - Logical OR * :data:`equals` - Equality; = * :data:`not_equals` - Inequality; != * :data:`like` - SQL LIKE * :data:`regex_match` - Regular expression match * :data:`regex_not_match` - Regular expression not match The :attr:`val1` and :attr:`val2` keys specifies the values which are subjected to the comparison. :attr:`val1` can be either any prefix attribute or an entire query dict. :attr:`val2` can be either the value you want to compare the prefix attribute to, or an entire `query` dict. The search options can also be used to limit the number of rows returned or set an offset for the result. The following options are available: * :attr:`max_result` - The maximum number of prefixes to return (default :data:`50`). * :attr:`offset` - Offset the result list this many prefixes (default :data:`0`). This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.search_tag` for full understanding.
def get_bitcoind_client():
    """Build a JSON-RPC client for the configured bitcoind node.

    NOTE(review): the option-dict key literals were lost in extraction —
    presumably the server/port/user/password option names used by
    ``get_bitcoin_opts()``; restore before use.
    """
    bitcoind_opts = get_bitcoin_opts()
    bitcoind_host = bitcoind_opts[]
    bitcoind_port = bitcoind_opts[]
    bitcoind_user = bitcoind_opts[]
    bitcoind_passwd = bitcoind_opts[]
    return create_bitcoind_service_proxy(bitcoind_user, bitcoind_passwd, server=bitcoind_host, port=bitcoind_port)
Connect to the bitcoind node
def sum_distances(self, indices, distance_matrix):
    """Return the combinatorial distance for a group of trajectories.

    Computes ``sqrt(sum_{(i, j)} d[i, j]**2)`` over all unordered pairs
    ``(i, j)`` drawn from `indices`, where ``d`` is `distance_matrix`.

    Parameters
    ----------
    indices : tuple
        Trajectory indices to combine pairwise.
    distance_matrix : numpy.ndarray (M, M)
        Pairwise distances between trajectories.

    Returns
    -------
    numpy.float64
        Root of the summed squared pair distances (0.0 for < 2 indices).
    """
    # One (n_pairs, 2) integer array instead of the original per-element
    # Python loops that rebuilt the pair list twice.  reshape(-1, 2) keeps
    # the empty case well-formed; astype(int) guards the empty-array dtype.
    pairs = np.array(list(combinations(indices, 2))).reshape(-1, 2).astype(int)
    squared = np.square(distance_matrix[pairs[:, 0], pairs[:, 1]])
    return np.sqrt(np.sum(squared, axis=0))
Calculate the combinatorial distance between a selected group of trajectories, indicated by indices Arguments --------- indices : tuple distance_matrix : numpy.ndarray (M,M) Returns ------- numpy.ndarray Notes ----- This function could be sped up by summing the distances directly. The calculated distances, as they stand, are only used in a relative way, so purely summing the distances (without squaring and taking the root) would lead to the same ranking at a lower computational cost.
def os_path_to_client_path(self, os_path):
    """Convert an operating-system path to a client ('/'-separated) path.

    NOTE(review): the string literals were lost in extraction.  Per the
    accompanying documentation this compares ``os.path.sep`` to '/', escapes
    any pre-existing separator characters, and then replaces
    ``os.path.sep`` with '/' — restore the literals before use.
    """
    if os.path.sep == :
        # Separator already '/'; nothing to convert.
        return os_path
    return os_path.replace(, ).replace(os.path.sep, )
Converts an operating system path into a client path by replacing instances of os.path.sep with '/'. Note: If the client path contains any instances of '/' already, they will be replaced with '-'.
def yank_last_arg(event):
    """Like `yank_nth_arg`, but when no explicit argument was given,
    yank the last word of each history line instead.
    """
    if event.arg_present:
        repeat_count = event.arg
    else:
        repeat_count = None
    event.current_buffer.yank_last_arg(repeat_count)
Like `yank_nth_arg`, but if no argument has been given, yank the last word of each line.
def do_not_track():
    """Decorator factory that exempts a view from default metrics collection.

    The wrapper sets ``prom_do_not_track`` on the current request before
    delegating to the view; explicit metrics decorators still collect data.
    """

    def decorator(view_func):
        @functools.wraps(view_func)
        def wrapper(*args, **kwargs):
            # Flag inspected by the metrics hooks for this request.
            request.prom_do_not_track = True
            return view_func(*args, **kwargs)

        return wrapper

    return decorator
Decorator to skip the default metrics collection for the method. *Note*: explicit metrics decorators will still collect the data
def num_samples(input_filepath): validate_input_file(input_filepath) output = soxi(input_filepath, ) if output == : logger.warning("Number of samples unavailable for %s", input_filepath) return int(output)
Show number of samples (0 if unavailable). Parameters ---------- input_filepath : str Path to audio file. Returns ------- n_samples : int total number of samples in audio file. Returns 0 if empty or unavailable
def decode_and_resize(image_str_tensor):
    """Decodes jpeg string, resizes it and returns a uint8 tensor."""
    target_height, target_width, target_channels = 299, 299, 3
    decoded = tf.image.decode_jpeg(image_str_tensor, channels=target_channels)
    # resize_bilinear operates on batches: add a batch dim, resize, drop it.
    batched = tf.expand_dims(decoded, 0)
    resized = tf.image.resize_bilinear(
        batched, [target_height, target_width], align_corners=False)
    unbatched = tf.squeeze(resized, squeeze_dims=[0])
    return tf.cast(unbatched, dtype=tf.uint8)
Decodes jpeg string, resizes it and returns a uint8 tensor.
def get_variable_values( schema, definition_asts, inputs, ): if inputs is None: inputs = {} values = {} for def_ast in definition_asts: var_name = def_ast.variable.name.value var_type = type_from_ast(schema, def_ast.type) value = inputs.get(var_name) if not is_input_type(var_type): raise GraphQLError( .format( var_name=var_name, var_type=print_ast(def_ast.type) ), [def_ast], ) elif value is None: if def_ast.default_value is not None: values[var_name] = value_from_ast( def_ast.default_value, var_type ) if isinstance(var_type, GraphQLNonNull): raise GraphQLError( .format( var_name=var_name, var_type=var_type ), [def_ast], ) else: errors = is_valid_value(value, var_type) if errors: message = u"\n" + u"\n".join(errors) raise GraphQLError( .format( var_name, json.dumps(value, sort_keys=True), message ), [def_ast], ) coerced_value = coerce_value(var_type, value) if coerced_value is None: raise Exception("Should have reported error.") values[var_name] = coerced_value return values
Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input. If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown.
def array_info(self, dump=None, paths=None, attrs=True, standardize_dims=True, pwd=None, use_rel_paths=True, alternative_paths={}, ds_description={, }, full_ds=True, copy=False, **kwargs): saved_ds = kwargs.pop(, {}) def get_alternative(f): return next(filter(lambda t: osp.samefile(f, t[0]), six.iteritems(alternative_paths)), [False, f]) if copy: def copy_obj(obj): try: num = obj.psy.num except AttributeError: pass else: try: return saved_ds[num] except KeyError: saved_ds[num] = obj.psy.copy(True) return saved_ds[num] return obj.psy.copy(True) else: def copy_obj(obj): return obj ret = OrderedDict() if ds_description == : ds_description = {, , , , } if paths is not None: if dump is None: dump = True paths = iter(paths) elif dump is None: dump = False if pwd is None: pwd = getcwd() for arr in self: if isinstance(arr, InteractiveList): ret[arr.arr_name] = arr.array_info( dump, paths, pwd=pwd, attrs=attrs, standardize_dims=standardize_dims, use_rel_paths=use_rel_paths, ds_description=ds_description, alternative_paths=alternative_paths, copy=copy, _saved_ds=saved_ds, **kwargs) else: if standardize_dims: idims = arr.psy.decoder.standardize_dims( next(arr.psy.iter_base_variables), arr.psy.idims) else: idims = arr.psy.idims ret[arr.psy.arr_name] = d = {: idims} if in arr.coords: d[] = [list(arr.coords[].values)] else: d[] = arr.name if in ds_description or in ds_description: fname, store_mod, store_cls = get_filename_ds( arr.psy.base, dump=dump, paths=paths, **kwargs) if in ds_description: d[] = (store_mod, store_cls) if in ds_description: d[] = [] for i, f in enumerate(safe_list(fname)): if (f is None or utils.is_remote_url(f)): d[].append(f) else: found, f = get_alternative(f) if use_rel_paths: f = osp.relpath(f, pwd) else: f = osp.abspath(f) d[].append(f) if fname is None or isinstance(fname, six.string_types): d[] = d[][0] else: d[] = tuple(safe_list(fname)) if arr.psy.base.psy._concat_dim is not None: d[] = arr.psy.base.psy._concat_dim if in ds_description: if 
full_ds: d[] = copy_obj(arr.psy.base) else: d[] = copy_obj(arr.to_dataset()) if in ds_description: d[] = arr.psy.base.psy.num if in ds_description: d[] = copy_obj(arr) if attrs: d[] = arr.attrs ret[] = self.attrs return ret
Get dimension informations on you arrays This method returns a dictionary containing informations on the array in this instance Parameters ---------- dump: bool If True and the dataset has not been dumped so far, it is dumped to a temporary file or the one generated by `paths` is used. If it is False or both, `dump` and `paths` are None, no data will be stored. If it is None and `paths` is not None, `dump` is set to True. %(get_filename_ds.parameters.no_ds|dump)s attrs: bool, optional If True (default), the :attr:`ArrayList.attrs` and :attr:`xarray.DataArray.attrs` attributes are included in the returning dictionary standardize_dims: bool, optional If True (default), the real dimension names in the dataset are replaced by x, y, z and t to be more general. pwd: str Path to the working directory from where the data can be imported. If None, use the current working directory. use_rel_paths: bool, optional If True (default), paths relative to the current working directory are used. Otherwise absolute paths to `pwd` are used ds_description: 'all' or set of {'fname', 'ds', 'num', 'arr', 'store'} Keys to describe the datasets of the arrays. If all, all keys are used. The key descriptions are fname the file name is inserted in the ``'fname'`` key store the data store class and module is inserted in the ``'store'`` key ds the dataset is inserted in the ``'ds'`` key num The unique number assigned to the dataset is inserted in the ``'num'`` key arr The array itself is inserted in the ``'arr'`` key full_ds: bool If True and ``'ds'`` is in `ds_description`, the entire dataset is included. Otherwise, only the DataArray converted to a dataset is included copy: bool If True, the arrays and datasets are deep copied Other Parameters ---------------- %(get_filename_ds.other_parameters)s Returns ------- OrderedDict An ordered mapping from array names to dimensions and filename corresponding to the array See Also -------- from_dict
def phaseshift_isc(data, pairwise=False, summary_statistic=, n_shifts=1000, tolerate_nans=True, random_state=None): data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) observed = isc(data, pairwise=pairwise, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) distribution = [] for i in np.arange(n_shifts): if isinstance(random_state, np.random.RandomState): prng = random_state else: prng = np.random.RandomState(random_state) shifted_data = phase_randomize(data, random_state=prng) if pairwise: shifted_isc = isc(shifted_data, pairwise=True, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) elif not pairwise: shifted_data = np.rollaxis(shifted_data, 2, 0) shifted_isc = [] for s, shifted_subject in enumerate(shifted_data): nonshifted_mean = np.mean(np.delete(data, s, axis=2), axis=2) loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)), pairwise=False, summary_statistic=None, tolerate_nans=tolerate_nans) shifted_isc.append(loo_isc) shifted_isc = compute_summary_statistic( np.dstack(shifted_isc), summary_statistic=summary_statistic, axis=2) distribution.append(shifted_isc) random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED)) distribution = np.vstack(distribution) p = p_from_null(observed, distribution, side=, exact=False, axis=0) return observed, p, distribution
Phase randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are phase randomized prior to computing ISC. If pairwise, apply phase randomization to each subject and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), only apply phase randomization to the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on phase-randomized data. The implementation is based on the work in [Lerner2011]_ and [Simony2016]_. .. [Lerner2011] "Topographic mapping of a hierarchy of temporal receptive windows using a narrated story.", Y. Lerner, C. J. Honey, L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915. 
https://doi.org/10.1523/jneurosci.3684-10.2011 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True
def get_lightcurve(self, star_id, return_1d=True): filename = .format(self.dirname, star_id) try: data = np.loadtxt(self.data.extractfile(filename)) except KeyError: raise ValueError("invalid star id: {0}".format(star_id)) RA = data[:, 0] DEC = data[:, 1] t = data[:, 2::3] y = data[:, 3::3] dy = data[:, 4::3] nans = (y == -99.99) t[nans] = np.nan y[nans] = np.nan dy[nans] = np.nan if return_1d: t, y, dy, filts = np.broadcast_arrays(t, y, dy, [, , , , ]) good = ~np.isnan(t) return t[good], y[good], dy[good], filts[good] else: return t, y, dy
Get the light curves for the given ID Parameters ---------- star_id : int A valid integer star id representing an object in the dataset return_1d : boolean (default=True) Specify whether to return 1D arrays of (t, y, dy, filts) or 2D arrays of (t, y, dy) where each column is a filter. Returns ------- t, y, dy : np.ndarrays (if return_1d == False) Times, magnitudes, and magnitude errors. The shape of each array is [Nobs, 5], where the columns refer to [u,g,r,i,z] bands. Non-observations are indicated by NaN. t, y, dy, filts : np.ndarrays (if return_1d == True) Times, magnitudes, magnitude errors, and filters The shape of each array is [Nobs], and non-observations are filtered out.
def _pathway_feature_permutation(pathway_feature_tuples, permutation_max_iters): pathways, features = [list(elements_at_position) for elements_at_position in zip(*pathway_feature_tuples)] original_pathways = pathways[:] random.shuffle(pathways) feature_block_locations = {} i = 0 while i < len(pathways): starting_index = i current_feature = features[i] pathway_set = set() while i < len(pathways) and features[i] == current_feature: if pathways[i] not in pathway_set: pathway_set.add(pathways[i]) else: k = 0 random_pathway = None while True: j = random.choice(range(0, len(pathways))) random_pathway = pathways[j] random_feature = features[j] if (random_pathway != pathways[i] and random_pathway not in pathway_set): if random_feature not in feature_block_locations: break feature_block_start, feature_block_end = \ feature_block_locations[random_feature] pathway_block = pathways[feature_block_start: feature_block_end] if pathways[i] not in pathway_block: break k += 1 if k > permutation_max_iters: print("Permutation step: reached the maximum " "number of iterations {0}.".format( permutation_max_iters)) return None pathway_set.add(random_pathway) pathways[j] = pathways[i] pathways[i] = random_pathway i += 1 ending_index = i feature_block_locations[current_feature] = ( starting_index, ending_index) if original_pathways == pathways: return None return list(zip(pathways, features))
Permute the pathways across features for one side in the network. Used in `permute_pathways_across_features` Parameters ----------- pathway_feature_tuples : list(tup(str, int)) a tuple list [(pathway, feature)] where the pathway, feature pairing indicates that a pathway was overrepresented in that feature permutation_max_iters : int specify the maximum number of iterations, limit the number of attempts we have to generate a permutation Returns ----------- list(tup(str, int)), the list of pathway, feature pairings after the permutation
def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME,
                          fail_names=DEFAULT_FAIL_NAMES, label=None,
                          is_max_confidence=True, linewidth=LINEWIDTH,
                          plot_upper_bound=True):
    """Plot a success-fail curve from a confidence report stored on disk.

    Loads the report at `path` (output of make_confidence_report*.py) and
    forwards it, together with all styling/interpretation options, to
    ``plot_report``.
    """
    plot_report(load(path), success_name, fail_names, label,
                is_max_confidence, linewidth, plot_upper_bound)
Plots a success-fail curve from a confidence report stored on disk, :param path: string filepath for the stored report. (Should be the output of make_confidence_report*.py) :param success_name: The name (confidence report key) of the data that should be used to measure success rate :param fail_names: A list of names (confidence report keys) of the data that should be used to measure failure rate. *Only one of these keys will be plotted*. Each key will be tried in order until one is found in the report. This is to support both the output of `make_confidence_report` and `make_confidence_report_bundled`. :param label: Optional string. Name to use for this curve in the legend. :param is_max_confidence: bool. If True, when measuring the failure rate, treat the data as the output of a maximum confidence attack procedure. This means that the attack is optimal (assuming the underlying optimizer is good enough, *which is probably false*, so interpret the plot accordingly) for thresholds >= .5 but for lower thresholds the observed failure rate is a lower bound on the true worst failure rate and the observed coverage is an upper bound (assuming good enough optimization) on the true failure rate. The plot thus draws the threshold >= .5 portion of the curve with a solid line and the upper and lower bounds with a dashed line. See https://openreview.net/forum?id=H1g0piA9tQ for details. If False, the attack procedure is regarded as an ad hoc way of obtaining a loose lower bound, and thus the whole curve is drawn with dashed lines. :param linewidth: thickness of the line to draw :param plot_upper_bound: include upper bound on error rate in plot
def integer_list_file(cls, filename): count = 0 result = list() fd = open(filename, ) for line in fd: count = count + 1 if in line: line = line[ : line.find() ] line = line.strip() if line: try: value = cls.integer(line) except ValueError: e = sys.exc_info()[1] msg = "Error in line %d of %s: %s" msg = msg % (count, filename, str(e)) raise ValueError(msg) result.append(value) return result
Read a list of integers from a file. The file format is: - # anywhere in the line begins a comment - leading and trailing spaces are ignored - empty lines are ignored - integers can be specified as: - decimal numbers ("100" is 100) - hexadecimal numbers ("0x100" is 256) - binary numbers ("0b100" is 4) - octal numbers ("0100" is 64) @type filename: str @param filename: Name of the file to read. @rtype: list( int ) @return: List of integers read from the file.
def clear(*signals):
    """Remove every registered callback for the given signals.

    With no arguments, callbacks for *all* known signals are cleared.
    """
    targets = signals or receivers.keys()
    for signal in targets:
        receivers[signal].clear()
Clears all callbacks for a particular signal or signals
def from_str(duration): if duration in ("0", "+0", "-0"): return datetime.timedelta() pattern = re.compile() total = 0 sign = -1 if duration[0] == else 1 matches = pattern.findall(duration) if not len(matches): raise Exception("Invalid duration {}".format(duration)) for (value, unit) in matches: if unit not in units: raise Exception( "Unknown unit {} in duration {}".format(unit, duration)) try: total += float(value) * units[unit] except: raise Exception( "Invalid value {} in duration {}".format(value, duration)) microseconds = total / _microsecond_size return datetime.timedelta(microseconds=sign * microseconds)
Parse a duration string to a datetime.timedelta
def json(self) -> dict: content = {} if self.text: content[] = self.text content[] = [control.json() for control in self.content] self.control_json[] = content return self.control_json
Returns json compatible state of the ButtonsFrame instance. Returns json compatible state of the ButtonsFrame instance including all nested buttons. Returns: control_json: Json representation of ButtonsFrame state.
def serialize_options(opts): options = (opts or {}).copy() for key in opts.keys(): if key not in DEFAULT_OPTIONS: LOG.warning("Unknown option passed to Flask-CORS: %s", key) options[] = sanitize_regex_param(options.get()) options[] = sanitize_regex_param(options.get()) return options
A helper method to serialize and processes the options dictionary.
def inq_compound(self, name): name = create_string_buffer(name) self.library.inq_compound.argtypes = [c_char_p, POINTER(c_int)] self.library.inq_compound.restype = None nfields = c_int() self.library.inq_compound(name, byref(nfields)) return nfields.value
Return the number of fields and size (not yet) of a compound type.
def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
    """Delete all documents matching ``filter_doc`` from a mongo collection.

    https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many

    :param mongo_collection: name of the collection to delete from.
    :type mongo_collection: str
    :param filter_doc: query matching the documents to delete.
    :type filter_doc: dict
    :param mongo_db: database name; when omitted, the database from the
        connection string is used.
    :type mongo_db: str
    """
    target = self.get_collection(mongo_collection, mongo_db=mongo_db)
    return target.delete_many(filter_doc, **kwargs)
Deletes one or more documents in a mongo collection. https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many :param mongo_collection: The name of the collection to delete from. :type mongo_collection: str :param filter_doc: A query that matches the documents to delete. :type filter_doc: dict :param mongo_db: The name of the database to use. Can be omitted; then the database from the connection string is used. :type mongo_db: str
def authenticate_credentials(self, userargs, password, request=None): credentials = { : password } if "=" not in userargs: user = authenticate(**credentials) if user is None: raise exceptions.AuthenticationFailed() if not user.is_active: raise exceptions.AuthenticationFailed() return (user, None)
Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params.
def download_mp4(from_idx, to_idx, _params): succ = set() fail = set() for idx in range(from_idx, to_idx): name = + str(idx) save_folder = .format(src_path=_params[], nm=name) if idx == 0 or os.path.isdir(save_folder): continue script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/video/{nm}.mpg_vcd.zip".format( \ nm=name) down_sc = .format(script=script, nm=name, src_path=_params[]) try: print(down_sc) os.system(down_sc) succ.add(idx) except OSError as error: print(error) fail.add(idx) return (succ, fail)
download mp4s
def _parse_prefix_as_idd(idd_pattern, number):
    """Strip the IDD from the start of the number if present.

    Helper for _maybe_strip_i18n_prefix_and_normalize().

    Returns a 2-tuple of (IDD was stripped?, number with IDD stripped).
    """
    idd_match = idd_pattern.match(number)
    if idd_match is None:
        return (False, number)
    rest = number[idd_match.end():]
    digit_match = _CAPTURING_DIGIT_PATTERN.search(rest)
    # A leading zero after the IDD cannot start a country code, so treat the
    # prefix as not being an IDD in that case.
    if digit_match and normalize_digits_only(digit_match.group(1)) == U_ZERO:
        return (False, number)
    return (True, rest)
Strips the IDD from the start of the number if present. Helper function used by _maybe_strip_i18n_prefix_and_normalize(). Returns a 2-tuple: - Boolean indicating if IDD was stripped - Number with IDD stripped
def request_all_data(cls, time, pressure=None, **kwargs):
    """Retrieve upper-air observations for all stations at a given time.

    Parameters
    ----------
    time : datetime
        Date and time of the desired observation.
    pressure : float, optional
        Mandatory pressure level to request (hPa); all levels when omitted.
    kwargs
        Arbitrary keyword arguments used to initialize the source.

    Returns
    -------
    pandas.DataFrame containing the data.
    """
    source = cls()
    return source._get_data(time, None, pressure, **kwargs)
Retrieve upper air observations from Iowa State's archive for all stations. Parameters ---------- time : datetime The date and time of the desired observation. pressure : float, optional The mandatory pressure level at which to request data (in hPa). If none is given, all the available data in the profiles is returned. kwargs Arbitrary keyword arguments to use to initialize source Returns ------- :class:`pandas.DataFrame` containing the data
def getTokensForText(self, body, POStags=None): return self._text.getTokensForText(self._retina, body, POStags)
Get tokenized input text Args: body, str: The text to be tokenized (required) POStags, str: Specify desired POS types (optional) Returns: list of str Raises: CorticalioException: if the request was not successful
def pretty (self): top_bot = u + u*self.cols + u return top_bot + u.join([u+line+u for line in unicode(self).split(u)]) + u + top_bot
This returns a copy of the screen as a unicode string with an ASCII text box around the screen border. This is similar to __str__/__unicode__ except that it adds a box.
def convexHull(actor_or_list, alphaConstant=0): if vu.isSequence(actor_or_list): actor = vs.Points(actor_or_list) else: actor = actor_or_list apoly = actor.clean().polydata() triangleFilter = vtk.vtkTriangleFilter() triangleFilter.SetInputData(apoly) triangleFilter.Update() poly = triangleFilter.GetOutput() delaunay = vtk.vtkDelaunay3D() if alphaConstant: delaunay.SetAlpha(alphaConstant) delaunay.SetInputData(poly) delaunay.Update() surfaceFilter = vtk.vtkDataSetSurfaceFilter() surfaceFilter.SetInputConnection(delaunay.GetOutputPort()) surfaceFilter.Update() chuact = Actor(surfaceFilter.GetOutput()) return chuact
Create a 3D Delaunay triangulation of input points. :param actor_or_list: can be either an ``Actor`` or a list of 3D points. :param float alphaConstant: For a non-zero alpha value, only verts, edges, faces, or tetra contained within the circumsphere (of radius alpha) will be output. Otherwise, only tetrahedra will be output. .. hint:: |convexHull| |convexHull.py|_
def select_day(self, day): def _select_day(day): return self.data.loc[day, slice(None)] try: return self.new(_select_day(day), self.type, self.if_fq) except: raise ValueError(.format(day))
选取日期(一般用于分钟线) Arguments: day {[type]} -- [description] Raises: ValueError -- [description] Returns: [type] -- [description]
def mergebydepth(args): p = OptionParser(mergebydepth.__doc__) p.add_option("--mindepth", default=3, type="int", help="Minimum depth required") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bedfile, fastafile = args mindepth = opts.mindepth bedgraph = make_bedgraph(bedfile) bedgraphfiltered = bedgraph + ".d{0}".format(mindepth) if need_update(bedgraph, bedgraphfiltered): filter([bedgraph, "--minaccn={0}".format(mindepth), "--outfile={0}".format(bedgraphfiltered)]) merged = bedgraphfiltered + ".merge.fasta" if need_update(bedgraphfiltered, merged): mergeBed(bedgraphfiltered, sorted=True)
%prog mergebydepth reads.bed genome.fasta Similar to mergeBed, but only returns regions beyond certain depth.
def generate_common(env): generate_darwin(env) global TeXAction if TeXAction is None: TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR") global LaTeXAction if LaTeXAction is None: LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR") global BibTeXAction if BibTeXAction is None: BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR") global BiberAction if BiberAction is None: BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR") global MakeIndexAction if MakeIndexAction is None: MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR") global MakeNclAction if MakeNclAction is None: MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR") global MakeGlossaryAction if MakeGlossaryAction is None: MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR") global MakeAcronymsAction if MakeAcronymsAction is None: MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR") try: environ = env[] except KeyError: environ = {} env[] = environ v = os.environ.get() if v: environ[] = v CDCOM = if platform.system() == : CDCOM = env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = 4 env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = env[] = SCons.Util.CLVar() env[] = CDCOM + env[] = env[] = env[] = env[] = CDCOM + env[] = env[] = CDCOM +
Add internal Builders and construction variables for LaTeX to an Environment.
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None): with tempfile.TemporaryDirectory(prefix=) as td: curve.save(os.path.join(td, )) print(.format(curve.q.min(), curve.q.max())) if Npoints_realspace is None: Npoints_realspace = "" else: Npoints_realspace = str(Npoints_realspace) if initial_alpha is None: initial_alpha = "" else: initial_alpha = str(initial_alpha) gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % ( os.path.join(td, ), os.path.join(td, ), 10 * Rmax, Npoints_realspace, initial_alpha) result = subprocess.run([], stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=gnominput.encode()) pr, metadata = read_gnom_pr(os.path.join(td, ), True) pr[:, 0] /= 10 metadata[] *= 10 metadata[] *= 10 metadata[] *= 10 metadata[] *= 10 metadata[] /= 10 metadata[] /= 10 metadata[] /= 10 metadata[] /= 10 if outputfilename is not None: shutil.copy(os.path.join(td, ), outputfilename) return pr, metadata
Run GNOM on the dataset. Inputs: curve: an instance of sastool.classes2.Curve or anything which has a save() method, saving the scattering curve to a given .dat file, in q=4*pi*sin(theta)/lambda [1/nm] units Rmax: the estimated maximum extent of the scattering object, in nm. outputfilename: the preferred name of the output file. If not given, the .out file produced by gnom will be lost. Npoints_realspace: the expected number of points in the real space initial_alpha: the initial value of the regularization parameter. Outputs: the same as of read_gnom_pr()
def info(path): fh = None try: fh = open(path,) return __readHeader(fh) finally: if fh: fh.close() return None
info(path) path is a string
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):
    """Simple, multi-purpose depth-first search.

    Visits every node reachable from ``root``, depth-first.  ``visit`` is
    called on each node; returning True from it halts the search, and the
    function then returns True.  ``traversable(node, edge)`` decides whether
    a connection may be followed (e.g. ``lambda node, edge: node == edge.node1``
    restricts traversal to directed edges).

    Note: ``node._visited`` is expected to be False on all nodes beforehand.
    """
    halted = visit(root)
    root._visited = True
    for neighbor in root.links:
        if halted:
            break
        edge = root.links.edge(neighbor)
        if traversable(root, edge) and not neighbor._visited:
            halted = depth_first_search(neighbor, visit, traversable)
    return halted
Simple, multi-purpose depth-first search. Visits all the nodes connected to the root, depth-first. The visit function is called on each node. Recursion will stop if it returns True, and ubsequently dfs() will return True. The traversable function takes the current node and edge, and returns True if we are allowed to follow this connection to the next node. For example, the traversable for directed edges is follows: lambda node, edge: node == edge.node1 Note: node._visited is expected to be False for all nodes.
def corruptVector(v1, noiseLevel, numActiveCols):
    """Corrupt a copy of a binary vector by inverting noiseLevel percent of its bits.

    @param v1 (array) binary vector whose copy will be corrupted
    @param noiseLevel (float) amount of noise to be applied on the new vector
    @param numActiveCols (int) number of sparse columns that represent an input
    @return v2 (array) corrupted binary vector
    """
    # Vectorized copy instead of the original element-by-element Python loop.
    v2 = np.array(v1, dtype="uint32")
    bitsToSwap = int(noiseLevel * numActiveCols)
    for _ in range(bitsToSwap):
        i = random.randrange(len(v2))
        # Invert the chosen bit; the same index may be picked (and flipped
        # back) more than once, matching the original behavior.
        v2[i] = 0 if v2[i] == 1 else 1
    return v2
Corrupts a copy of a binary vector by inverting noiseLevel percent of its bits. @param v1 (array) binary vector whose copy will be corrupted @param noiseLevel (float) amount of noise to be applied on the new vector @param numActiveCols (int) number of sparse columns that represent an input @return v2 (array) corrupted binary vector
def value(self):
    """Serialize this style as a plain dict (type, style, color, width)."""
    return dict(
        type=self._type,
        style=self._style,
        color=self._color.value,
        width=self._width,
    )
Serialize this style as a dict with ``type``, ``style``, ``color``, and ``width`` keys (the color contributes its ``.value``).
def weight(w, sparsity):
    """Weight-level magnitude pruning.

    Zeros out the ``sparsity`` fraction of entries selected by
    ``common_layers.weight_targeting`` and returns the pruned tensor.
    """
    shape = common_layers.shape_list(w)
    num_rows = int(np.prod(shape[:-1]))
    count = tf.to_int32(num_rows * sparsity)
    mask = common_layers.weight_targeting(w, count)
    return w * (1 - mask)
Weight-level magnitude pruning.
def save(self, model, path=):
    """Save the file model and return the model with no content.

    Saves a notebook model to every paired jupytext format (notebook formats
    first, then text formats through a patched ``nbformat.writes``), and
    returns the save result for the path that was actually requested.

    NOTE(review): many string literals (dict keys such as 'type'/'content'/
    'jupytext', format keys, and the ``path`` default) were stripped from
    this block; restore them from upstream jupytext before running.
    """
    # Non-notebook models are delegated straight to the parent manager.
    if model[] != :
        return super(TextFileContentsManager, self).save(model, path)

    nbk = model[]
    try:
        metadata = nbk.get()
        rearrange_jupytext_metadata(metadata)
        jupytext_metadata = metadata.setdefault(, {})
        jupytext_formats = jupytext_metadata.get() or self.default_formats(path)

        # No explicit formats: infer a single format from the file extension
        # and any stored text_representation.
        if not jupytext_formats:
            text_representation = jupytext_metadata.get(, {})
            ext = os.path.splitext(path)[1]
            fmt = {: ext}
            if ext == text_representation.get() and text_representation.get():
                fmt[] = text_representation.get()
            jupytext_formats = [fmt]

        jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata)

        # Apply the user's preferred save formats.
        jupytext_formats = [preferred_format(fmt, self.preferred_jupytext_formats_save) for fmt in jupytext_formats]

        base, fmt = find_base_path_and_format(path, jupytext_formats)
        self.update_paired_notebooks(path, fmt, jupytext_formats)
        self.set_default_format_options(jupytext_metadata)

        if not jupytext_metadata:
            metadata.pop()

        return_value = None
        value = None
        # First pass: save the notebook-format representations.
        for fmt in jupytext_formats[::-1]:
            if fmt[] != :
                continue

            alt_path = full_path(base, fmt)
            self.create_prefix_dir(alt_path, fmt)
            self.log.info("Saving %s", os.path.basename(alt_path))
            value = super(TextFileContentsManager, self).save(model, alt_path)
            if alt_path == path:
                return_value = value

        # Second pass: save the text representations with jupytext's writer
        # patched in place of nbformat's.
        for fmt in jupytext_formats[::-1]:
            if fmt[] == :
                continue
            alt_path = full_path(base, fmt)
            self.create_prefix_dir(alt_path, fmt)
            if  in fmt and fmt[] not in [, ]:
                self.log.info("Saving %s in format %s:%s",
                              os.path.basename(alt_path), fmt[][1:], fmt[])
            else:
                self.log.info("Saving %s", os.path.basename(alt_path))
            with mock.patch(, _jupytext_writes(fmt)):
                value = super(TextFileContentsManager, self).save(model, alt_path)
            if alt_path == path:
                return_value = value

        # Update the returned model's timestamp from the last save.
        return_value[] = value[]

        return return_value
    except Exception as err:
        raise HTTPError(400, str(err))
Save the file model and return the model with no content.
def send(self, command, _id=None, result={}, frames=[], threads=None,
         error_messages=[], warning_messages=[], info_messages=[], exception=None):
    """Build a message from the parameters and send it to the debugger client.

    Returns the byte count reported by ``sendall``; raises
    IKPdbConnectionError when no connection is available.

    NOTE(review): the payload dict keys were stripped from this block
    (``: _id`` etc.) and must be restored from upstream IKPdb.  Also note
    the mutable default arguments (``result={}``, ``frames=[]``...) — safe
    only as long as callers never mutate them.
    """
    # Serialize sends so concurrent callers cannot interleave messages.
    with self._connection_lock:
        payload = {
            : _id,
            : command,
            : result,
            : ,
            : frames,
            : info_messages,
            : warning_messages,
            : error_messages,
            : exception
        }
        # Threads are only attached when provided.
        if threads:
            payload[] = threads
        msg = self.encode(payload)
        if self._connection:
            # Encoding name stripped — presumably 'utf-8'; TODO confirm.
            msg_bytes = bytearray(msg, )
            send_bytes_count = self._connection.sendall(msg_bytes)
            self.log_sent(msg)
            return send_bytes_count
        raise IKPdbConnectionError("Connection lost!")
Build a message from parameters and send it to debugger. :param command: The command sent to the debugger client. :type command: str :param _id: Unique id of the sent message. Right now, it's always `None` for messages by debugger to client. :type _id: int :param result: Used to send `exit_code` and updated `executionStatus` to debugger client. :type result: dict :param frames: contains the complete stack frames when debugger sends the `programBreak` message. :type frames: list :param error_messages: A list of error messages the debugger client must display to the user. :type error_messages: list of str :param warning_messages: A list of warning messages the debugger client must display to the user. :type warning_messages: list of str :param info_messages: A list of info messages the debugger client must display to the user. :type info_messages: list of str :param exception: If debugger encounter an exception, this dict contains 2 keys: `type` and `info` (the later is the message). :type exception: dict
def calculate_one_hot_encoder_output_shapes(operator):
    '''
    Allowed input/output patterns are
        1. [N, 1] ---> [N, C']

    C' is the total number of categorical values.
    '''
    # The stray "] C" fragment that used to sit here was a remnant of this
    # docstring and made the function a syntax error; it is restored above.
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    if operator.inputs[0].type.shape[1] != 1 or len(operator.inputs[0].type.shape) > 2:
        raise RuntimeError('Input must be a [N, 1]-tensor')

    int_categories = operator.raw_operator.oneHotEncoder.int64Categories.vector
    str_categories = operator.raw_operator.oneHotEncoder.stringCategories.vector

    N = operator.inputs[0].type.shape[0]
    # Output width is the number of categories; integer categories win when
    # both are present, string categories require a string-typed input.
    if len(int_categories) > 0:
        operator.outputs[0].type = FloatTensorType([N, len(int_categories)],
                                                   doc_string=operator.outputs[0].type.doc_string)
    elif len(str_categories) > 0 and type(operator.inputs[0].type) == StringTensorType:
        # NOTE(review): exact type() comparison (not isinstance) is kept as in
        # the original — subclasses of StringTensorType are deliberately(?) excluded.
        operator.outputs[0].type = FloatTensorType([N, len(str_categories)],
                                                   doc_string=operator.outputs[0].type.doc_string)
    else:
        raise ValueError('Categorical indexes are missing')
Allowed input/output patterns are 1. [N, 1] ---> [N, C'] C' is the total number of categorical values.
def format_lp(nodes, constraints_x, qa, constraints_y, qb):
    """Format a binary integer program in CPLEX LP format, e.g.::

        Maximize
         4 x1 + 2 x2 + 3 x3 + x4
        Subject To
         x1 + x2 <= 1
        End

    ``nodes`` is a list of (index, score) pairs; each constraint is a list of
    0-based node positions whose selected count is capped by qa / qb.
    Returns the LP text as a string.
    """
    # cStringIO is Python-2-only; this block already uses the Python 3
    # print() function, so use io.StringIO instead.
    from io import StringIO

    lp_handle = StringIO()

    lp_handle.write("Maximize\n ")
    records = 0
    for i, score in nodes:
        lp_handle.write("+ %d x%d " % (score, i))
        # Wrap the objective line every 10 terms to keep it readable.
        records += 1
        if records % 10 == 0:
            lp_handle.write("\n")
    lp_handle.write("\n")

    num_of_constraints = 0
    lp_handle.write("Subject To\n")
    for c in constraints_x:
        additions = " + ".join("x%d" % (x + 1) for x in c)
        lp_handle.write(" %s <= %d\n" % (additions, qa))
    num_of_constraints += len(constraints_x)

    # When both axes share the same constraint object, emitting it twice
    # would be redundant.
    if not (constraints_x is constraints_y):
        for c in constraints_y:
            additions = " + ".join("x%d" % (x + 1) for x in c)
            lp_handle.write(" %s <= %d\n" % (additions, qb))
        num_of_constraints += len(constraints_y)

    print("number of variables (%d), number of constraints (%d)" %
          (len(nodes), num_of_constraints), file=sys.stderr)

    lp_handle.write("Binary\n")
    for i, score in nodes:
        lp_handle.write(" x%d\n" % i)

    lp_handle.write("End\n")

    lp_data = lp_handle.getvalue()
    lp_handle.close()
    return lp_data
Maximize 4 x1 + 2 x2 + 3 x3 + x4 Subject To x1 + x2 <= 1 End
def load_loops(directory, loops_path=None):
    """Return (start, end) tuples for the loops sampled in ``directory``.

    When ``loops_path`` is not given it is looked up from the workspace.
    Each distinct segment from the loops file is widened by one residue on
    each side: (start - 1, end + 1).
    """
    if loops_path is None:
        loops_path = workspace_from_dir(directory).loops_path

    from klab.rosetta.input_files import LoopsFile
    segments = LoopsFile.from_filepath(loops_path).get_distinct_segments()
    return [(start - 1, end + 1) for start, end in segments]
Return a list of tuples indicating the start and end points of the loops that were sampled in the given directory.
def register(key, initializer: callable, param=None):
    """Add a resolver for ``key`` to the global container of the current scope."""
    container = get_current_scope().container
    container.register(key, initializer, param)
Adds resolver to global container
def add_match(self, entity, *traits):
    """Add a matching entity to the index.

    Maintains the data-layout invariants:

    - ``self.mismatch_unknown`` still contains every matched entity;
    - each index key mismatches all known matching entities except the
      ones that key explicitly includes.

    For the full layout description, see the class-level docstring.

    :param collections.Hashable entity: an object matching the values of
        the indexed traits
    :param list traits: hashable values to index the object with
    """
    # Seed any previously-unseen trait with a copy of the mismatch set.
    for new_trait in (t for t in traits if t not in self.index):
        self.index[new_trait] = self.mismatch_unknown.copy()
    # Every key that is not one of this entity's traits must mismatch it.
    trait_set = set(traits)
    for key, mismatches in self.index.items():
        if key not in trait_set:
            mismatches.add(entity)
    self.mismatch_unknown.add(entity)
Add a matching entity to the index. We have to maintain the constraints of the data layout: - `self.mismatch_unknown` must still contain all matched entities - each key of the index must mismatch all known matching entities except those this particular key explicitly includes For data layout description, see the class-level docstring. :param collections.Hashable entity: an object to be matching the values of `traits_indexed_by` :param list traits: a list of hashable values to index the object with
def _remove_buffers(state):
    """Split binary message parts out of ``state``.

    Returns ``(state_without_buffers, buffer_paths, buffers)``, where a
    binary part is a memoryview, bytearray, or (Python 3) bytes object;
    the heavy lifting is delegated to ``_separate_buffers``.
    """
    paths = []
    bufs = []
    stripped_state = _separate_buffers(state, [], paths, bufs)
    return stripped_state, paths, bufs
Return (state_without_buffers, buffer_paths, buffers) for binary message parts A binary message part is a memoryview, bytearray, or python 3 bytes object. As an example: >>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}} >>> _remove_buffers(state) ({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']], [<memory at 0x107ffec48>, <memory at 0x107ffed08>])
def collect_blame_info(cls, matches):
    """Run ``git blame`` on files for the specified sets of line ranges.

    Yields the raw blame output per file; prints progress and a directory
    header as it moves between areas.

    NOTE(review): the git command literals were stripped
    (``command = [, , ]`` — presumably ['git', 'blame', ...]) and the
    empty-area fallback (``area = ``) lost its value; restore before use.
    Note also that ``filter`` shadows the builtin of the same name, and
    ``os.chdir`` mutates process-global state.
    """
    old_area = None
    for filename, ranges in matches:
        area, name = os.path.split(filename)
        if not area:
            area = 
        # Print a header whenever we enter a new directory.
        if area != old_area:
            print("\n\n%s/\n" % area)
            old_area = area
        print("%s " % name, end="")
        filter = cls.build_line_range_filter(ranges)
        command = [, , ] + filter + [name]
        os.chdir(area)
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            print(" <<<<<<<<<< Unable to collect info:", err)
        else:
            yield out
Runs git blame on files, for the specified sets of line ranges. If no line range tuples are provided, it will do all lines.
def _merge_raw(self, other): if other is None: variables = OrderedDict(self.variables) else: variables = expand_and_merge_variables( [self.variables, other.variables]) return variables
For use with binary arithmetic.
def restore(self):
    """Restore the snapshot.

    Closes the current project, wipes its on-disk files, re-imports the
    snapshot archive in place, reopens the project, and returns the new
    project object.  Filesystem errors surface as HTTP 409 Conflict.
    """
    # Tear down the running project on all computes before touching disk.
    yield from self._project.delete_on_computes()
    yield from self._project.close(ignore_notification=True)
    self._project.controller.notification.emit("snapshot.restored", self.__json__())
    try:
        # Drop the current project files so the snapshot can replace them.
        if os.path.exists(os.path.join(self._project.path, "project-files")):
            shutil.rmtree(os.path.join(self._project.path, "project-files"))
        with open(self._path, "rb") as f:
            project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path)
    except (OSError, PermissionError) as e:
        # Filesystem failures are reported to the API client as a conflict.
        raise aiohttp.web.HTTPConflict(text=str(e))
    yield from project.open()
    return project
Restore the snapshot
def loaded(self, request, *args, **kwargs):
    """Return a serialized list of all loaded Packs."""
    all_packs = list(Pack.objects.all())
    serialized = self.get_serializer(all_packs, many=True)
    return Response(serialized.data)
Return a list of loaded Packs.
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_src_port(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF XML payload for trunk-list-src-port and hands it to
    the callback.

    NOTE(review): the ``kwargs.pop()`` key literals were stripped —
    presumably 'trunk_list_src_port' and 'callback'; restore from the code
    generator before use.
    """
    config = ET.Element("config")
    show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
    # The outer "config" element is replaced by the RPC element itself.
    config = show_fabric_trunk_info
    output = ET.SubElement(show_fabric_trunk_info, "output")
    show_trunk_list = ET.SubElement(output, "show-trunk-list")
    trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
    trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member")
    trunk_list_src_port = ET.SubElement(trunk_list_member, "trunk-list-src-port")
    trunk_list_src_port.text = kwargs.pop()
    callback = kwargs.pop(, self._callback)
    return callback(config)
Auto Generated Code
def generate_enums_info(enums, msgs):
    """Add camel-case Swift names, raw types, and formatted descriptions to
    enums and their entries, then sort enums alphabetically by Swift name.

    NOTE(review): several string literals were stripped (the
    ``replace(enum.name + , )`` prefix removal, the ``entities_info``
    format string, and the ``replace(,)`` escaping calls); restore from the
    upstream generator before use.  ``enums`` and its entries are mutated
    in place.
    """
    for enum in enums:
        enum.swift_name = camel_case_from_underscores(enum.name)
        enum.raw_value_type = get_enum_raw_type(enum, msgs)

        # Collapse internal whitespace and wrap in a doc-comment block.
        enum.formatted_description = ""
        if enum.description:
            enum.description = " ".join(enum.description.split())
            enum.formatted_description = "\n/**\n %s\n*/\n" % enum.description

        all_entities = []
        entities_info = []

        for entry in enum.entry:
            # Strip the enum-name prefix from the entry name (literal stripped).
            name = entry.name.replace(enum.name + , )
            # Swift identifiers cannot start with a digit.
            if name[0].isdigit():
                name = "MAV_" + name
            entry.swift_name = camel_case_from_underscores(name)
            entry.formatted_description = ""
            if entry.description:
                entry.description = " ".join(entry.description.split())
                entry.formatted_description = "\n\t/// " + entry.description + "\n"
            all_entities.append(entry.swift_name)
            entities_info.append( % (entry.name, entry.description.replace(,)))

        enum.all_entities = ", ".join(all_entities)
        enum.entities_info = ", ".join(entities_info)
        enum.entity_description = enum.description.replace(,)

    enums.sort(key = lambda enum : enum.swift_name)
Add camel case swift names for enums an entries, descriptions and sort enums alphabetically
def _configure_manager(self):
    """Create one manager each for instances, flavors, and backups."""
    manager_specs = (
        ("_manager", CloudDatabaseManager, CloudDatabaseInstance,
         "instance", "instances"),
        ("_flavor_manager", BaseManager, CloudDatabaseFlavor,
         "flavor", "flavors"),
        ("_backup_manager", CloudDatabaseBackupManager, CloudDatabaseBackup,
         "backup", "backups"),
    )
    for attr, manager_cls, resource_cls, response_key, uri_base in manager_specs:
        setattr(self, attr, manager_cls(self,
                                        resource_class=resource_cls,
                                        response_key=response_key,
                                        uri_base=uri_base))
Creates a manager to handle the instances, and another to handle flavors.
def enum_subpattern(p):
    """Expand a (subpattern_id, data) pair into its list of generated patterns."""
    subpattern_id, d = p
    # NOTE(review): subpattern_id is unpacked but never used here — an
    # alternate implementation registers an EnumSubpatternIterator in
    # SUBPATTERNS when an id is present; confirm which behavior is intended.
    patterns = list(enum_gen(d))
    return patterns
if subpattern_id: subpat_iter = EnumSubpatternIterator(subpattern_id, patterns) SUBPATTERNS[subpattern_id] = subpat_iter return subpat_iter else: return patterns
def _print_sql_with_error(self, sql, error_line):
    """Write a SQL statement with a syntax error to the output, highlighting
    the offending line.

    :param str sql: The SQL statement.
    :param int error_line: The 1-based line number where the error occurs.

    NOTE(review): the two ``.format`` template literals were stripped —
    presumably numbered-line templates, the first with error styling;
    restore before use.
    """
    if os.linesep in sql:
        lines = sql.split(os.linesep)
        # Width of the line-number gutter.
        digits = math.ceil(math.log(len(lines) + 1, 10))
        i = 1
        for line in lines:
            if i == error_line:
                # Highlighted rendering for the error line (template stripped).
                self._io.text(.format(i, line, width=digits, ))
            else:
                self._io.text(.format(i, line, width=digits, ))
            i += 1
    else:
        # Single-line statement: print as-is.
        self._io.text(sql)
Writes a SQL statement with an syntax error to the output. The line where the error occurs is highlighted. :param str sql: The SQL statement. :param int error_line: The line where the error occurs.
def ifftn(a, s=None, axes=None, norm=None):
    """Compute the N-dimensional inverse discrete Fourier Transform.

    Thin wrapper around ``mkl_fft.ifftn`` that additionally applies the
    "ortho" normalization scaling when requested via ``norm`` (same
    conventions as ``numpy.fft.ifftn``).
    """
    is_unitary = _unitary(norm)
    result = mkl_fft.ifftn(a, s, axes)
    if is_unitary:
        result *= sqrt(_tot_size(result, axes))
    return result
Compute the N-dimensional inverse discrete Fourier Transform. This function computes the inverse of the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, ``ifftn(fftn(a)) == a`` to within numerical accuracy. For a description of the definitions and conventions used, see `numpy.fft`. The input, analogously to `ifft`, should be ordered in the same way as is returned by `fftn`, i.e. it should have the term for zero frequency in all axes in the low-order corner, the positive frequency terms in the first half of all axes, the term for the Nyquist frequency in the middle of all axes and the negative frequency terms in the second half of all axes, in order of decreasingly negative frequency. Parameters ---------- a : array_like Input array, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds to ``n`` for ``ifft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. See notes for issue on `ifft` zero padding. axes : sequence of ints, optional Axes over which to compute the IFFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. Repeated indices in `axes` means that the inverse transform over that axis is performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` or `a`, as explained in the parameters section above. Raises ------ ValueError If `s` and `axes` have different length. 
IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. ifft : The one-dimensional inverse FFT. ifft2 : The two-dimensional inverse FFT. ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning of array. Notes ----- See `numpy.fft` for definitions and conventions used. Zero-padding, analogously with `ifft`, is performed by appending zeros to the input along the specified dimension. Although this is the common approach, it might lead to surprising results. If another form of zero padding is desired, it must be performed before `ifftn` is called. Examples -------- >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt >>> n = np.zeros((200,200), dtype=complex) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) <matplotlib.image.AxesImage object at 0x...> >>> plt.show()
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """Truncate all existing tables in the current keyspace.

    The truncation is executed directly on the connection rather than
    returned as SQL to the caller.

    :returns: an empty list
    """
    cursor = self.connection.connection
    for table_name in tables:
        cursor.execute("TRUNCATE {}".format(table_name))
    return []
Truncate all existing tables in current keyspace. :returns: an empty list
def update_time_login(u_name):
    """Update the login timestamp for the user named ``u_name``."""
    query = TabMember.update(time_login=tools.timestamp())
    query.where(TabMember.user_name == u_name).execute()
Update the login time for user.