Unnamed: 0 — int64, 0 to 389k
code — string, lengths 26 to 79.6k
docstring — string, lengths 1 to 46.9k
388,800
def parse_csr():
    LOGGER.info("Parsing CSR...")
    # The string literals in this row were lost in extraction; the openssl
    # arguments and the temp-file name below are plausible reconstructions.
    cmd = [
        "openssl", "req",
        "-in", os.path.join(gettempdir(), "csr.pem"),  # CSR filename is an assumption
        "-noout", "-text",
    ]
    devnull = open(os.devnull, "wb")
    out = subprocess.check_output(cmd, stderr=devnull)
    domains = set([])
    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode())
    if common_name is not None:
        domains.add(common_name.group(1))
    subject_alt_names = re.search(
        r"X509v3 Subject Alternative Name: \n +([^\n]+)\n",
        out.decode(), re.MULTILINE | re.DOTALL)
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])
    return domains
Parse certificate signing request for domains
388,801
def appendRandomLenPadding(str, blocksize=AES_blocksize):
    from os import urandom
    pad_len = paddingLength(len(str), blocksize) - 1
    padding = urandom(pad_len) + chr(pad_len)
    return str + padding
ISO 10126 Padding (withdrawn, 2007): Pad with random bytes + last byte equal to the number of padding bytes
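A minimal, self-contained sketch of the same padding scheme (an illustration, not the library code above; the 16-byte block size is just an example):

import os

def iso10126_pad(data: bytes, blocksize: int = 16) -> bytes:
    # Random filler bytes, with the final byte recording how many bytes were added.
    pad_len = blocksize - (len(data) % blocksize)
    return data + os.urandom(pad_len - 1) + bytes([pad_len])

def iso10126_unpad(padded: bytes) -> bytes:
    return padded[:-padded[-1]]

assert iso10126_unpad(iso10126_pad(b"hello", 16)) == b"hello"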
388,802
def render(self, *args, **kwargs):
    uid_with_str = args[0]
    # Keyword names are restored from the variable names; the template paths
    # and the glyph default were lost in extraction.
    slug = kwargs.get('slug', False)
    with_title = kwargs.get('with_title', False)
    glyph = kwargs.get('glyph', '')
    kwd = {'glyph': glyph}
    curinfo = MCategory.get_by_uid(uid_with_str)
    sub_cats = MCategory.query_sub_cat(uid_with_str)
    if slug:
        tmpl = ''  # template used for slug rendering (original path lost)
    else:
        tmpl = ''  # default template (original path lost)
    return self.render_string(tmpl, pcatinfo=curinfo, sub_cats=sub_cats,
                              recs=sub_cats, with_title=with_title, kwd=kwd)
fun(uid_with_str) fun(uid_with_str, slug = val1, glyph = val2)
388,803
def _win32_is_hardlinked(fpath1, fpath2): def get_read_handle(fpath): if os.path.isdir(fpath): dwFlagsAndAttributes = jwfs.api.FILE_FLAG_BACKUP_SEMANTICS else: dwFlagsAndAttributes = 0 hFile = jwfs.api.CreateFile(fpath, jwfs.api.GENERIC_READ, jwfs.api.FILE_SHARE_READ, None, jwfs.api.OPEN_EXISTING, dwFlagsAndAttributes, None) return hFile def get_unique_id(hFile): info = jwfs.api.BY_HANDLE_FILE_INFORMATION() res = jwfs.api.GetFileInformationByHandle(hFile, info) jwfs.handle_nonzero_success(res) unique_id = (info.volume_serial_number, info.file_index_high, info.file_index_low) return unique_id hFile1 = get_read_handle(fpath1) hFile2 = get_read_handle(fpath2) try: are_equal = (get_unique_id(hFile1) == get_unique_id(hFile2)) except Exception: raise finally: jwfs.api.CloseHandle(hFile1) jwfs.api.CloseHandle(hFile2) return are_equal
Test if two hard links point to the same location CommandLine: python -m ubelt._win32_links _win32_is_hardlinked Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_hardlink') >>> ub.delete(root) >>> ub.ensuredir(root) >>> fpath1 = join(root, 'fpath1') >>> fpath2 = join(root, 'fpath2') >>> ub.touch(fpath1) >>> ub.touch(fpath2) >>> fjunc1 = _win32_junction(fpath1, join(root, 'fjunc1')) >>> fjunc2 = _win32_junction(fpath2, join(root, 'fjunc2')) >>> assert _win32_is_hardlinked(fjunc1, fpath1) >>> assert _win32_is_hardlinked(fjunc2, fpath2) >>> assert not _win32_is_hardlinked(fjunc2, fpath1) >>> assert not _win32_is_hardlinked(fjunc1, fpath2)
388,804
def cover(self): if not self._cover: self._cover = Picture(self._cover_url, self._connection) return self._cover
album cover as :class:`Picture` object
388,805
def build(self, recipe=None, image=None, isolated=False, sandbox=False,
          writable=False, build_folder=None, robot_name=False, ext='sif',
          sudo=True, stream=False):
    # NOTE: the statements that assemble `cmd` were lost in extraction, along
    # with docstring text that had leaked into this cell (now removed). The
    # ext='sif' default is an assumption; `cmd` is undefined until the lost
    # portion is restored.
    if recipe is None:
        recipe = self._get_uri()
    cmd = cmd + [image, recipe]
    if stream is False:
        output = self._run_command(cmd, sudo=sudo, capture=False)
    else:
        return image, stream_command(cmd, sudo=sudo)
    if os.path.exists(image):
        return image
build a singularity image, optionally for an isolated build (requires sudo). If you specify to stream, expect the image name and an iterator to be returned. image, builder = Client.build(...) Parameters ========== recipe: the path to the recipe file (or source to build from). If not defined, we look for "Singularity" file in $PWD image: the image to build (if None, an arbitrary name will be used) isolated: if True, run build with --isolated flag sandbox: if True, create a writable sandbox writable: if True, use writable ext3 (sandbox takes preference) build_folder: where the container should be built. ext: the image extension to use. robot_name: boolean, default False. if you don't give your image a name (with "image") then a fun robot name will be generated instead. Highly recommended :) sudo: give sudo to the command (or not) default is True for build
388,806
def _expectation(p, mean1, none1, mean2, none2, nghp=None): with params_as_tensors_for(mean1): N = tf.shape(p.mu)[0] e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) e_A_xxt = tf.matmul(tf.tile(mean1.A[None, ...], (N, 1, 1)), e_xxt, transpose_a=True) e_b_xt = mean1.b[None, :, None] * p.mu[:, None, :] return e_A_xxt + e_b_xt
Compute the expectation: expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n) - m1(.) :: Linear mean function - m2(.) :: Identity mean function :return: NxQxD
388,807
def _on_send_complete(self, handle, error): assert handle is self._handle self._write_buffer_size -= 1 assert self._write_buffer_size >= 0 if self._error: self._log.debug(, error) elif error and error != pyuv.errno.UV_ECANCELED: self._log.warning(, error) self._protocol.error_received(TransportError.from_errno(error)) self._maybe_resume_protocol() self._maybe_close()
Callback used with handle.send().
388,808
def set_access_cookies(response, encoded_access_token, max_age=None): if not config.jwt_in_cookies: raise RuntimeWarning("set_access_cookies() called without " " configured to use cookies") response.set_cookie(config.access_cookie_name, value=encoded_access_token, max_age=max_age or config.cookie_max_age, secure=config.cookie_secure, httponly=True, domain=config.cookie_domain, path=config.access_cookie_path, samesite=config.cookie_samesite) if config.csrf_protect and config.csrf_in_cookies: response.set_cookie(config.access_csrf_cookie_name, value=get_csrf_token(encoded_access_token), max_age=max_age or config.cookie_max_age, secure=config.cookie_secure, httponly=False, domain=config.cookie_domain, path=config.access_csrf_cookie_path, samesite=config.cookie_samesite)
Takes a flask response object, and an encoded access token, and configures the response to set in the access token in a cookie. If `JWT_CSRF_IN_COOKIES` is `True` (see :ref:`Configuration Options`), this will also set the CSRF double submit values in a separate cookie. :param response: The Flask response object to set the access cookies in. :param encoded_access_token: The encoded access token to set in the cookies. :param max_age: The max age of the cookie. If this is None, it will use the `JWT_SESSION_COOKIE` option (see :ref:`Configuration Options`). Otherwise, it will use this as the cookies `max-age` and the JWT_SESSION_COOKIE option will be ignored. Values should be the number of seconds (as an integer).
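A hedged usage sketch for the function above (assumes the flask_jwt_extended API; the route, secret, and identity are illustrative):

from flask import Flask, jsonify
from flask_jwt_extended import JWTManager, create_access_token, set_access_cookies

app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"        # illustrative secret
app.config["JWT_TOKEN_LOCATION"] = ["cookies"]    # cookies must be enabled
jwt = JWTManager(app)

@app.route("/login", methods=["POST"])
def login():
    resp = jsonify(login=True)
    access_token = create_access_token(identity="example-user")
    set_access_cookies(resp, access_token)  # sets the access (and CSRF) cookies
    return resp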
388,809
def _to_dict(self):
    # Attribute/key names were stripped in extraction; they are restored from
    # the attributes referenced on each line.
    _dict = {}
    if hasattr(self, 'type') and self.type is not None:
        _dict['type'] = self.type
    if hasattr(self, 'credential_id') and self.credential_id is not None:
        _dict['credential_id'] = self.credential_id
    if hasattr(self, 'schedule') and self.schedule is not None:
        _dict['schedule'] = self.schedule._to_dict()
    if hasattr(self, 'options') and self.options is not None:
        _dict['options'] = self.options._to_dict()
    return _dict
Return a json dictionary representing this model.
388,810
def process(self, request, response, environ): data = self.authorize(request, response, environ, self.scope_handler.scopes) if isinstance(data, Response): return data code = self.token_generator.generate() expires = int(time.time()) + self.token_expiration auth_code = AuthorizationCode(client_id=self.client.identifier, code=code, expires_at=expires, redirect_uri=self.client.redirect_uri, scopes=self.scope_handler.scopes, data=data[0], user_id=data[1]) self.auth_code_store.save_code(auth_code) response.add_header("Location", self._generate_location(code)) response.body = "" response.status_code = 302 return response
Generates a new authorization token. A form to authorize the access of the application can be displayed with the help of `oauth2.web.SiteAdapter`.
388,811
def clone(self): cloned_filters = [f.clone() for f in self.filters] return self.__class__(cloned_filters, self.attr_type, self.attr_value)
Clone this AttributeMap object. Returns an AttributeMap object with the same values as the original.
388,812
def consul_fetch(client, path):
    # The stripped literals are restored as '' and '/', matching the usual
    # "ensure trailing slash" idiom for a recursive consul KV query.
    return client.kv.get('' if not path else path.rstrip('/') + '/', recurse=True)
Query consul for all keys/values within base path
388,813
def can_user_update_settings(request, view, obj=None): if obj is None: return if obj.customer and not obj.shared: return permissions.is_owner(request, view, obj) else: return permissions.is_staff(request, view, obj)
Only staff can update shared settings, otherwise user has to be an owner of the settings.
388,814
def attach(self, to_linode, config=None):
    # The endpoint format string and message/key literals were stripped;
    # '{}/attach' and the 'id' check follow the Linode volume API convention.
    result = self._client.post('{}/attach'.format(Volume.api_endpoint), model=self, data={
        "linode_id": to_linode.id if issubclass(type(to_linode), Base) else to_linode,
        "config": None if not config else config.id if issubclass(type(config), Base) else config,
    })
    if 'id' not in result:
        raise UnexpectedResponseError('Unexpected response when attaching volume!', json=result)
    self._populate(result)
    return True
Attaches this Volume to the given Linode
388,815
def raise_check_result(self):
    if not self.__class__.log_active_checks:
        return
    # State names, log levels and the log format were stripped; the values
    # below follow the Nagios/Alignak convention shown in the docstring.
    log_level = 'info'
    if self.state == 'DOWN':
        log_level = 'error'
    elif self.state == 'UNREACHABLE':
        log_level = 'warning'
    brok = make_monitoring_log(
        log_level, 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (
            self.get_name(), self.state, self.attempt, self.output)
    )
    self.broks.append(brok)
Raise ACTIVE CHECK RESULT entry Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..." :return: None
388,816
def get_mask_from_prob(self, cloud_probs, threshold=None): threshold = self.threshold if threshold is None else threshold if self.average_over: cloud_masks = np.asarray([convolve(cloud_prob, self.conv_filter) > threshold for cloud_prob in cloud_probs], dtype=np.int8) else: cloud_masks = (cloud_probs > threshold).astype(np.int8) if self.dilation_size: cloud_masks = np.asarray([dilation(cloud_mask, self.dilation_filter) for cloud_mask in cloud_masks], dtype=np.int8) return cloud_masks
Returns cloud mask by applying morphological operations -- convolution and dilation -- to input cloud probabilities. :param cloud_probs: cloud probability map :type cloud_probs: numpy array of cloud probabilities (shape n_images x n x m) :param threshold: A float from [0,1] specifying threshold :type threshold: float :return: raster cloud mask :rtype: numpy array (shape n_images x n x m)
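A tiny standalone illustration of the plain thresholding branch (no convolution or dilation; the numbers are made up):

import numpy as np

cloud_probs = np.array([[[0.1, 0.6], [0.45, 0.9]]])   # shape: n_images x n x m
threshold = 0.4
cloud_masks = (cloud_probs > threshold).astype(np.int8)
print(cloud_masks)   # [[[0 1] [1 1]]]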
388,817
def cancel_download_task(self, task_id, expires=None, **kwargs): data = { : expires, : task_id, } url = .format(BAIDUPAN_SERVER) return self._request(, , url=url, data=data, **kwargs)
Cancel an offline download task. :param task_id: ID of the task to cancel. :type task_id: str :param expires: Request expiry time; validated if provided. :type expires: int :return: requests.Response
388,818
def place2thing(self, name, location): self.engine._set_thing_loc( self.name, name, location ) if (self.name, name) in self.engine._node_objs: obj = self.engine._node_objs[self.name, name] thing = Thing(self, name) for port in obj.portals(): port.origin = thing for port in obj.preportals(): port.destination = thing self.engine._node_objs[self.name, name] = thing
Turn a Place into a Thing with the given location. It will keep all its attached Portals.
388,819
def _show_menu(self):
    current_widget = self._tabbed_window.get_current_widget()
    # The attribute name and urwid layout literals were stripped; restored
    # with the conventional urwid.Overlay relative-sizing arguments.
    if hasattr(current_widget, 'get_menu_widget'):
        menu_widget = current_widget.get_menu_widget(self._hide_menu)
        overlay = urwid.Overlay(menu_widget, self._tabbed_window,
                                align='center', width=('relative', 80),
                                valign='middle', height=('relative', 80))
        self._urwid_loop.widget = overlay
Show the overlay menu.
388,820
def nvim_io_recover(self, io: NvimIORecover[A]) -> NvimIO[B]: return eval_step(self.vim)(io.map(lambda a: a))
calls `map` to shift the recover execution to flat_map_nvim_io
388,821
def _get_adc_value(self, channel, average=None): conf = self.SCAN_OFF | self.SINGLE_ENDED | ((0x1e) & (channel << 1)) self._intf.write(self._base_addr + self.MAX_1239_ADD, array(, pack(, conf))) def read_data(): ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2) ret.reverse() ret[1] = ret[1] & 0x0f return unpack_from(, ret)[0] if average: raw = 0 for _ in range(average): raw += read_data() raw /= average else: raw = read_data() return raw
Read ADC
388,822
def furtherArgsProcessing(args): if isinstance(args, str): unprocessed = args.strip().split() if unprocessed[0] == : del unprocessed[0] args = parser.parse_args(unprocessed).__dict__ elif isinstance(args, argparse.Namespace): args = args.__dict__ elif isinstance(args, dict): pass else: raise CytherError( "Args must be a instance of str or argparse.Namespace, not ".format( str(type(args)))) if args[]: args[] = True args[] = {: 0, : 0, : 0, : 0} args[] = True return args
Converts args, and deals with incongruities that argparse couldn't handle
388,823
def _mutect_variant_stats(variant, sample_info): ref_depth, alt_depth = sample_info["AD"] depth = int(ref_depth) + int(alt_depth) vaf = float(alt_depth) / depth return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
Parse a single sample's variant calling statistics based on Mutect's (v1) VCF output Parameters ---------- variant : varcode.Variant sample_info : dict Dictionary of Mutect-specific variant calling fields Returns ------- VariantStats
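A worked example of the depth/VAF arithmetic described above (the AD values are made up):

ref_depth, alt_depth = 90, 10      # hypothetical AD field from a Mutect VCF record
depth = ref_depth + alt_depth      # 100
vaf = alt_depth / depth            # 0.1 variant allele frequency
print(depth, vaf)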
388,824
def FromFile(cls, path, actions_dict, resources_dict, file_format="yaml", name=None): format_map = { "yaml": cls._process_yaml } format_handler = format_map.get(file_format) if format_handler is None: raise ArgumentError("Unknown file format or file extension", file_format=file_format, \ known_formats=[x for x in format_map if format_map[x] is not None]) recipe_info = format_handler(path) if name is None: name, _ext = os.path.splitext(os.path.basename(path)) try: recipe_info = RecipeSchema.verify(recipe_info) except ValidationError as exc: raise RecipeFileInvalid("Recipe file does not match expected schema", file=path, error_message=exc.msg, **exc.params) description = recipe_info.get() try: resources = cls._parse_resource_declarations(recipe_info.get(, []), resources_dict) defaults = cls._parse_variable_defaults(recipe_info.get("defaults", [])) steps = [] for i, action in enumerate(recipe_info.get(, [])): action_name = action.pop() if action_name is None: raise RecipeFileInvalid("Action is missing required name parameter", \ parameters=action, path=path) action_class = actions_dict.get(action_name) if action_class is None: raise UnknownRecipeActionType("Unknown step specified in recipe", \ action=action_name, step=i + 1, path=path) step_resources = cls._parse_resource_usage(action, declarations=resources) fixed_files, _variable_files = cls._parse_file_usage(action_class, action) step = RecipeStep(action_class, action, step_resources, fixed_files) steps.append(step) return RecipeObject(name, description, steps, resources, defaults, path) except RecipeFileInvalid as exc: cls._future_raise(RecipeFileInvalid, RecipeFileInvalid(exc.msg, recipe=name, **exc.params), sys.exc_info()[2])
Create a RecipeObject from a file. The file should be a specially constructed yaml file that describes the recipe as well as the actions that it performs. Args: path (str): The path to the recipe file that we wish to load actions_dict (dict): A dictionary of named RecipeActionObject types that is used to look up all of the steps listed in the recipe file. resources_dict (dict): A dictionary of named RecipeResource types that is used to look up all of the shared resources listed in the recipe file. file_format (str): The file format of the recipe file. Currently we only support yaml. name (str): The name of this recipe if we created it originally from an archive.
388,825
def link_page_filter(self, page, modelview_name): new_args = request.view_args.copy() args = request.args.copy() args["page_" + modelview_name] = page return url_for( request.endpoint, **dict(list(new_args.items()) + list(args.to_dict().items())) )
Arguments are passed like: page_<VIEW_NAME>=<PAGE_NUMBER>
388,826
def format_struct(struct_def): text = [] text.append(.format(struct_def.tp_name)) text.extend( [.format(tab, format_variable(var)) for var in struct_def.members] ) for name in struct_def.names: text.append(.format(struct_def.tp_name, name)) return text
Returns a cython struct from a :attr:`StructSpec` instance.
388,827
def read_memory(self): if self.mem_empty == True: if self.mem_idx == 0: m_x = np.zeros(self.n) m_d = 0 else: m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0) m_d = np.mean(self.mem_d[:self.mem_idx]) else: m_x = np.mean(self.mem_x, axis=0) m_d = np.mean(np.delete(self.mem_d, self.mem_idx)) self.mem_idx += 1 if self.mem_idx > len(self.mem_x)-1: self.mem_idx = 0 self.mem_empty = False return m_d, m_x
This function reads the mean value of the target `d` and the input vector `x` from history.
388,828
def method(self, quote_id, payment_data, store_view=None):
    # The API path literal was stripped; 'cart_payment.method' is the usual
    # Magento endpoint for this call and is an assumption here.
    return bool(
        self.call('cart_payment.method', [quote_id, payment_data, store_view])
    )
Allows you to set a payment method for a shopping cart (quote). :param quote_id: Shopping cart ID (quote ID) :param payment_data, dict of payment details, example { 'po_number': '', 'method': 'checkmo', 'cc_cid': '', 'cc_owner': '', 'cc_number': '', 'cc_type': '', 'cc_exp_year': '', 'cc_exp_month': '' } :param store_view: Store view ID or code :return: boolean, True on success
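A hedged usage sketch of the call described above (the `cart_payment` object and quote ID are illustrative; only the dict keys come from the docstring):

payment_data = {
    'method': 'checkmo',    # check / money order
    'po_number': '',
    'cc_cid': '', 'cc_owner': '', 'cc_number': '', 'cc_type': '',
    'cc_exp_year': '', 'cc_exp_month': '',
}
ok = cart_payment.method(quote_id=42, payment_data=payment_data)
assert ok is True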
388,829
def make_library(**kwargs): library_yaml = kwargs.pop(, ) comp_yaml = kwargs.pop(, ) basedir = kwargs.pop(, os.path.abspath()) model_man = kwargs.get(, ModelManager(basedir=basedir)) model_comp_dict = model_man.make_library(library_yaml, library_yaml, comp_yaml) return dict(model_comp_dict=model_comp_dict, ModelManager=model_man)
Build and return a ModelManager object and fill the associated model library
388,830
def publish_proto_in_ipfs(self): ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) self._printout(ipfs_hash_base58)
Publish proto files in ipfs and print hash
388,831
def parse_delete_zone(prs, conn): prs_zone_delete = prs.add_parser(, help=) prs_zone_delete.add_argument(, action=, required=True, help=) conn_options(prs_zone_delete, conn) prs_zone_delete.set_defaults(func=delete_zone)
Delete zone. Arguments: prs: parser object of argparse conn: dictionary of connection information
388,832
def AddMapping(self, filename, new_mapping): for field in self._REQUIRED_MAPPING_FIELDS: if field not in new_mapping: raise problems.InvalidMapping(field) if filename in self.GetKnownFilenames(): raise problems.DuplicateMapping(filename) self._file_mapping[filename] = new_mapping
Adds an entry to the list of known filenames. Args: filename: The filename whose mapping is being added. new_mapping: A dictionary with the mapping to add. Must contain all fields in _REQUIRED_MAPPING_FIELDS. Raises: DuplicateMapping if the filename already exists in the mapping InvalidMapping if not all required fields are present
388,833
def replace_namespaced_pod(self, name, namespace, body, **kwargs):
    # Key names follow the kubernetes-client generated-code convention.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs)
    else:
        (data) = self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs)
        return data
replace_namespaced_pod # noqa: E501 replace the specified Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_pod(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Pod (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Pod body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Pod If the method is called asynchronously, returns the request thread.
388,834
def spin(self, use_thread=False): if use_thread: if self._thread is not None: raise self._thread = threading.Thread(target=self._spin_internal) self._thread.setDaemon(True) self._thread.start() else: self._spin_internal()
call callback for all data forever (until \C-c) :param use_thread: use thread for spin (do not block)
388,835
def send(self): self._generate_email() if self.verbose: print( "Debugging info" "\n--------------" "\n{} Message created.".format(timestamp()) ) recipients = [] for i in (self.to, self.cc, self.bcc): if i: if isinstance(i, MutableSequence): recipients += i else: recipients.append(i) session = self._get_session() if self.verbose: print(timestamp(), "Login successful.") session.sendmail(self.from_, recipients, self.message.as_string()) session.quit() if self.verbose: print(timestamp(), "Logged out.") if self.verbose: print( timestamp(), type(self).__name__ + " info:", self.__str__(indentation="\n * "), ) print("Message sent.")
Send the message. First, a message is constructed, then a session with the email servers is created, finally the message is sent and the session is stopped.
388,836
def pretty_print(self): return u"%s\t%.3f\t%.3f\t%s" % ( (self.identifier or u""), (self.begin if self.begin is not None else TimeValue("-2.000")), (self.end if self.end is not None else TimeValue("-1.000")), (self.text or u"") )
Pretty print representation of this fragment, as ``(identifier, begin, end, text)``. :rtype: string .. versionadded:: 1.7.0
388,837
def _to_dict(self):
    # Dictionary keys were stripped in extraction; they are restored from the
    # attribute names referenced on each line.
    _dict = {}
    if hasattr(self, 'content') and self.content is not None:
        _dict['content'] = self.content
    if hasattr(self, 'id') and self.id is not None:
        _dict['id'] = self.id
    if hasattr(self, 'created') and self.created is not None:
        _dict['created'] = self.created
    if hasattr(self, 'updated') and self.updated is not None:
        _dict['updated'] = self.updated
    if hasattr(self, 'contenttype') and self.contenttype is not None:
        _dict['contenttype'] = self.contenttype
    if hasattr(self, 'language') and self.language is not None:
        _dict['language'] = self.language
    if hasattr(self, 'parentid') and self.parentid is not None:
        _dict['parentid'] = self.parentid
    if hasattr(self, 'reply') and self.reply is not None:
        _dict['reply'] = self.reply
    if hasattr(self, 'forward') and self.forward is not None:
        _dict['forward'] = self.forward
    return _dict
Return a json dictionary representing this model.
388,838
def depth(self): return len(self.path.rstrip(os.sep).split(os.sep))
Returns the number of ancestors of this directory.
388,839
def update(self, name, color):
    json = None
    if name and color:
        # Strip a leading '#' from the colour code, per the docstring below.
        if color[0] == '#':
            color = color[1:]
        json = self._json(self._patch(self._api, data=dumps({
            'name': name, 'color': color})), 200)
    if json:
        self._update_(json)
        return True
    return False
Update this label. :param str name: (required), new name of the label :param str color: (required), color code, e.g., 626262, no leading '#' :returns: bool
388,840
def rename(self, container, name):
    url = self._url("/containers/{0}/rename", container)
    params = {'name': name}
    res = self._post(url, params=params)
    self._raise_for_status(res)
Rename a container. Similar to the ``docker rename`` command. Args: container (str): ID of the container to rename name (str): New name for the container Raises: :py:class:`docker.errors.APIError` If the server returns an error.
388,841
def min_base_size_mask(self, size, hs_dims=None, prune=False): return MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune)
Returns MinBaseSizeMask object with correct row, col and table masks. The returned object stores the necessary information about the base size, as well as about the base values. It can create corresponding masks in the row, column, and table directions, based on the corresponding base values (the values of the unweighted margins). Usage: >>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice >>> cube_slice.min_base_size_mask(30).row_mask >>> cube_slice.min_base_size_mask(50).column_mask >>> cube_slice.min_base_size_mask(22).table_mask
388,842
def check_bitdepth_rescale( palette, bitdepth, transparent, alpha, greyscale): if palette: if len(bitdepth) != 1: raise ProtocolError( "with palette, only a single bitdepth may be used") (bitdepth, ) = bitdepth if bitdepth not in (1, 2, 4, 8): raise ProtocolError( "with palette, bitdepth must be 1, 2, 4, or 8") if transparent is not None: raise ProtocolError("transparent and palette not compatible") if alpha: raise ProtocolError("alpha and palette not compatible") if greyscale: raise ProtocolError("greyscale and palette not compatible") return bitdepth, None if greyscale and not alpha: (bitdepth,) = bitdepth if bitdepth in (1, 2, 4, 8, 16): return bitdepth, None if bitdepth > 8: targetbitdepth = 16 elif bitdepth == 3: targetbitdepth = 4 else: assert bitdepth in (5, 6, 7) targetbitdepth = 8 return targetbitdepth, [(bitdepth, targetbitdepth)] assert alpha or not greyscale depth_set = tuple(set(bitdepth)) if depth_set in [(8,), (16,)]: (bitdepth, ) = depth_set return bitdepth, None targetbitdepth = (8, 16)[max(bitdepth) > 8] return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
Returns (bitdepth, rescale) pair.
388,843
def frompsl(args): from jcvi.formats.sizes import Sizes p = OptionParser(frompsl.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) pslfile, oldfasta, newfasta = args pf = oldfasta.split(".")[0] chainfile = pf + ".chain" twobitfiles = [] for fastafile in (oldfasta, newfasta): tbfile = faToTwoBit(fastafile) twobitfiles.append(tbfile) oldtwobit, newtwobit = twobitfiles if need_update(pslfile, chainfile): cmd = "axtChain -linearGap=medium -psl {0}".format(pslfile) cmd += " {0} {1} {2}".format(oldtwobit, newtwobit, chainfile) sh(cmd) sortedchain = chainfile.rsplit(".", 1)[0] + ".sorted.chain" if need_update(chainfile, sortedchain): cmd = "chainSort {0} {1}".format(chainfile, sortedchain) sh(cmd) netfile = pf + ".net" oldsizes = Sizes(oldfasta).filename newsizes = Sizes(newfasta).filename if need_update((sortedchain, oldsizes, newsizes), netfile): cmd = "chainNet {0} {1} {2}".format(sortedchain, oldsizes, newsizes) cmd += " {0} /dev/null".format(netfile) sh(cmd) liftoverfile = pf + ".liftover.chain" if need_update((netfile, sortedchain), liftoverfile): cmd = "netChainSubset {0} {1} {2}".\ format(netfile, sortedchain, liftoverfile) sh(cmd)
%prog frompsl old.new.psl old.fasta new.fasta Generate chain file from psl file. The pipeline is describe in: <http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver>
388,844
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList') -> 'QubitOrder':
    # The type annotations were stripped in extraction; they are restored from
    # the Cirq convention for this helper.
    if isinstance(val, collections.Iterable):
        return QubitOrder.explicit(val)
    if isinstance(val, QubitOrder):
        return val
    raise ValueError(
        "Don't know how to interpret <{}> as a Basis.".format(val))
Converts a value into a basis. Args: val: An iterable or a basis. Returns: The basis implied by the value.
388,845
def add_double_proxy_for(self, label: str, shape: Collection[int] = None) -> Vertex: if shape is None: return Vertex._from_java_vertex(self.unwrap().addDoubleProxyFor(_VertexLabel(label).unwrap())) else: return Vertex._from_java_vertex(self.unwrap().addDoubleProxyFor(_VertexLabel(label).unwrap(), shape))
Creates a proxy vertex for the given label and adds to the sequence item
388,846
def get_object(cls, abbr): obj = get_metadata(abbr) if obj is None: msg = % abbr raise DoesNotExist(msg) return cls(obj)
This particular model needs its own constructor in order to take advantage of the metadata cache in billy.util, which would otherwise return unwrapped objects.
388,847
def daemon_mode(self, args, options): cws = ControlWebSocket(self, args, options) cws.start() if in args and args[]: lcs = LocalControlSocket(self, args, options) lcs.start() lcs.join() cws.join()
Open a ControlWebSocket to SushiBar server and listend for remote commands. Args: args (dict): chef command line arguments options (dict): additional compatibility mode options given on command line
388,848
def set_vcard(self, vcard): iq = aioxmpp.IQ( type_=aioxmpp.IQType.SET, payload=vcard, ) yield from self.client.send(iq)
Store the vCard `vcard` for the connected entity. :param vcard: the vCard to store. .. note:: `vcard` should always be derived from the result of `get_vcard` to preserve the elements of the vcard the client does not modify. .. warning:: It is in the responsibility of the user to supply valid vcard data as per :xep:`0054`.
388,849
def swapon(name, priority=None):
    # Key and command literals were stripped in extraction (and a stray '*'
    # from the CLI example leaked in); they are restored following the
    # SaltStack mount-module conventions.
    ret = {}
    on_ = swaps()
    if name in on_:
        ret['stats'] = on_[name]
        ret['new'] = False
        return ret
    if __grains__['kernel'] == 'SunOS':
        if __grains__['virtual'] != 'zone':
            __salt__['cmd.run']('swap -a {0}'.format(name), python_shell=False)
        else:
            return False
    else:
        cmd = 'swapon {0}'.format(name)
        if priority and 'AIX' not in __grains__['kernel']:
            cmd += ' -p {0}'.format(priority)
        __salt__['cmd.run'](cmd, python_shell=False)
    on_ = swaps()
    if name in on_:
        ret['stats'] = on_[name]
        ret['new'] = True
        return ret
    return ret
Activate a swap disk .. versionchanged:: 2016.3.2 CLI Example: .. code-block:: bash salt '*' mount.swapon /root/swapfile
388,850
def destinations(stop): from pyruter.api import Departures async def get_destinations(): async with aiohttp.ClientSession() as session: data = Departures(LOOP, stop, session=session) result = await data.get_final_destination() print(json.dumps(result, indent=4, sort_keys=True, ensure_ascii=False)) LOOP.run_until_complete(get_destinations())
Get destination information.
388,851
def read_anchors(ac, qorder, sorder, minsize=0): all_anchors = defaultdict(list) nanchors = 0 anchor_to_block = {} for a, b, idx in ac.iter_pairs(minsize=minsize): if a not in qorder or b not in sorder: continue qi, q = qorder[a] si, s = sorder[b] pair = (qi, si) all_anchors[(q.seqid, s.seqid)].append(pair) anchor_to_block[pair] = idx nanchors += 1 logging.debug("A total of {0} anchors imported.".format(nanchors)) assert nanchors == len(anchor_to_block) return all_anchors, anchor_to_block
anchors file are just (geneA, geneB) pairs (with possible deflines)
388,852
def jeffreys(logu, name=None): with tf.compat.v1.name_scope(name, "jeffreys", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return 0.5 * tf.math.expm1(logu) * logu
The Jeffreys Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Jeffreys Csiszar-function is: ```none f(u) = 0.5 ( u log(u) - log(u) ) = 0.5 kl_forward + 0.5 kl_reverse = symmetrized_csiszar_function(kl_reverse) = symmetrized_csiszar_function(kl_forward) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
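A standalone NumPy check of the same formula, f(u) = 0.5*(u*log(u) - log(u)) evaluated at u = exp(logu) (illustrative, outside TensorFlow):

import numpy as np

logu = np.array([-1.0, 0.0, 1.0])
jeffreys = 0.5 * np.expm1(logu) * logu          # 0.5 * (u - 1) * log(u)
reference = 0.5 * (np.exp(logu) * logu - logu)  # 0.5 * (u*log(u) - log(u))
assert np.allclose(jeffreys, reference)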
388,853
def dataset_search(q=None, type=None, keyword=None, owningOrg=None,
                   publishingOrg=None, hostingOrg=None, decade=None,
                   publishingCountry=None, facet=None, facetMincount=None,
                   facetMultiselect=None, hl=False, limit=100, offset=None,
                   **kwargs):
    # A run of concatenated docstring fragments had leaked into this cell and
    # has been dropped. Dict keys are restored from the parameter names; the
    # 'dataset/search' path follows the GBIF registry API.
    url = gbif_baseurl + 'dataset/search'
    args = {'q': q, 'type': type, 'keyword': keyword, 'owningOrg': owningOrg,
            'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,
            'decade': decade, 'publishingCountry': publishingCountry,
            'facet': facet, 'facetMincount': facetMincount,
            'facetMultiselect': facetMultiselect, 'hl': hl,
            'limit': limit, 'offset': offset}
    gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
    if gbif_kwargs is not None:
        # Per-field facet parameters use '.' in the API but '_' in Python kwargs.
        xx = dict(zip([re.sub('_', '.', x) for x in gbif_kwargs.keys()],
                      gbif_kwargs.values()))
        args.update(xx)
    kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
    out = gbif_GET(url, args, **kwargs)
    return out
Full text search across all datasets. Results are ordered by relevance. :param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*`` :param type: [str] Type of dataset, options include OCCURRENCE, etc. :param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02. :param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations` :param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations` :param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations` :param publishingCountry: [str] Publishing country. :param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below) :param facet: [str] A list of facet names used to retrieve the 100 most frequent values for a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade, and publishingCountry. Additionally subtype and country are legal values but not yet implemented, so data will not yet be returned for them. :param facetMincount: [str] Used in combination with the facet parameter. Set facetMincount={#} to exclude facets with a count less than {#}, e.g. http://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000 only shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have counts less than 10000. :param facetMultiselect: [bool] Used in combination with the facet parameter. Set facetMultiselect=True to still return counts for values that are not currently filtered, e.g. http://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true still shows type values 'OCCURRENCE' and 'METADATA' even though type is being filtered by type=CHECKLIST :param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext search fields. The highlight will be an emphasis tag of class 'gbifH1' e.g. http://api.gbif.org/v1/dataset/search?q=plant&hl=true Fulltext search fields include: title, keyword, country, publishing country, publishing organization title, hosting organization title, and description. One additional full text field is searched which includes information from metadata documents, but the text of this field is not returned in the response. :param limit: [int] Number of results to return. Default: ``300`` :param offset: [int] Record to start at. Default: ``0`` :note: Note that you can pass in additional faceting parameters on a per field basis. For example, if you want to limit the numbef of facets returned from a field ``foo`` to 3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters, but does allow some. See also examples. :return: A dictionary References: http://www.gbif.org/developer/registry#datasetSearch Usage:: from pygbif import registry # Gets all datasets of type "OCCURRENCE". 
registry.dataset_search(type="OCCURRENCE", limit = 10) # Fulltext search for all datasets having the word "amsterdam" somewhere in # its metadata (title, description, etc). registry.dataset_search(q="amsterdam", limit = 10) # Limited search registry.dataset_search(type="OCCURRENCE", limit=2) registry.dataset_search(type="OCCURRENCE", limit=2, offset=10) # Search by decade registry.dataset_search(decade=1980, limit = 10) # Faceting ## just facets registry.dataset_search(facet="decade", facetMincount=10, limit=0) ## data and facets registry.dataset_search(facet="decade", facetMincount=10, limit=2) ## many facet variables registry.dataset_search(facet=["decade", "type"], facetMincount=10, limit=0) ## facet vars ### per variable paging x = registry.dataset_search( facet = ["decade", "type"], decade_facetLimit = 3, type_facetLimit = 3, limit = 0 ) ## highlight x = registry.dataset_search(q="plant", hl=True, limit = 10) [ z['description'] for z in x['results'] ]
388,854
def hget(self, name, key): with self.pipe as pipe: f = Future() res = pipe.hget(self.redis_key(name), self.memberparse.encode(key)) def cb(): f.set(self._value_decode(key, res.result)) pipe.on_execute(cb) return f
Returns the value stored in the field, None if the field doesn't exist. :param name: str the name of the redis key :param key: the member of the hash :return: Future()
388,855
def init(self): if not self.export_enable: return None self.index=.format(self.index, datetime.utcnow().strftime("%Y.%m.%d")) template_body = { "mappings": { "glances": { "dynamic_templates": [ { "integers": { "match_mapping_type": "long", "mapping": { "type": "integer" } } }, { "strings": { "match_mapping_type": "string", "mapping": { "type": "text", "fields": { "raw": { "type": "keyword", "ignore_above": 256 } } } } } ] } } } try: es = Elasticsearch(hosts=[.format(self.host, self.port)]) except Exception as e: logger.critical("Cannot connect to ElasticSearch server %s:%s (%s)" % (self.host, self.port, e)) sys.exit(2) else: logger.info("Connected to the ElasticSearch server %s:%s" % (self.host, self.port)) try: index_count = es.count(index=self.index)[] except Exception as e: es.indices.create(index=self.index,body=template_body) else: logger.info("The index %s exists and holds %s entries." % (self.index, index_count)) return es
Init the connection to the ES server.
388,856
def apply_T8(word): WORD = word offset = 0 for vv in tail_diphthongs(WORD): i = vv.start(1) + 1 + offset WORD = WORD[:i] + + WORD[i:] offset += 1 RULE = if word != WORD else return WORD, RULE
Split /ie/, /uo/, or /yö/ sequences in syllables that do not take primary stress.
388,857
def _get_one_pending_job(self): pending_job_key, pending_job = self._pending_jobs.popitem() pending_job_state = pending_job.state pending_job_call_stack = pending_job.call_stack pending_job_src_block_id = pending_job.src_block_id pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx self._deregister_analysis_job(pending_job.caller_func_addr, pending_job) l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr) self._graph_add_edge(pending_job_src_block_id, pending_job_key, jumpkind="Ijk_FakeRet", stmt_idx=pending_job_src_exit_stmt_idx, ins_addr=pending_job.src_exit_ins_addr) return None pending_job_state.history.jumpkind = job = CFGJob(pending_job_state.addr, pending_job_state, self._context_sensitivity_level, src_block_id=pending_job_src_block_id, src_exit_stmt_idx=pending_job_src_exit_stmt_idx, src_ins_addr=pending_job.src_exit_ins_addr, call_stack=pending_job_call_stack, ) l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key)) return job
Retrieve a pending job. :return: A CFGJob instance or None
388,858
def ensure_schema(self): self._ensure_filename() if not os.path.isfile(self.filename): self.create_schema()
Create file and schema if it does not exist yet.
388,859
def regroup_commands(commands): grouped = [] pending = [] def group_pending(): if not pending: return new_command = grouped_command(pending) result = [] while pending: result.append(pending.pop(0)) grouped.append((new_command, result)) for command, next_command in peek(commands): if can_group_commands(command, next_command): if pending and not can_group_commands(pending[0], command): group_pending() pending.append(command) else: if pending and can_group_commands(pending[0], command): pending.append(command) else: grouped.append((command.clone(), [command])) group_pending() group_pending() return grouped
Returns a list of tuples: [(command_to_run, [list, of, commands])] If the list of commands has a single item, the command was not grouped.
388,860
def straight_throat(target, throat_centroid='throat.centroid',
                    throat_vector='throat.vector', throat_length='throat.length'):
    # Default dictionary keys were stripped; 'throat.*' names follow the OpenPNM
    # convention, and the 'head'/'tail' keys come from the docstring below.
    network = target.project.network
    throats = network.map_throats(throats=target.Ts, origin=target)
    center = network[throat_centroid][throats]
    vector = network[throat_vector][throats]
    length = network[throat_length][throats]
    EP1 = center - 0.5 * length[:, _sp.newaxis] * vector
    EP2 = center + 0.5 * length[:, _sp.newaxis] * vector
    return {'head': EP1, 'tail': EP2}
r""" Calculate the coordinates of throat endpoints given a central coordinate, unit vector along the throat direction and a length. Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_centroid : string Dictionary key of the throat center coordinates. throat_vector : string Dictionary key of the throat vector pointing along the length of the throats. throat_length : string Dictionary key of the throat length. Returns ------- EP : dictionary Coordinates of throat endpoints stored in Dict form. Can be accessed via the dict keys 'head' and 'tail'.
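The endpoint geometry reduces to center ± 0.5 * length * vector; a small standalone NumPy illustration with made-up values:

import numpy as np

center = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])   # one row per throat
vector = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])   # unit direction vectors
length = np.array([2.0, 4.0])

EP1 = center - 0.5 * length[:, np.newaxis] * vector
EP2 = center + 0.5 * length[:, np.newaxis] * vector
print(EP1)   # [[-1.  0.  0.] [ 1. -1.  1.]]
print(EP2)   # [[ 1.  0.  0.] [ 1.  3.  1.]]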
388,861
def get_par_css_dataframe(self): assert self.jco is not None assert self.pst is not None jco = self.jco.to_dataframe() weights = self.pst.observation_data.loc[jco.index,"weight"].copy().values jco = (jco.T * weights).T dss_sum = jco.apply(np.linalg.norm) css = (dss_sum / float(self.pst.nnz_obs)).to_frame() css.columns = ["pest_css"] self.pst.add_transform_columns() parval1 = self.pst.parameter_data.loc[dss_sum.index,"parval1_trans"].values css.loc[:,"hill_css"] = (dss_sum * parval1) / (float(self.pst.nnz_obs)**2) return css
get a dataframe of composite scaled sensitivities. Includes both PEST-style and Hill-style. Returns ------- css : pandas.DataFrame
388,862
def clear(self): def _clear(node): if node is not None: _clear(node.left) _clear(node.right) node.free() _clear(self._root) self._count = 0 self._root = None
T.clear() -> None. Remove all items from T.
388,863
def StatFSFromClient(args): if platform.system() == "Windows": raise RuntimeError("os.statvfs not available on Windows") for path in args.path_list: try: fd = vfs.VFSOpen(rdf_paths.PathSpec(path=path, pathtype=args.pathtype)) st = fd.StatFS() mount_point = fd.GetMountPoint() except (IOError, OSError): continue unix = rdf_client_fs.UnixVolume(mount_point=mount_point) yield rdf_client_fs.Volume( bytes_per_sector=(st.f_frsize or st.f_bsize), sectors_per_allocation_unit=1, total_allocation_units=st.f_blocks, actual_available_allocation_units=st.f_bavail, unixvolume=unix)
Call os.statvfs for a given list of paths. Args: args: An `rdf_client_action.StatFSRequest`. Yields: `rdf_client_fs.UnixVolume` instances. Raises: RuntimeError: if called on a Windows system.
388,864
def show_vcs_output_vcs_guid(self, **kwargs):
    config = ET.Element("config")
    show_vcs = ET.Element("show_vcs")
    config = show_vcs
    output = ET.SubElement(show_vcs, "output")
    vcs_guid = ET.SubElement(output, "vcs-guid")
    # kwargs key names were stripped; restored from the element name and the
    # usual callback pattern in this generated module.
    vcs_guid.text = kwargs.pop('vcs_guid')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
388,865
def OpenFile(self, filepath): archive = False if in filepath: archive = True archive_type = if in filepath: archive = True archive_type = if archive: path, archived_file = filepath.split(archive_type) path += archive_type zip_file = zipfile.ZipFile(path) return zip_file.open(archived_file.strip()) return open(filepath)
open()-replacement that automatically handles zip files. This assumes there is at most one .zip in the file path. Args: filepath: the path to the file to open. Returns: An open file-like object.
388,866
def udf_signature(input_type, pin, klass): nargs = len(input_type) if not nargs: return () if nargs == 1: r, = input_type result = (klass,) + rule_to_python_type(r) + nullable(r) return (result,) return tuple( klass if pin is not None and pin == i else ((klass,) + rule_to_python_type(r) + nullable(r)) for i, r in enumerate(input_type) )
Compute the appropriate signature for a :class:`~ibis.expr.operations.Node` from a list of input types `input_type`. Parameters ---------- input_type : List[ibis.expr.datatypes.DataType] A list of :class:`~ibis.expr.datatypes.DataType` instances representing the signature of a UDF/UDAF. pin : Optional[int] If this is not None, pin the `pin`-th argument type to `klass` klass : Union[Type[pd.Series], Type[SeriesGroupBy]] The pandas object that every argument type should contain Returns ------- Tuple[Type] A tuple of types appropriate for use in a multiple dispatch signature. Examples -------- >>> from pprint import pprint >>> import pandas as pd >>> from pandas.core.groupby import SeriesGroupBy >>> import ibis.expr.datatypes as dt >>> input_type = [dt.string, dt.double] >>> sig = udf_signature(input_type, pin=None, klass=pd.Series) >>> pprint(sig) # doctest: +ELLIPSIS ((<class '...Series'>, <... '...str...'>, <... 'NoneType'>), (<class '...Series'>, <... 'float'>, <... 'numpy.floating'>, <... 'NoneType'>)) >>> not_nullable_types = [ ... dt.String(nullable=False), dt.Double(nullable=False)] >>> sig = udf_signature(not_nullable_types, pin=None, klass=pd.Series) >>> pprint(sig) # doctest: +ELLIPSIS ((<class '...Series'>, <... '...str...'>), (<class '...Series'>, <... 'float'>, <... 'numpy.floating'>)) >>> sig0 = udf_signature(input_type, pin=0, klass=SeriesGroupBy) >>> sig1 = udf_signature(input_type, pin=1, klass=SeriesGroupBy) >>> pprint(sig0) # doctest: +ELLIPSIS (<class '...SeriesGroupBy'>, (<class '...SeriesGroupBy'>, <... 'float'>, <... 'numpy.floating'>, <... 'NoneType'>)) >>> pprint(sig1) # doctest: +ELLIPSIS ((<class '...SeriesGroupBy'>, <... '...str...'>, <... 'NoneType'>), <class '...SeriesGroupBy'>)
388,867
def down_ec2(instance_id, region, access_key_id, secret_access_key):
    conn = connect_to_ec2(region, access_key_id, secret_access_key)
    instance = conn.stop_instances(instance_ids=instance_id)[0]
    while instance.state != "stopped":
        log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    # The final message literal was stripped; mirrored from the loop above.
    log_green('Instance state: %s' % instance.state)
shutdown of an existing EC2 instance
388,868
def writecc (listoflists,file,writetype=,extra=2): if type(listoflists[0]) not in [ListType,TupleType]: listoflists = [listoflists] outfile = open(file,writetype) rowstokill = [] list2print = copy.deepcopy(listoflists) for i in range(len(listoflists)): if listoflists[i] == [] or listoflists[i]== or listoflists[i]==: rowstokill = rowstokill + [i] rowstokill.reverse() for row in rowstokill: del list2print[row] maxsize = [0]*len(list2print[0]) for col in range(len(list2print[0])): items = pstat.colex(list2print,col) items = [pstat.makestr(_) for _ in items] maxsize[col] = max(map(len,items)) + extra for row in listoflists: if row == [] or row == : outfile.write() elif row == [] or row == : dashes = [0]*len(maxsize) for j in range(len(maxsize)): dashes[j] = *(maxsize[j]-2) outfile.write(pstat.lineincustcols(dashes,maxsize)) else: outfile.write(pstat.lineincustcols(row,maxsize)) outfile.write() outfile.close() return None
Writes a list of lists to a file in columns, customized by the max size of items within the columns (max size of items in col, +2 characters) to specified file. File-overwrite is the default. Usage: writecc (listoflists,file,writetype='w',extra=2) Returns: None
388,869
def stringIO(value, allow_empty = False, **kwargs): if not value and not allow_empty: raise errors.EmptyValueError( % value) elif not value: return None if not isinstance(value, io.StringIO): raise ValueError( % (value, type(value))) return value
Validate that ``value`` is a :class:`StringIO <python:io.StringIO>` object. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`StringIO <python:io.StringIO>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises NotStringIOError: if ``value`` is not a :class:`StringIO <python:io.StringIO>` object
388,870
def main(): dir_path= zfile= if in sys.argv: ind=sys.argv.index() dir_path=sys.argv[ind+1] if in sys.argv: print(main.__doc__) sys.exit() if in sys.argv: ind=sys.argv.index() inspec=sys.argv[ind+1] if in sys.argv: ind=sys.argv.index() zfile=sys.argv[ind+1] inspec=dir_path+"/"+inspec zfile=dir_path+"/"+zfile zredo=open(zfile,"w") specs=[] prior_spec_data=open(inspec,).readlines() for line in prior_spec_data: line=line.replace("Dir"," Dir") line=line.replace("OKir"," OKir") line=line.replace("Fish"," Fish") line=line.replace("Man"," Man") line=line.replace("GC"," GC") line=line.replace("-T"," - T") line=line.replace("-M"," - M") rec=line.split() if len(rec)<2: sys.exit() if rec[1]== or rec[1]==: spec=rec[0] specs.append(spec) comp_name=string.uppercase[specs.count(spec)-1] calculation_type="DE-FM" if rec[1]== and rec[2]=="Kir": calculation_type="DE-BFL" if rec[1]== and rec[2]=="OKir": calculation_type="DE-BFL-A" if rec[1]== and rec[2]=="Fish": calculation_type="DE-FM" if rec[1]== : calculation_type="DE-BFP" min,max=rec[3],rec[5] beg,end="","" if min=="NRM": beg=0 if min[0]==: beg=float(min[1:])*1e-3 elif min[0]==: beg=float(min[1:])+273 if max[0]==: end=float(max[1:])*1e-3 elif max[0]==: end=float(max[1:])+273 if beg==0:beg=273 outstring=%(spec,calculation_type,beg,end,comp_name) zredo.write(outstring)
NAME dir_redo.py DESCRIPTION converts the Cogne DIR format to PmagPy redo file SYNTAX dir_redo.py [-h] [command line options] OPTIONS -h: prints help message and quits -f FILE: specify input file -F FILE: specify output file, default is 'zeq_redo'
388,871
def cee_map_priority_table_map_cos4_pgid(self, **kwargs):
    config = ET.Element("config")
    cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
    name_key = ET.SubElement(cee_map, "name")
    # kwargs key names were stripped; restored from the element names and the
    # usual callback pattern in this generated module.
    name_key.text = kwargs.pop('name')
    priority_table = ET.SubElement(cee_map, "priority-table")
    map_cos4_pgid = ET.SubElement(priority_table, "map-cos4-pgid")
    map_cos4_pgid.text = kwargs.pop('map_cos4_pgid')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
388,872
def modify(self, current_modified_line, anchors, file_path, file_lines=None, index=None): open_wrapper_index = current_modified_line.rfind(self._open) return current_modified_line[:open_wrapper_index - 1] + "\n"
Removes the trailing AnchorHub tag from the end of the line being examined. :param current_modified_line: string representing the the line at file_lines[index] _after_ any previous modifications from other WriterStrategy objects :param anchors: Dictionary mapping string file paths to inner dictionaries. These inner dictionaries map string AnchorHub tags to string generated anchors :param file_path: string representing the file_path of the current file being examined by this WriterStrategy :param file_lines: List of strings corresponding to lines in a text file :param index: index of file_lines corresponding to the current line :return: string. A version of current_modified_line that has the AnchorHub tag removed from the end of it
388,873
def setArg(self, namespace, key, value): assert key is not None assert value is not None namespace = self._fixNS(namespace) self.args[(namespace, key)] = value if not (namespace is BARE_NS): self.namespaces.add(namespace)
Set a single argument in this namespace
388,874
def create_token(self, request, refresh_token=False): if callable(self.expires_in): expires_in = self.expires_in(request) else: expires_in = self.expires_in request.expires_in = expires_in return self.request_validator.get_jwt_bearer_token(None, None, request)
Create a JWT token using the request validator's `get_jwt_bearer_token` method.
388,875
def remove_acl(cursor, uuid_, permissions): if not isinstance(permissions, (list, set, tuple,)): raise TypeError("``permissions`` is an invalid type: {}" .format(type(permissions))) permissions = set(permissions) for uid, permission in permissions: cursor.execute(, (uuid_, uid, permission,))
Given a ``uuid`` and a set of permissions given as a tuple of ``uid`` and ``permission``, remove these entries from the database.
388,876
def sqlite_default(): def parse_url(url): if url.endswith() or url.endswith(): if not url.startswith(): url = + url elif url.endswith() or url.endswith(): conf = configparser.SafeConfigParser(allow_no_value=True) conf.optionxform = str conf.read(url) url = conf.get(, ) return url return parse_url
Prepend default scheme if none is specified. This helps provides backwards compatibility with old versions of taxtastic where sqlite was the automatic default database.
388,877
def sin(duration: int, amp: complex, freq: float = None, phase: float = 0, name: str = None) -> SamplePulse: if freq is None: freq = 1/duration return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name)
Generates sine wave `SamplePulse`. Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude. freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle. phase: Pulse phase. name: Name of pulse.
388,878
def datagram_handler(name, logname, host, port): return wrap_log_handler(logging.handlers.DatagramHandler(host, port))
A Bark logging handler logging output to a datagram (UDP) socket. The server listening at the given 'host' and 'port' will be sent a pickled dictionary. Similar to logging.handlers.DatagramHandler.
388,879
def sanitize_ep(endpoint, plural=False):
    # The pluralisation literals were stripped; 'y' -> 'ies' / append 's' is
    # the natural reading of the surviving slicing logic.
    if plural:
        if endpoint.endswith('y'):
            endpoint = endpoint[:-1] + 'ies'
        elif not endpoint.endswith('s'):
            endpoint += 's'
    else:
        endpoint = endpoint[:-1]
    return endpoint
Sanitize an endpoint to a singular or plural form. Used mostly for convenience in the `_parse` method to grab the raw data from queried datasets. XXX: this is el cheapo (not quite good enough)
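Doctest-style usage of the function above, assuming the reconstructed literals:

>>> sanitize_ep('dataset', plural=True)
'datasets'
>>> sanitize_ep('category', plural=True)
'categories'
>>> sanitize_ep('datasets', plural=False)
'dataset'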
388,880
def manager(self, value): "Set the manager object in the global _managers dict." pid = current_process().ident if _managers is None: raise RuntimeError("Can not set the manager following a system exit.") if pid not in _managers: _managers[pid] = value else: raise Exception("Manager already set for pid %s" % pid)
Set the manager object in the global _managers dict.
388,881
def muscle(sequences=None, alignment_file=None, fasta=None, fmt=, as_file=False, maxiters=None, diags=False, gap_open=None, gap_extend=None, muscle_bin=None): fastaclustalfasta if sequences: fasta_string = _get_fasta_string(sequences) elif fasta: fasta_string = open(fasta, ).read() if muscle_bin is None: muscle_bin = os.path.join(BINARY_DIR, .format(platform.system().lower())) aln_format = if fmt == : aln_format = muscle_cline = .format(muscle_bin, aln_format) if maxiters is not None: muscle_cline += .format(maxiters) if diags: muscle_cline += if all([gap_open is not None, gap_extend is not None]): muscle_cline += .format(gap_open, gap_extend) muscle = sp.Popen(str(muscle_cline), stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True, shell=True) if sys.version_info[0] > 2: alignment = muscle.communicate(input=fasta_string)[0] else: alignment = unicode(muscle.communicate(input=fasta_string)[0], ) aln = AlignIO.read(StringIO(alignment), fmt) if as_file: if not alignment_file: alignment_file = tempfile.NamedTemporaryFile().name AlignIO.write(aln, alignment_file, fmt) return alignment_file return aln
Performs multiple sequence alignment with MUSCLE. Args: sequences (list): Sequences to be aligned. ``sequences`` can be one of four things: 1. a FASTA-formatted string 2. a list of BioPython ``SeqRecord`` objects 3. a list of AbTools ``Sequence`` objects 4. a list of lists/tuples, of the format ``[sequence_id, sequence]`` alignment_file (str): Path for the output alignment file. If not supplied, a name will be generated using ``tempfile.NamedTemporaryFile()``. fasta (str): Path to a FASTA-formatted file of sequences. Used as an alternative to ``sequences`` when suppling a FASTA file. fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'. Default is 'fasta'. threads (int): Number of threads (CPU cores) for MUSCLE to use. Default is ``-1``, which results in MUSCLE using all available cores. as_file (bool): If ``True``, returns a path to the alignment file. If ``False``, returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling ``Bio.AlignIO.read()`` on the alignment file). maxiters (int): Passed directly to MUSCLE using the ``-maxiters`` flag. diags (int): Passed directly to MUSCLE using the ``-diags`` flag. gap_open (float): Passed directly to MUSCLE using the ``-gapopen`` flag. Ignored if ``gap_extend`` is not also provided. gap_extend (float): Passed directly to MUSCLE using the ``-gapextend`` flag. Ignored if ``gap_open`` is not also provided. muscle_bin (str): Path to MUSCLE executable. ``abutils`` includes built-in MUSCLE binaries for MacOS and Linux, however, if a different MUSCLE binary can be provided. Default is ``None``, which results in using the appropriate built-in MUSCLE binary. Returns: Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``, in which case the path to the alignment file is returned.
388,882
def make_bindings_type(filenames,color_input,colorkey,file_dictionary,sidebar,bounds): string = selectedText count = 0 for row in filenames: color_input = colorkeyfields = False count += 1 filename = row zoomrange = [,] with open(filename) as data_file: data = json.load(data_file) data = data[] data = data[0] featuretype = data[] featuretype = featuretype[] data = data[] try: colorkeyfields = file_dictionary[filename][str()] except KeyError: colorkeyfields = False except TypeError: colorkeyfields = False if not colorkeyfields == False: if len(colorkeyfields) == 1: colorkey = colorkeyfields[0] colorkeyfields = False try: zoomrange = file_dictionary[filename][str()] except KeyError: zoomrange = [,] except TypeError: zoomrange = [,] if not time == : preloc= % (str(count)) loc = % (filename,count) loc = preloc + loc else: Point\n\n' stringblock = blocky + loc + bindings string += stringblock string = string + async_function_call(count) return string
# logic for instantiating variable colorkey input
if not colorkeyfields == False:
    colorkey = 'selectedText'
388,883
def proximal_quadratic_perturbation(prox_factory, a, u=None):
    # NOTE: the error-message strings were stripped from the source; the messages
    # below are reconstructed.
    a = float(a)
    if a < 0:
        raise ValueError('scaling parameter must be non-negative, got {}'.format(a))
    if u is not None and not isinstance(u, LinearSpaceElement):
        raise TypeError('`u` must be a `LinearSpaceElement` instance, got {!r}'.format(u))

    def quadratic_perturbation_prox_factory(sigma):
        if np.isscalar(sigma):
            sigma = float(sigma)
        else:
            sigma = np.asarray(sigma)
        const = 1.0 / np.sqrt(sigma * 2.0 * a + 1)
        prox = proximal_arg_scaling(prox_factory, const)(sigma)
        if u is not None:
            return (MultiplyOperator(const, domain=u.space, range=u.space) *
                    prox *
                    (MultiplyOperator(const, domain=u.space, range=u.space) -
                     sigma * const * u))
        else:
            space = prox.domain
            return (MultiplyOperator(const, domain=space, range=space) *
                    prox *
                    MultiplyOperator(const, domain=space, range=space))

    return quadratic_perturbation_prox_factory
r"""Calculate the proximal of function F(x) + a * \|x\|^2 + <u,x>. Parameters ---------- prox_factory : callable A factory function that, when called with a step size, returns the proximal operator of ``F`` a : non-negative float Scaling of the quadratic term u : Element in domain of F, optional Defines the linear functional. For ``None``, the zero element is taken. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- Given a functional :math:`F`, this is calculated according to the rule .. math:: \mathrm{prox}_{\sigma \left(F( \cdot ) + a \| \cdot \|^2 + <u, \cdot >\right)}(x) = c \; \mathrm{prox}_{\sigma F( \cdot \, c)}((x - \sigma u) c) where :math:`c` is the constant .. math:: c = \frac{1}{\sqrt{2 \sigma a + 1}}, :math:`a` is the scaling parameter belonging to the quadratic term, :math:`u` is the space element defining the linear functional, and :math:`\sigma` is the step size. For reference on the identity used, see [CP2011c]. Note that this identity is not the exact one given in the reference, but was recalculated for arbitrary step lengths. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011.
388,884
def delete_field_value(self, name):
    name = self.get_real_name(name)
    if name and self._can_write_field(name):
        if name in self.__modified_data__:
            self.__modified_data__.pop(name)
        if name in self.__original_data__ and name not in self.__deleted_fields__:
            self.__deleted_fields__.append(name)
Mark this field to be deleted
388,885
def values(self):
    dtypes = [col.dtype for col in self.columns]
    if len(set(dtypes)) > 1:
        dtype = object
    else:
        dtype = None
    return np.array(self.columns, dtype=dtype).T
Return data in `self` as a numpy array. If all columns are the same dtype, the resulting array will have this dtype. If there are >1 dtypes in columns, then the resulting array will have dtype `object`.
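A small sketch of the dtype rule described above, shown directly with numpy arrays standing in for the table's columns:

import numpy as np
np.array([[1, 2], [3, 4]]).T.dtype                     # single shared dtype (e.g. int64) is kept
np.array([[1, 2], ['a', 'b']], dtype=object).T.dtype   # mixed column dtypes fall back to object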
388,886
def plot(*args, ax=None, **kwargs):
    if ax is None:
        fig, ax = _setup_axes()

    pl = ax.plot(*args, **kwargs)

    if _np.shape(args)[0] > 1:
        if type(args[1]) is not str:
            min_x = min(args[0])
            max_x = max(args[0])
            ax.set_xlim((min_x, max_x))

    return pl
Plots but automatically resizes x axis. .. versionadded:: 1.4 Parameters ---------- args Passed on to :meth:`matplotlib.axis.Axis.plot`. ax : :class:`matplotlib.axis.Axis`, optional The axis to plot to. kwargs Passed on to :meth:`matplotlib.axis.Axis.plot`.
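A hedged usage sketch; the module providing `plot` (and its internal `_setup_axes` helper) is assumed to be imported already:

import numpy as np
x = np.linspace(0, 2 * np.pi, 200)
pl = plot(x, np.sin(x), label='sin')   # x limits are clamped to (x.min(), x.max())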
388,887
def _interactive_input_fn(hparams, decode_hp):
    num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
    decode_length = decode_hp.extra_length
    input_type = "text"
    p_hparams = hparams.problem_hparams
    has_input = "inputs" in p_hparams.modality
    vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
    # Fixed size of the encoded feature array; must exceed the longest input.
    const_array_size = 10000
    # Make history available on the command line, if readline is present.
    try:
        import readline
    except ImportError:
        pass
    while True:
        # NOTE: the stripped option names ('text', 'image', 'label') are
        # reconstructed from the input-type branches handled below.
        prompt = ("INTERACTIVE MODE  num_samples=%d  decode_length=%d  \n"
                  "  it=<input_type>     ('text', 'image' or 'label', default: text)\n"
                  "  ns=<num_samples>    (changes number of samples, default: 1)\n"
                  "  dl=<decode_length>  (changes decode length, default: 100)\n"
                  "  <%s>                (decode)\n"
                  "  q                   (quit)\n"
                  ">" % (num_samples, decode_length,
                         "source_string" if has_input else "target_prefix"))
        input_string = input(prompt)
        if input_string == "q":
            return
        elif input_string[:3] == "ns=":
            num_samples = int(input_string[3:])
        elif input_string[:3] == "dl=":
            decode_length = int(input_string[3:])
        elif input_string[:3] == "it=":
            input_type = input_string[3:]
        else:
            if input_type == "text":
                input_ids = vocabulary.encode(input_string)
                if has_input:
                    input_ids.append(text_encoder.EOS_ID)
                x = [num_samples, decode_length, len(input_ids)] + input_ids
                assert len(x) < const_array_size
                x += [0] * (const_array_size - len(x))
                features = {
                    "inputs": np.array(x).astype(np.int32),
                }
            elif input_type == "image":
                input_path = input_string
                img = vocabulary.encode(input_path)
                features = {
                    "inputs": img.astype(np.int32),
                }
            elif input_type == "label":
                input_ids = [int(input_string)]
                x = [num_samples, decode_length, len(input_ids)] + input_ids
                features = {
                    "inputs": np.array(x).astype(np.int32),
                }
            else:
                raise Exception("Unsupported input type.")
            for k, v in six.iteritems(
                    problem_lib.problem_hparams_to_features(p_hparams)):
                features[k] = np.array(v).astype(np.int32)
            yield features
Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. We yield int32 arrays with shape [const_array_size]. The format is: [num_samples, decode_length, len(input ids), <input ids>, <padding>] Args: hparams: model hparams decode_hp: decode hparams Yields: numpy arrays Raises: Exception: when `input_type` is invalid.
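A small sketch of the fixed-size encoding described above; the token IDs are made up:

import numpy as np
num_samples, decode_length = 1, 100
input_ids = [17, 42, 9, 1]          # hypothetical vocabulary IDs, EOS appended
const_array_size = 10000
x = [num_samples, decode_length, len(input_ids)] + input_ids
x += [0] * (const_array_size - len(x))
features = {"inputs": np.array(x).astype(np.int32)}   # shape (10000,)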
388,888
def _validate_oneof(self, definitions, field, value):
    # NOTE: the stripped literal is reconstructed as 'oneof' from the rule name.
    valids, _errors = \
        self.__validate_logical('oneof', definitions, field, value)
    if valids != 1:
        self._error(field, errors.ONEOF, _errors,
                    valids, len(definitions))
{'type': 'list', 'logical': 'oneof'}
388,889
def set_change(name, change): * pre_info = info(name) if change == pre_info[]: return True if __grains__[] == : cmd = [, , , name, , change] else: cmd = [, , change, name] __salt__[](cmd, python_shell=False) post_info = info(name) if post_info[] != pre_info[]: return post_info[] == change
Sets the time at which the password expires (in seconds since the UNIX epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on FreeBSD. A value of ``0`` sets the password to never expire. CLI Example: .. code-block:: bash salt '*' shadow.set_change username 1419980400
388,890
def communicate(self, job_ids = None): self.lock() jobs = self.get_jobs(job_ids) for job in jobs: job.refresh() if job.status in (, , ) and job.queue_name != : status = qstat(job.id, context=self.context) if len(status) == 0: job.status = job.result = 70 logger.warn("The job was not executed successfully (maybe a time-out happened). Please check the log files." % job) for array_job in job.array: if array_job.status in (, ): array_job.status = array_job.result = 70 self.session.commit() self.unlock()
Communicates with the SGE grid (using qstat) to see if jobs are still running.
388,891
def LinShuReductionFactor(axiPot, R, sigmar, nonaxiPot=None,
                          k=None, m=None, OmegaP=None):
    axiPot = flatten(axiPot)
    from galpy.potential import omegac, epifreq
    if nonaxiPot is None and (OmegaP is None or k is None or m is None):
        raise IOError("Need to specify either nonaxiPot= or m=, k=, OmegaP= for LinShuReductionFactor")
    elif not nonaxiPot is None:
        OmegaP = nonaxiPot.OmegaP()
        k = nonaxiPot.wavenumber(R)
        m = nonaxiPot.m()
    tepif = epifreq(axiPot, R)
    s = m * (OmegaP - omegac(axiPot, R)) / tepif
    chi = sigmar**2. * k**2. / tepif**2.
    return (1. - s**2.) / nu.sin(nu.pi * s) \
        * integrate.quad(lambda t: nu.exp(-chi * (1. + nu.cos(t)))
                         * nu.sin(s * t) * nu.sin(t),
                         0., nu.pi)[0]
NAME: LinShuReductionFactor PURPOSE: Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation INPUT: axiPot - The background, axisymmetric potential R - Cylindrical radius (can be Quantity) sigmar - radial velocity dispersion of the population (can be Quantity) Then either provide: 1) m= m in the perturbation's m x phi (number of arms for a spiral) k= wavenumber (see Binney & Tremaine 2008) OmegaP= pattern speed (can be Quantity) 2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber OUTPUT: reduction factor HISTORY: 2014-08-23 - Written - Bovy (IAS)
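A hedged usage sketch in galpy's natural units; the potential, radius, dispersion, and pattern-speed values here are illustrative only:

from galpy.potential import LogarithmicHaloPotential
lp = LogarithmicHaloPotential(normalize=1.)
F = LinShuReductionFactor(lp, 0.9, 0.1, m=2, k=10., OmegaP=0.9)   # dimensionless reduction factor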
388,892
def register_command(self, namespace, command, method):
    if method is None:
        self._logger.error("No method given for %s.%s", namespace, command)
        return False

    # Normalize the namespace and command names
    namespace = (namespace or "").strip().lower()
    command = (command or "").strip().lower()

    if not namespace:
        namespace = DEFAULT_NAMESPACE

    if not command:
        self._logger.error("No command name given")
        return False

    if namespace not in self._commands:
        space = self._commands[namespace] = {}
    else:
        space = self._commands[namespace]

    if command in space:
        self._logger.error(
            "Command already registered: %s.%s", namespace, command
        )
        return False

    space[command] = method
    return True
Registers the given command to the shell. The namespace can be None, empty or "default" :param namespace: The command name space. :param command: The shell name of the command :param method: The method to call :return: True if the method has been registered, False if it was already known or invalid
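A hypothetical usage sketch; `shell` stands for the object exposing register_command, and the handler signature is illustrative:

def say_hello(io_handler):
    io_handler.write_line("Hello!")

shell.register_command("demo", "hello", say_hello)   # -> True
shell.register_command("demo", "hello", say_hello)   # -> False (already registered)
shell.register_command(None, "bye", say_hello)       # registered under the default namespace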
388,893
def verify(self): if self.all_intervals: try: assert self.top_node.all_children() == self.all_intervals except AssertionError as e: print( ) tivs = set(self.top_node.all_children()) print() try: pprint except NameError: from pprint import pprint pprint(tivs - self.all_intervals) print() pprint(self.all_intervals - tivs) raise e for iv in self: assert isinstance(iv, Interval), ( "Error: Only Interval objects allowed in IntervalTree:" " {0}".format(iv) ) for iv in self: assert not iv.is_null(), ( "Error: Null Interval objects not allowed in IntervalTree:" " {0}".format(iv) ) bound_check = {} for iv in self: if iv.begin in bound_check: bound_check[iv.begin] += 1 else: bound_check[iv.begin] = 1 if iv.end in bound_check: bound_check[iv.end] += 1 else: bound_check[iv.end] = 1 assert set(self.boundary_table.keys()) == set(bound_check.keys()),\ \
## FOR DEBUGGING ONLY ## Checks the table to ensure that the invariants are held.
388,894
def to_table(result):
    # NOTE: the stripped header labels are reconstructed as role/env/topology
    # from the surrounding loop variables.
    max_count = 20
    table, count = [], 0
    for role, envs_topos in result.items():
        for env, topos in envs_topos.items():
            for topo in topos:
                count += 1
                if count > max_count:
                    continue
                else:
                    table.append([role, env, topo])
    header = ['role', 'env', 'topology']
    rest_count = 0 if count <= max_count else count - max_count
    return table, header, rest_count
normalize raw result to table
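A small sketch of the normalization; the role, environment, and topology names are made up:

result = {'role1': {'devel': ['topo-a', 'topo-b'], 'prod': ['topo-c']}}
table, header, rest_count = to_table(result)
# table      -> [['role1', 'devel', 'topo-a'], ['role1', 'devel', 'topo-b'], ['role1', 'prod', 'topo-c']]
# rest_count -> 0 (only non-zero when more than 20 rows are found)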
388,895
def get_object_metadata(self, container, obj, prefix=None): return self._manager.get_object_metadata(container, obj, prefix=prefix)
Returns the metadata for the specified object as a dict.
388,896
def get(self, endpoint, params=None):
    # NOTE: string literals reconstructed from the docstring ('GET', '_status', u'OK').
    response = self.get_response(method='GET', endpoint=endpoint, params=params)
    resp = self.decode(response=response)
    if '_status' not in resp:
        resp['_status'] = u'OK'
    return resp
Get items or item in alignak backend If an error occurs, a BackendException is raised. This method builds a response as a dictionary that always contains: _items and _status:: { u'_items': [ ... ], u'_status': u'OK' } :param endpoint: endpoint (API URL) relative from root endpoint :type endpoint: str :param params: parameters for the backend API :type params: dict :return: dictionary as specified upper :rtype: dict
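A hedged usage sketch; the endpoint name and filter are illustrative and `backend` is an already-authenticated client:

resp = backend.get('host', params={'where': '{"name": "localhost"}'})
for item in resp['_items']:
    print(item.get('name'))
print(resp['_status'])   # u'OK' unless the backend reported otherwise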
388,897
def generate(self):
    random_data = os.urandom(100)
    hash_gen = hashlib.new("sha512")
    hash_gen.update(random_data)
    return hash_gen.hexdigest()[:self.token_length]
:return: A new token :rtype: str
388,898
def get_transfers(self, start=0, stop=None, inclusion_states=False): return extended.GetTransfersCommand(self.adapter)( seed=self.seed, start=start, stop=stop, inclusionStates=inclusion_states, )
Returns all transfers associated with the seed. :param start: Starting key index. :param stop: Stop before this index. Note that this parameter behaves like the ``stop`` attribute in a :py:class:`slice` object; the stop index is *not* included in the result. If ``None`` (default), then this method will check every address until it finds one without any transfers. :param inclusion_states: Whether to also fetch the inclusion states of the transfers. This requires an additional API call to the node, so it is disabled by default. :return: Dict with the following structure:: { 'bundles': List[Bundle], Matching bundles, sorted by tail transaction timestamp. This value is always a list, even if only one bundle was found. } References: - https://github.com/iotaledger/wiki/blob/master/api-proposal.md#gettransfers
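A hypothetical usage sketch; `api` stands for an IOTA API instance built with a seed, and the bundle attribute names are assumptions:

result = api.get_transfers(start=0, stop=10, inclusion_states=False)
for bundle in result['bundles']:
    print(bundle.hash, len(bundle.transactions))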
388,899
def _expand_list(names): if names is None: names = [] elif isinstance(names, basestring): names = [names] results = [] objects = {} for name in names: bucket, key = google.datalab.storage._bucket.parse_name(name) results_len = len(results) if bucket: if not key: results.append( % bucket) elif google.datalab.storage.Object(bucket, key).exists(): results.append( % (bucket, key)) else: if bucket not in objects and key[:1] == : objects[bucket] = [obj.metadata.name for obj in list(google.datalab.storage.Bucket(bucket).objects())] if bucket in objects: candidates = objects[bucket] else: match = re.search(, key) prefix = key if match: prefix = key[0:match.start()] candidates = [obj.metadata.name for obj in google.datalab.storage.Bucket(bucket).objects(prefix=prefix)] for obj in candidates: if fnmatch.fnmatch(obj, key): results.append( % (bucket, obj)) if len(results) == results_len: results.append(name) return results
Do a wildcard name expansion of object names in a list and return the expanded list. The objects are expected to exist, as this is used for copy sources or delete targets. Currently we support wildcards in the key name only.
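A minimal sketch of the wildcard matching step above, independent of the storage API; the object names and pattern are made up:

import fnmatch
candidates = ['data/2016/file1.csv', 'data/2017/file2.csv', 'logs/run.txt']
[name for name in candidates if fnmatch.fnmatch(name, 'data/*/file?.csv')]
# -> ['data/2016/file1.csv', 'data/2017/file2.csv']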