code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
def from_epsg_code(code): code = str(code) proj4 = utils.crscode_to_string("epsg", code, "proj4") crs = from_proj4(proj4) return crs
Load crs object from epsg code, via spatialreference.org. Parses based on the proj4 representation. Arguments: - *code*: The EPSG code as an integer. Returns: - A CS instance of the indicated type.
def get_name(self): paths = [, , , ] for path in paths: tag = self.root.find(path, NS) if tag is not None and len(tag): name = tag.get() if name: return name
Tries to get WF name from 'process' or 'collobration' or 'pariticipant' Returns: str. WF name.
def send(mail, server=): sender = mail.get_sender() rcpt = mail.get_receipients() session = smtplib.SMTP(server) message = MIMEMultipart() message[] = mail.get_subject() message[] = mail.get_sender() message[] = .join(mail.get_to()) message[] = .join(mail.get_cc()) message.preamble = body = MIMEText(mail.get_body().encode("utf-8"), "plain", "utf-8") body.add_header(, ) message.attach(body) for filename in mail.get_attachments(): message.attach(_get_mime_object(filename)) session.sendmail(sender, rcpt, message.as_string())
Sends the given mail. :type mail: Mail :param mail: The mail object. :type server: string :param server: The address of the mailserver.
def _play(self): if self._input_func in self._netaudio_func_list: body = {"cmd0": "PutNetAudioCommand/CurEnter", "cmd1": "aspMainZone_WebUpdateStatus/", "ZoneName": "MAIN ZONE"} try: if self.send_post_command( self._urls.command_netaudio_post, body): self._state = STATE_PLAYING return True else: return False except requests.exceptions.RequestException: _LOGGER.error("Connection error: play command not sent.") return False
Send play command to receiver command via HTTP post.
def get_block_containing_tx(self, txid): blocks = list(backend.query.get_block_with_transaction(self.connection, txid)) if len(blocks) > 1: logger.critical(, txid) return [block[] for block in blocks]
Retrieve the list of blocks (block ids) containing a transaction with transaction id `txid` Args: txid (str): transaction id of the transaction to query Returns: Block id list (list(int))
def append(self, position, array): if not Gauged.map_append(self.ptr, position, array.ptr): raise MemoryError
Append an array to the end of the map. The position must be greater than any positions in the map
def setWorkingPlayAreaSize(self, sizeX, sizeZ): fn = self.function_table.setWorkingPlayAreaSize fn(sizeX, sizeZ)
Sets the Play Area in the working copy.
def DICOMfile_read(self, *args, **kwargs): b_status = False l_tags = [] l_tagsToUse = [] d_tagsInString = {} str_file = "" d_DICOM = { : None, : {}, : , : [], : {}, : {}, : {} } for k, v in kwargs.items(): if k == : str_file = v if k == : l_tags = v if len(args): l_file = args[0] str_file = l_file[0] str_localFile = os.path.basename(str_file) str_path = os.path.dirname(str_file) try: d_DICOM[] = dicom.read_file(str_file) b_status = True except: self.dp.qprint( % os.getcwd(), comms = ) self.dp.qprint( % str_file, comms = ) b_status = False d_DICOM[] = dict(d_DICOM[]) d_DICOM[] = str(d_DICOM[]) d_DICOM[] = d_DICOM[].dir() if len(l_tags): l_tagsToUse = l_tags else: l_tagsToUse = d_DICOM[] if in l_tagsToUse: l_tagsToUse.remove() for key in l_tagsToUse: d_DICOM[][key] = d_DICOM[].data_element(key) try: d_DICOM[][key] = getattr(d_DICOM[], key) except: d_DICOM[][key] = "no attribute" d_DICOM[][key] = str(d_DICOM[][key]) d_tagsInString = self.tagsInString_process(d_DICOM, self.str_outputFileStem) str_outputFile = d_tagsInString[] return { : b_status, : str_path, : str_localFile, : str_outputFile, : d_DICOM, : l_tagsToUse }
Read a DICOM file and perform some initial parsing of tags. NB! For thread safety, class member variables should not be assigned since other threads might override/change these variables in mid- flight!
def tabify(text, options): opts = parse_options(options) if opts.tab_size < 1: return text else: tab_equiv = * opts.tab_size return text.replace(tab_equiv, )
tabify(text : str, options : argparse.Namespace|str) -> str >>> tabify(' (println "hello world")', '--tab=3') '\t\t (println "hello world")' Replace spaces with tabs
def _glfw_get_version(filename): version_checker_source = args = [sys.executable, , textwrap.dedent(version_checker_source)] process = subprocess.Popen(args, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE) out = process.communicate(_to_char_p(filename))[0] out = out.strip() if out: return eval(out) else: return None
Queries and returns the library version tuple or None by using a subprocess.
def _get_regex_pattern(label): parts = _split_by_punctuation.split(label) for index, part in enumerate(parts): if index % 2 == 0: if not parts[index].isdigit() and len(parts[index]) > 1: parts[index] = _convert_word(parts[index]) else: if not parts[index + 1]: parts[index] = _convert_punctuation( parts[index], current_app.config["CLASSIFIER_SYMBOLS"] ) else: parts[index] = _convert_punctuation( parts[index], current_app.config["CLASSIFIER_SEPARATORS"] ) return "".join(parts)
Return a regular expression of the label. This takes care of plural and different kinds of separators.
def _list_templates(settings): for idx, option in enumerate(settings.config.get("project_templates"), start=1): puts(" {0!s:5} {1!s:36}".format( colored.yellow("[{0}]".format(idx)), colored.cyan(option.get("name")) )) if option.get("url"): puts(" {0}\n".format(option.get("url")))
List templates from settings.
def delete_comment(repo: GithubRepository, comment_id: int) -> None: url = ("https://api.github.com/repos/{}/{}/issues/comments/{}" "?access_token={}".format(repo.organization, repo.name, comment_id, repo.access_token)) response = requests.delete(url) if response.status_code != 204: raise RuntimeError( .format( response.status_code, response.content))
References: https://developer.github.com/v3/issues/comments/#delete-a-comment
def shutdown(self): task = asyncio.ensure_future(self.core.shutdown()) self.loop.run_until_complete(task)
Shutdown the application and exit :returns: No return value
def min(self, constraints, X: BitVec, M=10000): assert isinstance(X, BitVec) return self.optimize(constraints, X, , M)
Iteratively finds the minimum value for a symbol within given constraints. :param constraints: constraints that the expression must fulfil :param X: a symbol or expression :param M: maximum number of iterations allowed
def select_ipam_strategy(self, network_id, network_strategy, **kwargs): LOG.info("Selecting IPAM strategy for network_id:%s " "network_strategy:%s" % (network_id, network_strategy)) net_type = "tenant" if STRATEGY.is_provider_network(network_id): net_type = "provider" strategy = self._ipam_strategies.get(net_type, {}) default = strategy.get("default") overrides = strategy.get("overrides", {}) if network_strategy in overrides: LOG.info("Selected overridden IPAM strategy: %s" % (overrides[network_strategy])) return overrides[network_strategy] if default: LOG.info("Selected default IPAM strategy for tenant " "network: %s" % (default)) return default LOG.info("Selected network strategy for tenant " "network: %s" % (network_strategy)) return network_strategy
Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy.
def generate_events_list(generator): if not localized_events: generator.context[] = sorted(events, reverse = True, key=lambda ev: (ev.dtstart, ev.dtend)) else: generator.context[] = {k: sorted(v, reverse = True, key=lambda ev: (ev.dtstart, ev.dtend)) for k, v in localized_events.items()}
Populate the event_list variable to be used in jinja templates
def __setup(local_download_dir_warc, log_level): if not os.path.exists(local_download_dir_warc): os.makedirs(local_download_dir_warc) configure_logging({"LOG_LEVEL": "ERROR"}) logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL) logging.basicConfig(level=log_level) __logger = logging.getLogger(__name__) __logger.setLevel(log_level)
Setup :return:
def asset(self, id): data = None if int(id) > 0: url = self._build_url(, , str(id), base_url=self._api) data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS), 200) return Asset(data, self) if data else None
Returns a single Asset. :param int id: (required), id of the asset :returns: :class:`Asset <github3.repos.release.Asset>`
def deepcp(data): import ujson try: return ujson.loads(ujson.dumps(data)) except Exception: return copy.deepcopy(data)
Use ujson to do deep_copy
def bookmark(ctx): user, project_name, _build = get_build_or_local(ctx.obj.get(), ctx.obj.get()) try: PolyaxonClient().build_job.bookmark(user, project_name, _build) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error(.format(_build)) Printer.print_error(.format(e)) sys.exit(1) Printer.print_success("Build job bookmarked.")
Bookmark build job. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon build bookmark ``` \b ```bash $ polyaxon build -b 2 bookmark ```
def tuples(stream, *keys): if not keys: raise PescadorError( ) for data in stream: try: yield tuple(data[key] for key in keys) except TypeError: raise DataError("Malformed data stream: {}".format(data))
Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformated as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key.
def find_malformed_single_file_project(self): files = [f for f in os.listdir(".") if os.path.isfile(f)] candidates = [] for file in files: if file.endswith("setup.py") or not file.endswith(".py"): continue candidate = file.replace(".py", "") if candidate != "setup": candidates.append(candidate) return candidates for file in files: if file.endswith("setup.py"): continue if "." not in file: candidate = files try: firstline = self.file_opener.open_this(file, "r").readline() if ( firstline.startswith(" and "python" in firstline and candidate in self.setup_py_source() ): candidates.append(candidate) return candidates except: pass return candidates
Take first non-setup.py python file. What a mess. :return:
def add_section(self, section_name): self.section_headings.append(section_name) if section_name in self.sections: raise ValueError("Section %s already exists." % section_name) self.sections[section_name] = [] return
Create a section of the report, to be headed by section_name Text and images can be added by using the `section` argument of the `add_text` and `add_image` methods. Sections can also be ordered by using the `set_section_order` method. By default, text and images that have no section will be placed after all the sections, in the order they were added. This behavior may be altered using the `sections_first` attribute of the `make_report` method.
def pop_message(self, till=None): if till is not None and not isinstance(till, Signal): Log.error("Expecting a signal") return Null, self.pop(till=till)
RETURN TUPLE (message, payload) CALLER IS RESPONSIBLE FOR CALLING message.delete() WHEN DONE DUMMY IMPLEMENTATION FOR DEBUGGING
def aggregationDivide(dividend, divisor): dividendMonthSec = aggregationToMonthsSeconds(dividend) divisorMonthSec = aggregationToMonthsSeconds(divisor) if (dividendMonthSec[] != 0 and divisorMonthSec[] != 0) \ or (dividendMonthSec[] != 0 and divisorMonthSec[] != 0): raise RuntimeError("Aggregation dicts with months/years can only be " "inter-operated with other aggregation dicts that contain " "months/years") if dividendMonthSec[] > 0: return float(dividendMonthSec[]) / divisor[] else: return float(dividendMonthSec[]) / divisorMonthSec[]
Return the result from dividing two dicts that represent date and time. Both dividend and divisor are dicts that contain one or more of the following keys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds', 'milliseconds', 'microseconds'. For example: :: aggregationDivide({'hours': 4}, {'minutes': 15}) == 16 :param dividend: (dict) The numerator, as a dict representing a date and time :param divisor: (dict) the denominator, as a dict representing a date and time :returns: (float) number of times divisor goes into dividend
def parse_config(self): tree = ElementTree.parse(self.file_xml) root = tree.getroot() for server in root.findall(): destination = server.text name = server.get("name") self.discover_remote(destination, name)
Parse the xml file with remote servers and discover resources on each found server.
def create_key_file(path): iv = "{}{}".format(os.urandom(32), time.time()) new_key = generate_key(ensure_bytes(iv)) with open(path, "wb") as f: f.write(base64.b64encode(new_key)) os.chmod(path, 0o400)
Creates a new encryption key in the path provided and sets the file permissions. Setting the file permissions currently does not work on Windows platforms because of the differences in how file permissions are read and modified.
def outlineColor(self, value): if isinstance(value, Color) and \ not self._outline is None: self._outline[] = value
gets/sets the outlineColor
def extend_with(func): if not func.__name__ in ArgParseInator._plugins: ArgParseInator._plugins[func.__name__] = func
Extends with class or function
def stop(self): self.__stop = True self._queue.stop() self._zk.stop()
Stops the connection
def create_annotation(timestamp, value, host): return zipkin_core.Annotation(timestamp=timestamp, value=value, host=host)
Create a zipkin annotation object :param timestamp: timestamp of when the annotation occured in microseconds :param value: name of the annotation, such as 'sr' :param host: zipkin endpoint object :returns: zipkin annotation object
def get_formset(self): if self._formset is None: self._formset = self.formset_class( self.request.POST or None, initial=self._get_formset_data(), prefix=self._meta.name) return self._formset
Provide the formset corresponding to this DataTable. Use this to validate the formset and to get the submitted data back.
def summarizePosition(self, index): countAtPosition = Counter() excludedCount = 0 for read in self: try: countAtPosition[read.sequence[index]] += 1 except IndexError: excludedCount += 1 return { : excludedCount, : countAtPosition }
Compute residue counts at a specific sequence index. @param index: an C{int} index into the sequence. @return: A C{dict} with the count of too-short (excluded) sequences, and a Counter instance giving the residue counts.
def _snakify_name(self, name): name = self._strip_diacritics(name) name = name.lower() name = name.replace(, ) return name
Snakify a name string. In this context, "to snakify" means to strip a name of all diacritics, convert it to lower case, and replace any spaces inside the name with hyphens. This way the name is made "machine-friendly", and ready to be combined with a second name component into a full "snake_case" name. :param str name: A name to snakify. :return str: A snakified name.
def make(parser): version = parser.add_mutually_exclusive_group() version.add_argument( , nargs=, action=StoreVersion, metavar=, help=, ) version.add_argument( , nargs=, action=StoreVersion, metavar=, help=, ) version.add_argument( , nargs=0, action=StoreVersion, help=, ) version.add_argument( , nargs=, action=StoreVersion, const=, metavar=, help=, ) parser.add_argument( , nargs=, action=StoreVersion, metavar=, help=, ) version.set_defaults( stable=None, release=None, dev=, version_kind=, ) parser.add_argument( , dest=, action=, help=, ) parser.add_argument( , dest=, action=, help=, ) parser.add_argument( , dest=, action=, help=, ) parser.add_argument( , dest=, action=, help=, ) parser.add_argument( , dest=, action=, help=, ) parser.add_argument( , dest=, action=, help=, ) parser.add_argument( , , dest=, action=, help=, ) parser.add_argument( , dest=, action=, help=, ) repo = parser.add_mutually_exclusive_group() repo.add_argument( , dest=, action=, help=, ) repo.add_argument( , dest=, action=, help=, ) repo.add_argument( , action=, help=, ) repo.set_defaults( adjust_repos=True, ) parser.add_argument( , metavar=, nargs=, help=, ) parser.add_argument( , nargs=, const=, default=None, help=, ) parser.add_argument( , nargs=, dest=, help=, ) parser.add_argument( , nargs=, dest=, help= ) parser.add_argument( , action=, help=, ) parser.set_defaults( func=install, )
Install Ceph packages on remote hosts.
def _get_envs_from_ref_paths(self, refs): def _check_ref(env_set, rname): if rname in self.saltenv_revmap: env_set.update(self.saltenv_revmap[rname]) else: if rname == self.base: env_set.add() elif not self.disable_saltenv_mapping: env_set.add(rname) use_branches = in self.ref_types use_tags = in self.ref_types ret = set() if salt.utils.stringutils.is_hex(self.base): ret.add() for ref in salt.utils.data.decode(refs): if ref.startswith(): ref = ref[5:] rtype, rname = ref.split(, 1) if rtype == and use_branches: parted = rname.partition() rname = parted[2] if parted[2] else parted[0] _check_ref(ret, rname) elif rtype == and use_tags: _check_ref(ret, rname) return ret
Return the names of remote refs (stripped of the remote name) and tags which are map to the branches and tags.
def get_account_invitation(self, account_id, invitation_id, **kwargs): kwargs[] = True if kwargs.get(): return self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) else: (data) = self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs) return data
Details of a user invitation. # noqa: E501 An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str invitation_id: The ID of the invitation to be retrieved. (required) :return: UserInvitationResp If the method is called asynchronously, returns the request thread.
def get_path_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None): route_values = {} if project is not None: route_values[] = self._serialize.url(, project, ) if provider_name is not None: route_values[] = self._serialize.url(, provider_name, ) query_parameters = {} if service_endpoint_id is not None: query_parameters[] = self._serialize.query(, service_endpoint_id, ) if repository is not None: query_parameters[] = self._serialize.query(, repository, ) if commit_or_branch is not None: query_parameters[] = self._serialize.query(, commit_or_branch, ) if path is not None: query_parameters[] = self._serialize.query(, path, ) response = self._send(http_method=, location_id=, version=, route_values=route_values, query_parameters=query_parameters) return self._deserialize(, self._unwrap_collection(response))
GetPathContents. [Preview API] Gets the contents of a directory in the given source code repository. :param str project: Project ID or project name :param str provider_name: The name of the source provider. :param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit. :param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories. :param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved. :param str path: The path contents to list, relative to the root of the repository. :rtype: [SourceRepositoryItem]
def get_pager_spec(self): self_config = self.get_config() pagercmd = self_config.get() istty = self_config.getboolean() core_config = self.get_config() if pagercmd is None: pagercmd = core_config.get() if istty is None: istty = core_config.get() return { "pagercmd": pagercmd, "istty": istty }
Find the best pager settings for this command. If the user has specified overrides in the INI config file we prefer those.
def resolve(self, symbol): if symbol not in self._resolve_cache: result = None if self._group is not None: for ep in pkg_resources.iter_entry_points(self._group, symbol): try: result = ep.load() except (ImportError, AttributeError, pkg_resources.UnknownExtra): continue break self._resolve_cache[symbol] = result return self._resolve_cache[symbol]
Resolve a symbol using the entrypoint group. :param symbol: The symbol being resolved. :returns: The value of that symbol. If the symbol cannot be found, or if no entrypoint group was passed to the constructor, will return ``None``.
def unmount_loopbacks(self): self._index_loopbacks() for dev in self.find_loopbacks(): _util.check_output_([, , dev])
Unmounts all loopback devices as identified by :func:`find_loopbacks`
def get_exit_code(self): if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA: dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION else: dwAccess = win32.PROCESS_QUERY_INFORMATION return win32.GetExitCodeProcess( self.get_handle(dwAccess) )
@rtype: int @return: Process exit code, or C{STILL_ACTIVE} if it's still alive. @warning: If a process returns C{STILL_ACTIVE} as it's exit code, you may not be able to determine if it's active or not with this method. Use L{is_alive} to check if the process is still active. Alternatively you can call L{get_handle} to get the handle object and then L{ProcessHandle.wait} on it to wait until the process finishes running.
def create_port_postcommit(self, context): vlan_segment, vxlan_segment = self._get_segments( context.top_bound_segment, context.bottom_bound_segment) if not self._is_valid_segment(vlan_segment): return port = context.current if self._is_supported_deviceowner(port): if nexus_help.is_baremetal(context.current): all_switches, active_switches = ( self._get_baremetal_switches(context.current)) else: host_id = context.current.get(bc.portbindings.HOST_ID) all_switches, active_switches = ( self._get_host_switches(host_id)) verified_active_switches = [] for switch_ip in active_switches: try: self.driver.get_nexus_type(switch_ip) verified_active_switches.append(switch_ip) except Exception as e: LOG.error("Failed to ping " "switch ip %(switch_ip)s error %(exp_err)s", {: switch_ip, : e}) LOG.debug("Create Stats: thread %(thid)d, " "all_switches %(all)d, " "active %(active)d, verified %(verify)d", {: threading.current_thread().ident, : len(all_switches), : len(active_switches), : len(verified_active_switches)}) if all_switches and not verified_active_switches: raise excep.NexusConnectFailed( nexus_host=all_switches[0], config="None", exc="Create Failed: Port event can not " "be processed at this time.")
Create port non-database commit event.
def proc_collector(process_map, args, pipeline_string): arguments_list = [] if args.detailed_list: arguments_list += [ "input_type", "output_type", "description", "dependencies", "conflicts", "directives" ] if args.short_list: arguments_list += [ "description" ] if arguments_list: procs_dict = {} for name, cls in process_map.items(): cls_inst = cls(template=name) if pipeline_string: if name not in pipeline_string: continue d = {arg_key: vars(cls_inst)[arg_key] for arg_key in vars(cls_inst) if arg_key in arguments_list} procs_dict[name] = d procs_dict_parser(procs_dict) sys.exit(0)
Function that collects all processes available and stores a dictionary of the required arguments of each process class to be passed to procs_dict_parser Parameters ---------- process_map: dict The dictionary with the Processes currently available in flowcraft and their corresponding classes as values args: argparse.Namespace The arguments passed through argparser that will be access to check the type of list to be printed pipeline_string: str the pipeline string
def write(obj, data=None, **kwargs): if obj.REDIS_ENABLED_FOR_DYNACONF is False: raise RuntimeError( "Redis is not configured \n" "export REDIS_ENABLED_FOR_DYNACONF=true\n" "and configure the REDIS_FOR_DYNACONF_* variables" ) client = StrictRedis(**obj.REDIS_FOR_DYNACONF) holder = obj.get("ENVVAR_PREFIX_FOR_DYNACONF") data = data or {} data.update(kwargs) if not data: raise AttributeError("Data must be provided") redis_data = { key.upper(): unparse_conf_data(value) for key, value in data.items() } client.hmset(holder.upper(), redis_data) load(obj)
Write a value in to loader source :param obj: settings object :param data: vars to be stored :param kwargs: vars to be stored :return:
def _UploadChunk(self, chunk): blob = _CompressedDataBlob(chunk) self._action.ChargeBytesToSession(len(chunk.data)) self._action.SendReply(blob, session_id=self._TRANSFER_STORE_SESSION_ID) return rdf_client_fs.BlobImageChunkDescriptor( digest=hashlib.sha256(chunk.data).digest(), offset=chunk.offset, length=len(chunk.data))
Uploads a single chunk to the transfer store flow. Args: chunk: A chunk to upload. Returns: A `BlobImageChunkDescriptor` object.
def delete_field(field_uri): root = T.DeleteItemField( T.FieldURI(FieldURI=field_uri) ) return root
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of appending. <t:DeleteItemField> <t:FieldURI FieldURI="calendar:Resources"/> </t:DeleteItemField>
def check_config(config, data): essential_keys = [, , ] for key in essential_keys: if not key in config.keys(): raise ValueError( % key) if not in config.keys() or not config[]: config[] = 1E-5 if not config.get(, False): config[] = 1000 if config[] < np.min(data[]): config[] = np.min(data[]) if fabs(config[] < 1E-15): raise ValueError() return config
Check config file inputs :param dict config: Configuration settings for the function
def _generate_phrases(self, sentences): phrase_list = set() for sentence in sentences: word_list = [word.lower() for word in wordpunct_tokenize(sentence)] phrase_list.update(self._get_phrase_list_from_words(word_list)) return phrase_list
Method to generate contender phrases given the sentences of the text document. :param sentences: List of strings where each string represents a sentence which forms the text. :return: Set of string tuples where each tuple is a collection of words forming a contender phrase.
def _lexists(self, path): erenced.' try: return bool(self._lstat(path)) except os.error: return False
IMPORTANT: expects `path` to already be deref()'erenced.
def normalize_hex(hex_color): hex_color = hex_color.replace(, ).lower() length = len(hex_color) if length in (6, 8): return + hex_color if length not in (3, 4): return None strhex = u % ( hex_color[0] * 2, hex_color[1] * 2, hex_color[2] * 2) if length == 4: strhex += hex_color[3] * 2 return strhex
Transform a xxx hex color to xxxxxx.
def _filter_dependencies_graph(self, internal): graph = collections.defaultdict(set) for importee, importers in self.stats["dependencies"].items(): for importer in importers: package = self._module_pkg.get(importer, importer) is_inside = importee.startswith(package) if is_inside and internal or not is_inside and not internal: graph[importee].add(importer) return graph
build the internal or the external depedency graph
def get_table_meta(self, db_patterns, tbl_patterns, tbl_types): self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types) return self.recv_get_table_meta()
Parameters: - db_patterns - tbl_patterns - tbl_types
def get(self, *args, **kwargs): value, stat = super(XClient, self).get(*args, **kwargs) try: if value is not None: value = value.decode(encoding="utf-8") except UnicodeDecodeError: pass return (value, stat)
wraps the default get() and deals with encoding
def _get_response_body_mime_type(self): mime_type = self._get_response_mime_type() if mime_type is AtomMime: mime_type = XmlMime return mime_type
Returns the response body MIME type. This might differ from the overall response mime type e.g. in ATOM responses where the body MIME type is XML.
def link(origin=None, rel=None, value=None, attributes=None, source=None): s current link, or on provided parameters :param origin: IRI/string, or list of same; origins for the created relationships. If None, the action context provides the parameter. :param rel: IRI/string, or list of same; IDs for the created relationships. If None, the action context provides the parameter. :param value: IRI/string, or list of same; values/targets for the created relationships. If None, the action context provides the parameter. :param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params :return: Versa action function to do the actual work Link source must be a pattern action function') contexts = source(ctx) for ctx in contexts: ctx.output_model.add(ctx.current_link[ORIGIN], ctx.current_link[RELATIONSHIP], ctx.current_link[TARGET], attributes) return (o, r, v, a) = ctx.current_link _origin = origin(ctx) if callable(origin) else origin o_list = [o] if _origin is None else (_origin if isinstance(_origin, list) else [_origin]) _rel = rel(ctx) if callable(rel) else rel r_list = [r] if _rel is None else (_rel if isinstance(_rel, list) else [_rel]) _value = value(ctx) if callable(value) else value v_list = [v] if _value is None else (_value if isinstance(_value, list) else [_value]) _attributes = attributes(ctx) if callable(attributes) else attributes for (o, r, v, a) in [ (o, r, v, a) for o in o_list for r in r_list for v in v_list ]: ctx.output_model.add(o, r, v, attributes) return return _link
Action function generator to create a link based on the context's current link, or on provided parameters :param origin: IRI/string, or list of same; origins for the created relationships. If None, the action context provides the parameter. :param rel: IRI/string, or list of same; IDs for the created relationships. If None, the action context provides the parameter. :param value: IRI/string, or list of same; values/targets for the created relationships. If None, the action context provides the parameter. :param source: pattern action to be executed, generating contexts to determine the output statements. If given, overrides specific origin, rel or value params :return: Versa action function to do the actual work
def show(block=False): if not has_matplotlib(): raise ImportError() cs = [_mpl_to_vispy(plt.figure(ii)) for ii in plt.get_fignums()] if block and len(cs) > 0: cs[0].app.run() return cs
Show current figures using vispy Parameters ---------- block : bool If True, blocking mode will be used. If False, then non-blocking / interactive mode will be used. Returns ------- canvases : list List of the vispy canvases that were created.
def cumprod(self, axis=0, *args, **kwargs):
    """Cumulative product for each group.

    Restored literals follow the pandas GroupBy convention for this method
    (validate against 'numeric_only'/'skipna', cython name 'cumprod').
    """
    nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only', 'skipna'])
    if axis != 0:
        # The cython fast path only handles axis 0; fall back to apply.
        return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
    return self._cython_transform('cumprod', **kwargs)
Cumulative product for each group.
def _remove_init_all(r):
    """Remove any ``__all__`` assignment from a redbaron-parsed ``__init__.py``.

    :param r: redbaron tree whose ``node_list`` is filtered.
    :return: a new ``redbaron.NodeList`` without the ``__all__`` assignment.
    """
    new_r = redbaron.NodeList()
    for n in r.node_list:
        # Restored literals: drop nodes assigning to __all__.
        if n.type == 'assignment' and n.target.value == '__all__':
            pass
        else:
            new_r.append(n)
    return new_r
Remove any __all__ in __init__.py file.
def shaddalike(partial, fully):
    """Return True if *partial* (partially vocalized) is compatible with
    *fully* (fully vocalized) with respect to shadda placement.

    If the partially vocalized word contains a shadda, it must occur at the
    same position in the fully vocalized word.

    @param partial: the partially vocalized word
    @type partial: unicode
    @param fully: the fully vocalized word
    @type fully: unicode
    @return: whether the shadda placement is compatible
    @rtype: Boolean
    """
    # No shadda in the partial form: nothing to check.
    if not has_shadda(partial):
        return True
    # Partial has a shadda but the full form does not: incompatible.
    if not has_shadda(fully):
        return False

    bare_partial = strip_harakat(partial)
    bare_full = strip_harakat(fully)
    pstack = stack.Stack(bare_partial)
    fstack = stack.Stack(bare_full)
    pchar = pstack.pop()
    fchar = fstack.pop()
    # Walk both words from the end, allowing the full form to carry extra
    # shaddas that the partial form omits.
    while pchar != None and fchar != None:
        if pchar == fchar:
            pchar = pstack.pop()
            fchar = fstack.pop()
        elif pchar != SHADDA and fchar == SHADDA:
            fchar = fstack.pop()
        else:
            # Either the partial has a shadda the full lacks, or a mismatch.
            break
    return pstack.is_empty() and fstack.is_empty()
If the two words have the same letters and the same harakats, this function returns True. The first word is partially vocalized, the second is fully vocalized. If the partially vocalized word contains a shadda, it must be at the same place in the fully vocalized word. @param partial: the partially vocalized word @type partial: unicode @param fully: the fully vocalized word @type fully: unicode @return: if contains shadda @rtype: Boolean
def get_latex_expression(s, pos, **parse_flags):
    """Read a latex expression (single char, escape sequence, or braced
    group) starting at *pos* in *s*.

    Returns a tuple ``(<LatexNode instance>, pos, len)``.

    .. deprecated:: 1.0
       Use :py:meth:`LatexWalker.get_latex_expression()` instead.
    """
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_expression(pos=pos)
Reads a latex expression, e.g. macro argument. This may be a single char, an escape sequence, or a expression placed in braces. Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the expression, and `len` is its length. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
def send_error(self, status_code: int = 500, **kwargs: Any) -> None:
    """Send the given HTTP error code to the browser.

    If headers have already been flushed it is too late to replace the
    response, so the connection is simply terminated. Otherwise any
    buffered output is discarded and replaced with the error page
    produced by ``write_error`` (override that to customize).
    Extra keyword arguments are passed through to ``write_error``.
    """
    if self._headers_written:
        gen_log.error("Cannot send error response after headers written")
        if not self._finished:
            try:
                self.finish()
            except Exception:
                gen_log.error("Failed to flush partial response", exc_info=True)
        return

    # Discard any un-flushed output before writing the error page.
    self.clear()

    reason = kwargs.get("reason")
    if "exc_info" in kwargs:
        # Prefer the reason carried by an HTTPError, if one was raised.
        exc = kwargs["exc_info"][1]
        if isinstance(exc, HTTPError) and exc.reason:
            reason = exc.reason
    self.set_status(status_code, reason=reason)

    try:
        self.write_error(status_code, **kwargs)
    except Exception:
        app_log.error("Uncaught exception in write_error", exc_info=True)
    if not self._finished:
        self.finish()
Sends the given HTTP error code to the browser. If `flush()` has already been called, it is not possible to send an error, so this method will simply terminate the response. If output has been written but not yet flushed, it will be discarded and replaced with the error page. Override `write_error()` to customize the error page that is returned. Additional keyword arguments are passed through to `write_error`.
def add(self, original_index, operation):
    """Append one operation to this Run.

    :Parameters:
      - `original_index`: the operation's index within the larger bulk
        operation it came from.
      - `operation`: the operation document.
    """
    # Track the source position and the operation in lockstep.
    self.ops.append(operation)
    self.index_map.append(original_index)
Add an operation to this Run instance. :Parameters: - `original_index`: The original index of this operation within a larger bulk operation. - `operation`: The operation document.
def parse_voc_rec(filename):
    """Parse a PASCAL VOC annotation file into a list of object dicts.

    :param filename: xml file path
    :return: list of dicts with keys 'name', 'difficult' and
        'bbox' ([xmin, ymin, xmax, ymax])

    NOTE(review): the XML tag-name literals were lost in the original; the
    names below follow the standard PASCAL VOC schema -- confirm.
    """
    import xml.etree.ElementTree as ET
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        obj_dict = dict()
        obj_dict['name'] = obj.find('name').text
        obj_dict['difficult'] = int(obj.find('difficult').text)
        bbox = obj.find('bndbox')
        obj_dict['bbox'] = [int(bbox.find('xmin').text),
                            int(bbox.find('ymin').text),
                            int(bbox.find('xmax').text),
                            int(bbox.find('ymax').text)]
        objects.append(obj_dict)
    return objects
parse pascal voc record into a dictionary :param filename: xml file path :return: list of dict
def update_project(self, project_id, name=None, description=None,
                   reference_language=None):
    """Update project settings (name, description, reference language).

    Fields whose parameter is None are left untouched.

    :param project_id: id of the project to update.
    :param name: new project name, or None to keep.
    :param description: new description, or None to keep.
    :param reference_language: new reference language, or None to keep.
    :return: the updated project record from the API response.
    """
    kwargs = {}
    if name is not None:
        kwargs['name'] = name
    if description is not None:
        kwargs['description'] = description
    if reference_language is not None:
        kwargs['reference_language'] = reference_language
    data = self._run(
        url_path="projects/update",
        id=project_id,
        **kwargs
    )
    # NOTE(review): the original response keys were lost; assuming the
    # POEditor-style result->project envelope -- confirm against the API.
    return data['result']['project']
Updates project settings (name, description, reference language) If optional parameters are not sent, their respective fields are not updated.
def bits_to_dict(bits):
    """Convert a Django template tag's kwargs into a dict of Python values.

    Handles number, boolean, list, and string values, e.g.
    from: ["style='monokai'", "num=0,"] to: {'style': 'monokai', 'num': 0}.

    NOTE(review): the original string literals were lost; the trailing-comma
    strip, '=' split, and boolean normalization below are reconstructed --
    confirm the boolean handling against the template tag's callers.
    """
    # Strip the trailing comma the template tokenizer leaves on some bits.
    cleaned_bits = [bit[:-1] if bit.endswith(',') else bit for bit in bits]
    options = dict(bit.split('=') for bit in cleaned_bits)
    for key in options:
        # Normalize lowercase booleans so literal_eval can produce a bool.
        if options[key] == "'true'" or options[key] == "'false'":
            options[key] = options[key].title()
        options[key] = ast.literal_eval(options[key])
    return options
Convert a Django template tag's kwargs into a dictionary of Python types. The only necessary types are number, boolean, list, and string. http://pygments.org/docs/formatters/#HtmlFormatter from: ["style='monokai'", "cssclass='cssclass',", "boolean='true',", 'num=0,', "list='[]'"] to: {'style': 'monokai', 'cssclass': 'cssclass', 'boolean': True, 'num': 0, 'list': [],}
def loadCommandMap(Class, subparsers=None, instantiate=True, **cmd_kwargs):
    """Build a dict mapping every command name and alias to its command.

    Each registered command class is instantiated once (unless
    ``instantiate`` is False, in which case the class itself is mapped).
    Due to aliases the returned dict may hold more keys than there are
    commands, but the unique instance count matches.
    """
    if not Class._registered_commands:
        raise ValueError("No commands have been registered with {}"
                         .format(Class))
    command_map = {}
    # De-duplicate: the registry may map several names to the same class.
    for Cmd in set(Class._registered_commands[Class].values()):
        if instantiate:
            cmd = Cmd(subparsers=subparsers, **cmd_kwargs)
        else:
            cmd = Cmd
        for name in [Cmd.name()] + Cmd.aliases():
            command_map[name] = cmd
    return command_map
Instantiate each registered command to a dict mapping name/alias to instance. Due to aliases, the returned length may be greater than the number of commands, but the unique instance count will match.
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None,
          num=None, aux_note=None, label_store=None, fs=None,
          custom_labels=None, write_dir=''):
    """Write a WFDB annotation file.

    Gateway function for writing an annotation without explicitly creating
    an Annotation object. Exactly one of `symbol` (display symbols) or
    `label_store` (stored integer values) must describe the labels.

    Parameters mirror the Annotation fields; see the Annotation class for
    full field documentation.
    """
    # Bundle everything into an Annotation object and delegate to it.
    annotation = Annotation(record_name=record_name, extension=extension,
                            sample=sample, symbol=symbol, subtype=subtype,
                            chan=chan, num=num, aux_note=aux_note,
                            label_store=label_store, fs=fs,
                            custom_labels=custom_labels)

    # Enforce that exactly one of symbol/label_store is given.
    if symbol is None:
        if label_store is None:
            raise Exception("Either the 'symbol' field or the 'label_store' "
                            "field must be set")
    else:
        if label_store is None:
            annotation.sym_to_aux()
        else:
            raise Exception("Only one of the 'symbol' and 'label_store' "
                            "fields may be input, for describing annotation "
                            "labels")

    annotation.wrann(write_fs=True, write_dir=write_dir)
Write a WFDB annotation file. Specify at least the following: - The record name of the WFDB record (record_name) - The annotation file extension (extension) - The annotation locations in samples relative to the beginning of the record (sample) - Either the numerical values used to store the labels (`label_store`), or more commonly, the display symbols of each label (`symbol`). Parameters ---------- record_name : str The string name of the WFDB record to be written (without any file extensions). extension : str The string annotation file extension. sample : numpy array A numpy array containing the annotation locations in samples relative to the beginning of the record. symbol : list, or numpy array, optional The symbols used to display the annotation labels. List or numpy array. If this field is present, `label_store` must not be present. subtype : numpy array, optional A numpy array containing the marked class/category of each annotation. chan : numpy array, optional A numpy array containing the signal channel associated with each annotation. num : numpy array, optional A numpy array containing the labelled annotation number for each annotation. aux_note : list, optional A list containing the auxiliary information string (or None for annotations without notes) for each annotation. label_store : numpy array, optional A numpy array containing the integer values used to store the annotation labels. If this field is present, `symbol` must not be present. fs : int, or float, optional The numerical sampling frequency of the record to be written to the file. custom_labels : pandas dataframe, optional The map of custom defined annotation labels used for this annotation, in addition to the standard WFDB annotation labels. Custom labels are defined by two or three fields: - The integer values used to store custom annotation labels in the file (optional) - Their short display symbols - Their long descriptions. This input argument may come in four formats: 1. 
A pandas.DataFrame object with columns: ['label_store', 'symbol', 'description'] 2. A pandas.DataFrame object with columns: ['symbol', 'description'] If this option is chosen, label_store values are automatically chosen. 3. A list or tuple of tuple triplets, with triplet elements representing: (label_store, symbol, description). 4. A list or tuple of tuple pairs, with pair elements representing: (symbol, description). If this option is chosen, label_store values are automatically chosen. If the `label_store` field is given for this function, and `custom_labels` is defined, `custom_labels` must contain `label_store` in its mapping. ie. it must come in format 1 or 3 above. write_dir : str, optional The directory in which to write the annotation file Notes ----- This is a gateway function, written as a simple way to write WFDB annotation files without needing to explicity create an Annotation object. You may also create an Annotation object, manually set its attributes, and call its `wrann` instance method. Each annotation stored in a WFDB annotation file contains a sample field and a label field. All other fields may or may not be present. Examples -------- >>> # Read an annotation as an Annotation object >>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb') >>> # Write a copy of the annotation file >>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
def make_empty(self, axes=None):
    """Return an empty BlockManager with the items axis of length 0."""
    if axes is None:
        # Empty items axis; remaining axes are preserved.
        axes = [ensure_index([])] + [ensure_index(ax) for ax in self.axes[1:]]

    # A 1-d (series-like) manager holds a bare array; higher dims hold blocks.
    blocks = np.array([], dtype=self.array_dtype) if self.ndim == 1 else []
    return self.__class__(blocks, axes)
return an empty BlockManager with the items axis of len 0
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
    """Sort an iterable naturally.

    Parameters
    ----------
    seq : iterable
        The input to sort.
    key : callable, optional
        Applied (non-recursively) to each element to obtain the sort key.
    reverse : bool, optional
        Return the list in reversed sorted order. Default False.
    alg : ns enum, optional
        Controls which natural-sorting algorithm is used. Default ns.INT.

    Returns
    -------
    list
        The naturally sorted input.
    """
    natsort_key = natsort_keygen(key, alg)
    return sorted(seq, key=natsort_key, reverse=reverse)
Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5']
def write_results(self, data, name=None):
    """Write *data* as pretty-printed JSON to the given file.

    :param data: JSON-serializable object to write.
    :param name: Path of the file to write. If omitted, "results.json"
        in the current working directory is used.
    """
    if name:
        filepath = os.path.abspath(name)
    else:
        # BUG FIX: os.path has no getcwd(); use os.getcwd().
        filepath = os.path.join(os.getcwd(), "results.json")
    with open(filepath, "w", encoding="utf8") as f:
        try:
            # Python 2 compatibility: coerce to unicode text for io writes.
            f.write(unicode(json.dumps(data, indent=4)))
        except NameError:
            f.write(json.dumps(data, indent=4))
Write JSON to file with the specified name. :param name: Path to the file to be written to. If no path is passed a new JSON file "results.json" will be created in the current working directory. :param data: JSON-serializable object to write.
async def is_object_synced_to_cn(self, client, pid):
    """Return True if the object with *pid* has synced to the CN.

    Uses CNRead.describe() since it is a lightweight HTTP HEAD request.
    Assumes the connection is authenticated with at least read access on
    the object if it exists.
    """
    try:
        await client.describe(pid)
    except d1_common.types.exceptions.DataONEException:
        # describe() failing indicates the object is not (yet) on the CN.
        return False
    else:
        return True
Check if object with {pid} has successfully synced to the CN. CNRead.describe() is used as it's a light-weight HTTP HEAD request. This assumes that the call is being made over a connection that has been authenticated and has read or better access on the given object if it exists.
def main():
    """Main function, called when run as an application.

    Parses host/port and timeout arguments, wires the console, middle man
    and TCP client director together, and runs the event loop.

    NOTE(review): the stripped ``nargs`` literals are restored as '?'
    (optional positional/value), consistent with the defaults provided.
    """
    global args, server_address

    parser = ArgumentParser(description=__doc__)
    parser.add_argument(
        "host", nargs='?',
        help="address of host (default %r)" % (SERVER_HOST,),
        default=SERVER_HOST,
    )
    parser.add_argument(
        "port", nargs='?', type=int,
        help="server port (default %r)" % (SERVER_PORT,),
        default=SERVER_PORT,
    )
    parser.add_argument(
        "--hello", action="store_true",
        default=False,
        help="send a hello message",
    )
    parser.add_argument(
        "--connect-timeout", nargs='?', type=int,
        # Fixed copy-pasted help text (said "idle connection timeout").
        help="connect timeout",
        default=CONNECT_TIMEOUT,
    )
    parser.add_argument(
        "--idle-timeout", nargs='?', type=int,
        help="idle connection timeout",
        default=IDLE_TIMEOUT,
    )
    args = parser.parse_args()

    if _debug: _log.debug("initialization")
    if _debug: _log.debug("    - args: %r", args)

    host = args.host
    port = args.port
    server_address = (host, port)
    if _debug: _log.debug("    - server_address: %r", server_address)

    this_console = ConsoleClient()
    if _debug: _log.debug("    - this_console: %r", this_console)

    this_middle_man = MiddleMan()
    if _debug: _log.debug("    - this_middle_man: %r", this_middle_man)

    this_director = TCPClientDirector(
        connect_timeout=args.connect_timeout,
        idle_timeout=args.idle_timeout,
    )
    if _debug: _log.debug("    - this_director: %r", this_director)

    bind(this_console, this_middle_man, this_director)
    bind(MiddleManASE(), this_director)

    task_manager = TaskManager()
    if _debug: _log.debug("    - task_manager: %r", task_manager)

    if _debug: _log.debug("running")

    run()

    if _debug: _log.debug("fini")
Main function, called when run as an application.
def get_root_objective_bank_ids(self, alias):
    """Get the root objective bank Ids in this hierarchy.

    return: (osid.id.IdList) - the root objective bank Ids
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    return self._get_request(self._urls.roots(alias))
Gets the root objective bank Ids in this hierarchy. return: (osid.id.IdList) - the root objective bank Ids raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented.
def load_class(location): mod_name, cls_name = location = location.strip().split() tokens = mod_name.split() fromlist = if len(tokens) > 1: fromlist = .join(tokens[:-1]) module = __import__(mod_name, fromlist=fromlist) try: return getattr(module, cls_name) except AttributeError: raise ImportError("%r not found in %r" % (cls_name, mod_name))
Take a string of the form 'fedmsg.consumers.ircbot:IRCBotConsumer' and return the IRCBotConsumer class.
def _process_sample (self, ap1, ap2, ap3, triple, tflags):
    """Accumulate one independent phase-closure triple for one timeslot.

    Parameters (assumed from usage -- confirm against caller):
    ap1/ap2/ap3: the three antenna-pol identifiers forming the triple.
    triple: complex per-channel closure values; normalized in place.
    tflags: per-channel weights/flags used for the spectral statistics
        and for the weighted average over channels.
    """
    # Normalize to unit modulus in place so only the phase remains.
    np.divide (triple, np.abs (triple), triple)
    phase = np.angle (triple)

    # Per-channel (spectral) statistics, credited to each antenna-pol.
    # tflags + 0. forces a float copy for the weight argument.
    self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap1, phase, tflags + 0.)
    self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap2, phase, tflags + 0.)
    self.ap_spec_stats_by_ddid[self.cur_ddid].accum (ap3, phase, tflags + 0.)

    # Collapse channels with a weighted average, then take the mean phase.
    triple = np.dot (triple, tflags) / tflags.sum ()
    phase = np.angle (triple)

    # Scalar statistics: global by time, per antenna-pol, per baseline,
    # and per antenna-pol over time, all keyed by the current data desc id.
    self.global_stats_by_time.accum (self.cur_time, phase)
    self.ap_stats_by_ddid[self.cur_ddid].accum (ap1, phase)
    self.ap_stats_by_ddid[self.cur_ddid].accum (ap2, phase)
    self.ap_stats_by_ddid[self.cur_ddid].accum (ap3, phase)
    self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap2), phase)
    self.bp_stats_by_ddid[self.cur_ddid].accum ((ap1, ap3), phase)
    self.bp_stats_by_ddid[self.cur_ddid].accum ((ap2, ap3), phase)
    self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap1, phase)
    self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap2, phase)
    self.ap_time_stats_by_ddid[self.cur_ddid].accum (self.cur_time, ap3, phase)
We have computed one independent phase closure triple in one timeslot.
def get(self, key, default=None, *, section=DataStoreDocumentSection.Data):
    """Return the field specified by its key from the specified section.

    Args:
        key (str): Key pointing to the value to retrieve; supports
            MongoDB dot notation for nested fields.
        default: Value returned if the key does not exist.
        section (DataStoreDocumentSection): Section to read from.

    Returns:
        The stored value, the default, or None when the key is missing
        and no default applies.
    """
    # Restored literal: section and key are joined with dot notation.
    key_notation = '.'.join([section, key])
    try:
        return self._decode_value(self._data_from_dotnotation(key_notation, default))
    except KeyError:
        return None
Return the field specified by its key from the specified section. This method access the specified section of the workflow document and returns the value for the given key. Args: key (str): The key pointing to the value that should be retrieved. It supports MongoDB's dot notation for nested fields. default: The default value that is returned if the key does not exist. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: object: The value from the field that the specified key is pointing to. If the key does not exist, the default value is returned. If no default value is provided and the key does not exist ``None`` is returned.
def add_dict_to_cookiejar(cj, cookie_dict):
    """Insert the cookies from a key/value dictionary into a CookieJar.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into the CookieJar.
    :return: the updated CookieJar.
    """
    new_jar = cookiejar_from_dict(cookie_dict)
    cj.update(new_jar)
    return cj
Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar.
def render(self, context=None):
    """Render the error message.

    If *context* is given it overrides the internal error context.
    """
    # Truthiness check preserved: a falsy context falls back to the
    # internal one.
    if context:
        ctx = context.render()
    else:
        ctx = self.get_error_context().render()
    ctx_part = (" (%s)." % ctx) if ctx else ""
    return "%s: %s%s%s" % (
        self.get_error_kind(),
        self.get_error_message(),
        ctx_part,
        self.get_additional_error_detail()
    )
Renders the error message, optionally using the given context (which, if specified, will override the internal context).
def _get_size(fileno):
    """Get the size of this pseudo terminal.

    :param fileno: stdout.fileno()
    :returns: A (rows, cols) tuple.
    """
    import fcntl
    import termios
    # Restored typecode: TIOCGWINSZ fills a struct winsize of four
    # unsigned shorts; 'h' is the conventional array typecode used here.
    buf = array.array(b'h' if six.PY2 else u'h', [0, 0, 0, 0])
    fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf)
    return buf[0], buf[1]
Get the size of this pseudo terminal. :param fileno: stdout.fileno() :returns: A (rows, cols) tuple.
def to_json(df, values):
    """Format output for the json response.

    *values* maps dataframe column names to display labels. Each column's
    first-row value is expressed as a fraction of the columns' total,
    rounded to two decimals and rendered as a string.
    """
    if df.empty:
        return {"data": []}

    total = float(np.sum([df[col].iloc[0] for col in values]))
    records = [
        {
            "label": values[col],
            "value": "%.2f" % np.around(df[col].iloc[0] / total, decimals=2),
        }
        for col in values
    ]
    return {"data": records}
Format output for the json response.
def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
    r'''Derive and set chebyshev polynomial coefficients approximating the
    volume-height relationship of the tank.

    Parameters
    ----------
    deg_forward : int, optional
        Degree of the chebyshev polynomial for the `V_from_h` curve, [-]
        NOTE(review): the forward (`V_from_h`) fit is absent from this
        body -- it appears to have been excised; confirm upstream.
    deg_backwards : int, optional
        Degree of the chebyshev polynomial for the `h_from_V` curve, [-]
    '''
    from fluids.optional.pychebfun import Chebfun
    # NOTE(review): the method-name literal was lost; 'full' assumed.
    to_fit = lambda h: self.V_from_h(h, 'full')
    # Fit h_from_V over [0, V_total]; store coefficients and a chebval
    # evaluator remapped onto that interval.
    self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
    self.h_from_V_cheb = lambda x : chebval((2.0*x-self.V_total)/(self.V_total), self.c_backward)
    self.chebyshev = True
r'''Method to derive and set coefficients for chebyshev polynomial function approximation of the height-volume and volume-height relationship. A single set of chebyshev coefficients is used for the entire height- volume and volume-height relationships respectively. The forward relationship, `V_from_h`, requires far fewer coefficients in its fit than the reverse to obtain the same relative accuracy. Optionally, deg_forward or deg_backwards can be set to None to try to automatically fit the series to machine precision. Parameters ---------- deg_forward : int, optional The degree of the chebyshev polynomial to be created for the `V_from_h` curve, [-] deg_backwards : int, optional The degree of the chebyshev polynomial to be created for the `h_from_V` curve, [-]
def keep_session_alive(self):
    """Probe the server; if the session expired, log back in."""
    try:
        self.resources()
    except xmlrpclib.Fault as fault:
        # Fault code 5 signals an expired session; anything else is real.
        if fault.faultCode != 5:
            raise
        self.login()
If the session expired, logs back in.
def createElementsFromHTML(cls, html, encoding='utf-8'):
    """Create elements from the provided html and return the root-level tags.

    @param html <str> - Some html data
    @param encoding <str> - Encoding to use for the document
        (restored default 'utf-8' -- the original literal was lost).
    @return list<AdvancedTag> - The root (top-level) tags from parsed html.

    NOTE: Any text outside the tags is lost. Use createBlocksFromHTML to
    retain both text and tags.
    """
    parser = cls(encoding=encoding)
    parser.parseStr(html)

    rootNode = parser.getRoot()
    # Detach the root from the document so returned tags are free-standing.
    rootNode.remove()

    # If the parser inserted an invisible wrapper root, unwrap it.
    if isInvisibleRootTag(rootNode):
        return rootNode.children
    return [rootNode]
createElementsFromHTML - Creates elements from provided html, and returns a list of the root-level elements children of these root-level nodes are accessable via the usual means. @param html <str> - Some html data @param encoding <str> - Encoding to use for document @return list<AdvancedTag> - The root (top-level) tags from parsed html. NOTE: If there is text outside the tags, they will be lost in this. Use createBlocksFromHTML instead if you need to retain both text and tags. Also, if you are just appending to an existing tag, use AdvancedTag.appendInnerHTML
def l_endian(v):
    """Pack *v* in little-endian byte order and return its hex string.

    NOTE(review): the original struct format and encoding literals were
    lost; '<I' (little-endian unsigned 32-bit) and 'utf-8' are assumed --
    confirm against callers.
    """
    w = struct.pack('<I', v)
    return str(binascii.hexlify(w), encoding='utf-8')
Little-endian (小端序): pack the value in little-endian byte order and return its hex string representation.
def set_exception(self, exception):
    """Signal unsuccessful completion."""
    handled = self._finish(self.errbacks, exception)
    if not handled:
        # No errback consumed the error; print it so it isn't lost.
        traceback.print_exception(
            type(exception), exception, exception.__traceback__)
Signal unsuccessful completion.
def set_position_target_global_int_send(self, time_boot_ms, target_system, target_component, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
    """Encode and send a SET_POSITION_TARGET_GLOBAL_INT message.

    Sets a desired vehicle position, velocity, and/or acceleration in a
    global (WGS84) coordinate system; used by an external controller to
    command the vehicle. See the matching _encode method for the field
    semantics.
    """
    # Encode first, then hand the packed message to the transport.
    msg = self.set_position_target_global_int_encode(
        time_boot_ms, target_system, target_component, coordinate_frame,
        type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz,
        yaw, yaw_rate)
    return self.send(msg, force_mavlink1=force_mavlink1)
Sets a desired vehicle position, velocity, and/or acceleration in a global coordinate system (WGS84). Used by an external controller to command the vehicle (manual controller or other system). time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_mac(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF XML for querying an LLDP neighbor's remote
    interface MAC, keyed by local and remote interface names.
    Restored kwargs key literals follow the element names -- confirm.
    """
    config = ET.Element("config")
    get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
    config = get_lldp_neighbor_detail
    output = ET.SubElement(get_lldp_neighbor_detail, "output")
    lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
    local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
    local_interface_name_key.text = kwargs.pop('local_interface_name')
    remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
    remote_interface_name_key.text = kwargs.pop('remote_interface_name')
    remote_interface_mac = ET.SubElement(lldp_neighbor_detail, "remote-interface-mac")
    remote_interface_mac.text = kwargs.pop('remote_interface_mac')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def init_variables(self, verbose=False):
    """Redefine the causes of the graph.

    Randomly draws up to `parents_max` parents for each node (edges only
    from lower-indexed to higher-indexed nodes), validates acyclicity,
    and builds the causal mechanism for every node.
    """
    # Draw a random DAG structure: node j gets parents among nodes < j.
    for j in range(1, self.nodes):
        nb_parents = np.random.randint(0, min([self.parents_max, j])+1)
        for i in np.random.choice(range(0, j), nb_parents, replace=False):
            self.adjacency_matrix[i, j] = 1

    try:
        self.g = nx.DiGraph(self.adjacency_matrix)
        assert not list(nx.simple_cycles(self.g))
    except AssertionError:
        # NOTE(review): the retry recurses without visibly resetting
        # adjacency_matrix first, and the outer frame continues after the
        # recursion returns -- presumably reset happens elsewhere; confirm.
        if verbose:
            print("Regenerating, graph non valid...")
        self.init_variables()

    # One mechanism per node: noise-driven generator for parentless nodes,
    # otherwise a mechanism conditioned on the node's parent count.
    self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])), self.points, self.noise, noise_coeff=self.noise_coeff) if sum(self.adjacency_matrix[:, i]) else self.initial_generator for i in range(self.nodes)]
Redefine the causes of the graph.
def path_complete(self, text: str, line: str, begidx: int, endidx: int,
                  path_filter: Optional[Callable[[str], bool]] = None) -> List[str]:
    """Perform completion of local file system paths.

    NOTE(review): large portions of this body appear excised/corrupted
    (missing string literals, undefined `matches`/`users`/`cur_match`,
    truncated assignments). Restore from the upstream source before use.

    :param text: the string prefix being matched
    :param line: the current input line with leading whitespace removed
    :param begidx: beginning index of the prefix text
    :param endidx: ending index of the prefix text
    :param path_filter: optional predicate that keeps a path in the results
    :return: a list of possible tab completions
    """
    # Completes '~' and '~user' forms.
    def complete_users() -> List[str]:
        # NOTE(review): platform-prefix literal lost (presumably 'win').
        if sys.platform.startswith():
            expanded_path = os.path.expanduser(text)
            if os.path.isdir(expanded_path):
                user = text
                if add_trailing_sep_if_dir:
                    user += os.path.sep
                users.append(user)
        else:
            import pwd
            # Offer every user whose home directory exists.
            for cur_pw in pwd.getpwall():
                if os.path.isdir(cur_pw.pw_dir):
                    # NOTE(review): '~' prefix literal lost here.
                    cur_user = + cur_pw.pw_name
                    if cur_user.startswith(text):
                        if add_trailing_sep_if_dir:
                            cur_user += os.path.sep
                        users.append(cur_user)
        return users
    # Append a separator only when the cursor sits at the token's end.
    add_trailing_sep_if_dir = False
    if endidx == len(line) or (endidx < len(line) and line[endidx] != os.path.sep):
        add_trailing_sep_if_dir = True
    cwd = os.getcwd()
    cwd_added = False
    # NOTE(review): right-hand side lost (presumably empty strings).
    orig_tilde_path = expanded_tilde_path =
    if not text:
        search_str = os.path.join(os.getcwd(), )
        cwd_added = True
    else:
        if path_filter is not None:
            matches = [c for c in matches if path_filter(c)]
        if os.path.isdir(cur_match) and add_trailing_sep_if_dir:
            matches[index] += os.path.sep
            self.display_matches[index] += os.path.sep
    # Strip the cwd prefix that was added for display purposes.
    if cwd_added:
        if cwd == os.path.sep:
            to_replace = cwd
        else:
            to_replace = cwd + os.path.sep
        matches = [cur_path.replace(to_replace, , 1) for cur_path in matches]
    # Restore the user's original '~' spelling in the results.
    if expanded_tilde_path:
        matches = [cur_path.replace(expanded_tilde_path, orig_tilde_path, 1) for cur_path in matches]
    return matches
Performs completion of local file system paths :param text: the string prefix we are attempting to match (all returned matches must begin with it) :param line: the current input line with leading whitespace removed :param begidx: the beginning index of the prefix text :param endidx: the ending index of the prefix text :param path_filter: optional filter function that determines if a path belongs in the results this function takes a path as its argument and returns True if the path should be kept in the results :return: a list of possible tab completions
def _create_session(self, scope):
    """Instantiate a new session object for connecting with Degreed.

    Re-uses the existing session until the OAuth token expires, then
    fetches a fresh token and builds a new requests.Session with the
    bearer token attached.
    """
    now = datetime.datetime.utcnow()
    if self.session is None or self.expires_at is None or now >= self.expires_at:
        # Token missing or expired: tear down and rebuild the session.
        if self.session:
            self.session.close()
        oauth_access_token, expires_at = self._get_oauth_access_token(
            self.enterprise_configuration.key,
            self.enterprise_configuration.secret,
            self.enterprise_configuration.degreed_user_id,
            self.enterprise_configuration.degreed_user_password,
            scope
        )
        session = requests.Session()
        session.timeout = self.SESSION_TIMEOUT
        # Restored header literals -- confirm content type against the API.
        session.headers['Authorization'] = 'Bearer {}'.format(oauth_access_token)
        session.headers['content-type'] = 'application/json'
        self.session = session
        self.expires_at = expires_at
Instantiate a new session object for use in connecting with Degreed
def get_by_natural_key(self, *args):
    """Return the object corresponding to the provided natural key.

    (Generic implementation of the standard Django function.)
    """
    kwargs = self.natural_key_kwargs(*args)

    # Resolve nested natural keys for each related model first.
    for name, rel_to in self.model.get_natural_key_info():
        if not rel_to:
            continue
        nested_key = extract_nested_key(kwargs, rel_to, name)
        if not nested_key:
            kwargs[name] = None
            continue
        try:
            kwargs[name] = rel_to.objects.get_by_natural_key(*nested_key)
        except rel_to.DoesNotExist:
            # Surface the failure as this model's own DoesNotExist.
            raise self.model.DoesNotExist()

    return self.get(**kwargs)
Return the object corresponding to the provided natural key. (This is a generic implementation of the standard Django function)
def process_apk(self, data, name):
    """Process an Android application (APK): extract its signing
    certificate and feed it to the x509 processing pipeline.

    NOTE(review): several string literals were stripped from this body
    (log messages, TestResult type/error values, the aux dict, the
    `source=` argument) -- restore from upstream before use.

    :param data: raw APK bytes
    :param name: file name, used for reporting
    :return: list of TestResult objects
    """
    try:
        from apk_parse.apk import APK
    except Exception as e:
        # apk_parse missing: report a single error result.
        logger.warning()
        return [TestResult(fname=name, type=, error=)]

    ret = []
    try:
        from cryptography.x509.base import load_der_x509_certificate
        apkf = APK(data, process_now=False, process_file_types=False, raw=True, temp_dir=self.args.tmp_dir)
        apkf.process()
        self.num_apk += 1

        # Pull out the signing certificate and analyze it as x509.
        pem = apkf.cert_pem
        aux = {: }
        x509 = load_der_x509_certificate(pem_to_der(pem), self.get_backend())
        sub = self.process_x509(x509, name=name, idx=0, data=data, pem=True, source=, aux=aux)
        ret.append(sub)
    except Exception as e:
        # Best effort: log and continue with whatever was collected.
        logger.debug( % (name, e))
        self.trace_logger.log(e)
    return ret
Processes Android application :param data: :param name: :return:
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
    r"""Register `tensor` to the summary report as audio.

    Args:
      tensor: A `Tensor` to log as audio
      sample_rate: An int. Sample rate to report. Default is 16000.
      prefix: A `string`. Prefix displayed in the tensorboard web UI.
      name: A `string`. Name displayed in the tensorboard web UI.

    Returns:
      None

    NOTE(review): the separator/suffix literals were lost; '/' and '-au'
    are assumed from the sibling sg_summary_* helpers -- confirm.
    """
    # Build the display name from the optional prefix and tensor name.
    prefix = '' if prefix is None else prefix + '/'
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    # Only register once per variable scope (skip on reuse).
    if not tf.get_variable_scope().reuse:
        tf.summary.audio(name + '-au', tensor, sample_rate)
r"""Register `tensor` to summary report as audio Args: tensor: A `Tensor` to log as audio sample_rate : An int. Sample rate to report. Default is 16000. prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
def is_mode_supported(mode, ver):
    """Return whether `mode` is supported by `version`.

    Note: does not check that `version` is a valid (Micro) QR Code
    version; invalid versions like ``41`` may return an illegal value.

    :param int mode: Canonicalized mode.
    :param int or None ver: (Micro) QR Code version constant.
    :rtype: bool
    :raises ModeError: if `mode` is unknown.
    """
    # Micro QR versions are negative constants; any regular QR version
    # (> 0) maps onto the shared None key of SUPPORTED_MODES.
    ver = None if ver > 0 else ver
    try:
        return ver in consts.SUPPORTED_MODES[mode]
    except KeyError:
        # NOTE(review): the original message literal was lost.
        raise ModeError('Invalid mode: {0}'.format(mode))
\ Returns if `mode` is supported by `version`. Note: This function does not check if `version` is actually a valid (Micro) QR Code version. Invalid versions like ``41`` may return an illegal value. :param int mode: Canonicalized mode. :param int or None ver: (Micro) QR Code version constant. :rtype: bool
def replace(pretty, old_str, new_str):
    """Replace `old_str` with `new_str` line by line, logging where each
    replacement happened.

    :param pretty: input text.
    :param old_str: substring to replace.
    :param new_str: replacement substring.
    :return: the text with all replacements applied.
    """
    out_str = ''
    line_number = 1
    changes = 0
    for line in pretty.splitlines(keepends=True):
        new_line = line.replace(old_str, new_str)
        if line.find(old_str) != -1:
            # Reconstructed log messages (original literals were lost).
            logging.debug('Match on line %d', line_number)
            logging.debug('Before: %r', line)
            logging.debug('After:  %r', new_line)
            changes += 1
        out_str += new_line
        line_number += 1
    logging.info('Replaced %r on %d lines', old_str, changes)
    return out_str
Replace strings giving some info on where the replacement was done
def socket_parse(self, astr_destination):
    """Examine *astr_destination*; if it has the form <host>:<port>,
    record the host and port for datagram comms and mark this object as
    socket-based.

    :return: True if a socket destination was parsed, else False.
    """
    # Restored literal: split on the first ':'.
    t_socketInfo = astr_destination.partition(':')
    # partition() returns ('', '', s) when no separator is found, so a
    # non-empty middle element means the ':' was present.
    if len(t_socketInfo[1]):
        self._b_isSocket = True
        self._socketRemote = t_socketInfo[0]
        self._socketPort = t_socketInfo[2]
    else:
        self._b_isSocket = False
    return self._b_isSocket
Examines <astr_destination> and if of form <str1>:<str2> assumes that <str1> is a host to send datagram comms to over port <str2>. Returns True or False.
def save_hdf(self, filename, path='', overwrite=False, append=False):
    """Save object data to an HDF file (only works if MCMC has been run).

    Samples are saved under `{path}/samples`, with object properties
    attached as storer attributes, suitable for reloading via load_hdf.

    :param filename: Name of the .h5 file to write.
    :param path: (optional) Path within the HDF file structure.
    :param overwrite: (optional) If True, delete an existing file first.
    :param append: (optional) If True and the file exists, update only
        the given path within it.
    :raises IOError: if the path exists and neither overwrite nor append
        was requested.
    """
    if os.path.exists(filename):
        store = pd.HDFStore(filename)
        if path in store:
            store.close()
            if overwrite:
                os.remove(filename)
            elif not append:
                # Restored message (original literal was lost).
                raise IOError('{} in {} exists. Set overwrite=True or '
                              'append=True.'.format(path, filename))
        else:
            store.close()

    self.samples.to_hdf(filename, '{}/samples'.format(path))
    store = pd.HDFStore(filename)
    attrs = store.get_storer('{}/samples'.format(path)).attrs
    attrs.properties = self.properties
    attrs.ic_type = type(self.ic)
    attrs.maxAV = self.maxAV
    attrs.max_distance = self.max_distance
    attrs.min_logg = self.min_logg
    attrs.use_emcee = self.use_emcee
    attrs._mnest_basename = self._mnest_basename
    attrs.name = self.name
    store.close()
Saves object data to HDF file (only works if MCMC is run) Samples are saved to /samples location under given path, and object properties are also attached, so suitable for re-loading via :func:`StarModel.load_hdf`. :param filename: Name of file to save to. Should be .h5 file. :param path: (optional) Path within HDF file structure to save to. :param overwrite: (optional) If ``True``, delete any existing file by the same name before writing. :param append: (optional) If ``True``, then if a file exists, then just the path within the file will be updated.
def setproctitle(text):
    """Wrapper for setproctitle.setproctitle().

    Sets *text* as the new process title and returns the previous value.
    The setproctitle module is commonly not installed; if missing,
    nothing is changed and None is returned.
    See: https://pypi.python.org/pypi/setproctitle
    """
    try:
        import setproctitle as _spt
    except Exception:
        # Module unavailable: leave the title untouched.
        return None
    previous = _spt.getproctitle()
    _spt.setproctitle(text)
    return previous
This is a wrapper for setproctitle.setproctitle(). The call sets 'text' as the new process title and returns the previous value. The module is commonly not installed. If missing, nothing is changed, and the call returns None. The module is described here: https://pypi.python.org/pypi/setproctitle