def FromSpec(self, spec):
    """
    Args:
        spec: (name, {...}), or Parameter object

    Dict keys:
        "caption" -- (optional) text for label in editor. Defaults to the
            keyword argument name
        "toolTip" -- (optional)
        "type" -- (optional) defaults to the type of "value", or int if
            "value" is not specified. Accepts:
                - int
                - float
                - str
                - bool
                - list
        "value" -- (optional) defaults to 0 if numeric, False if bool,
            "" if str
    """
    if isinstance(spec, Parameter):
        self.name = spec.name
        self.caption = spec.caption if spec.caption is not None else spec.name
        self.toolTip = spec.toolTip if spec.toolTip is not None else ""
        self.type = spec.type if spec.type is not None \
            else type(spec.value) if spec.value is not None else int
        self.value = spec.value
    else:
        self.name, d = spec
        self.caption = d.get("caption", self.name)
        self.toolTip = d.get("toolTip", "")
        t = self.type = d.get("type", type(d["value"]) if "value" in d else int)
        if t not in (int, float, bool, str, list):
            raise TypeError("Invalid type: '{0!s}'".format(t.__name__))
        self.value = d.get("value")
    if self.value is None:
        self.value = 0 if self.type == int else \
                     0. if self.type == float else \
                     False if self.type == bool else ""
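A hedged usage sketch of the dict-spec form above; it assumes `Parameter` can be instantiated without arguments, which is not shown in the source:

# Hypothetical usage -- Parameter() taking no arguments is an assumption.
p = Parameter()
p.FromSpec(("threshold", {"caption": "Threshold", "type": float, "value": 0.5}))
assert (p.name, p.caption, p.type, p.value) == ("threshold", "Threshold", float, 0.5)

# With neither "type" nor "value", type falls back to int and value to 0.
q = Parameter()
q.FromSpec(("count", {}))
assert (q.type, q.value) == (int, 0)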
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
    """
    Convert an ndarray to a DenseTensor which would be used on the Java side.

    >>> import numpy as np
    >>> from bigdl.util.common import JTensor
    >>> from bigdl.util.common import callBigDlFunc
    >>> np.random.seed(123)
    >>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
    >>> result = JTensor.from_ndarray(data)
    >>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
    >>> expected_shape = np.array([2, 3])
    >>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
    >>> np.testing.assert_allclose(result.shape, expected_shape)
    >>> data_back = result.to_ndarray()
    >>> (data == data_back).all()
    True
    >>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data))  # noqa
    >>> array_from_tensor = tensor1.to_ndarray()
    >>> (array_from_tensor == data).all()
    True
    """
    if a_ndarray is None:
        return None
    assert isinstance(a_ndarray, np.ndarray), \
        "input should be a np.ndarray, not %s" % type(a_ndarray)
    # For 0-d arrays fall back to a 1-element shape tuple; note the trailing
    # comma -- the original (a_ndarray.size) was just an int, not a tuple.
    return cls(a_ndarray,
               a_ndarray.shape if a_ndarray.shape else (a_ndarray.size,),
               bigdl_type)
def _contribute_to_class(self, mcs_args: McsArgs):
    """
    Where the magic happens. Takes one parameter, the :class:`McsArgs` of
    the class-under-construction, and processes the declared ``class Meta``
    from it (if any). We fill ourselves with the declared meta options'
    name/value pairs, give the declared meta options a chance to also
    contribute to the class-under-construction, and finally replace the
    class-under-construction's ``class Meta`` with this populated factory
    instance (aka ``self``).
    """
    self._mcs_args = mcs_args

    Meta = mcs_args.clsdict.pop('Meta', None)  # type: Type[object]
    base_classes_meta = mcs_args.getattr('Meta', None)  # type: MetaOptionsFactory

    # must come before _fill_from_meta, because some meta options may depend
    # upon having access to the values of earlier meta options
    mcs_args.clsdict['Meta'] = self
    self._fill_from_meta(Meta, base_classes_meta, mcs_args)
    for option in self._get_meta_options():
        option_value = getattr(self, option.name, None)
        option.contribute_to_class(mcs_args, option_value)
def argparse(argv, parser, arguments):
    """ A command line argument parser.
    Parses arguments coming from the argv Observable and outputs them as
    Argument items in the output observable.

    Parameters
    ----------
    argv : Observable
        An Observable of strings.
    parser : Observable
        An Observable containing one Parser item.
    arguments : Observable
        An Observable containing ArgumentDef items.

    Returns
    -------
    Observable
        An Observable of Argument items.
    """
    def add_arg(parser, arg_spec):
        parser.add_argument(arg_spec.name, help=arg_spec.help)
        return parser

    parse_request = parser \
        .map(lambda i: ArgumentParser(description=i.description)) \
        .combine_latest(arguments, lambda parser, arg_def: add_arg(parser, arg_def)) \
        .last() \
        .combine_latest(argv.to_list(), lambda parser, args: (parser, args))

    def subscribe(observer):
        def on_next(value):
            parser, args = value
            try:
                args = parser.parse_args(args)
                for key, value in vars(args).items():
                    observer.on_next(Argument(key=key, value=value))
            except NameError as exc:
                observer.on_error("{}\n{}".format(exc, parser.format_help()))

        return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)

    return AnonymousObservable(subscribe)
def prep_bootstrap(mpt):
    '''
    Update and get the bootstrap script to a random place

    CLI Example:

    .. code-block:: bash

        salt '*' seed.prep_bootstrap /tmp
    '''
    # Verify that the bootstrap script is downloaded
    bs_ = __salt__['config.gather_bootstrap_script']()
    fpd_ = os.path.join(mpt, 'tmp', "{0}".format(uuid.uuid4()))
    if not os.path.exists(fpd_):
        os.makedirs(fpd_)
    os.chmod(fpd_, 0o700)
    fp_ = os.path.join(fpd_, os.path.basename(bs_))
    # Copy script into tmp
    shutil.copy(bs_, fp_)
    tmppath = fpd_.replace(mpt, '')
    return fp_, tmppath
def pop(self):
    """Pop an entry off the stack and make its node a child of the last."""
    dfa, state, node = self.stack.pop()
    if self.stack:
        self.stack[-1][2].children.append(node)
    else:
        self.root = node
def from_tabledata(self, value, is_overwrite_table_name=True):
    """
    Set the following attributes from |TableData|

    - :py:attr:`~.table_name`.
    - :py:attr:`~.headers`.
    - :py:attr:`~.value_matrix`.

    And create a worksheet named from :py:attr:`~.table_name`
    if it does not exist yet.

    :param tabledata.TableData value: Input table data.
    """
    super(ExcelTableWriter, self).from_tabledata(value)

    if self.is_opened():
        self.make_worksheet(self.table_name)
def find(max_depth=3):
    """Returns the path of a Pipfile in parent directories."""
    i = 0
    for c, d, f in walk_up(os.getcwd()):
        i += 1
        if i < max_depth:
            # Note: the original `if 'Pipfile':` was always true; check the
            # directory's file listing instead.
            if 'Pipfile' in f:
                p = os.path.join(c, 'Pipfile')
                if os.path.isfile(p):
                    return p
    raise RuntimeError('No Pipfile found!')
def extend_with_ms(self, req, sms_dict):
    """
    Add signed metadata statements to a request

    :param req: The request
    :param sms_dict: A dictionary with FO IDs as keys and signed metadata
        statements (sms) or uris pointing to sms as values.
    :return: The updated request
    """
    _ms_uri = {}
    _ms = {}
    for fo, sms in sms_dict.items():
        if sms.startswith('http://') or sms.startswith('https://'):
            _ms_uri[fo] = sms
        else:
            _ms[fo] = sms

    if _ms:
        req['metadata_statements'] = Message(**_ms)
    if _ms_uri:
        req['metadata_statement_uris'] = Message(**_ms_uri)
    return req
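A hedged sketch of how values in `sms_dict` are routed; `svc` stands in for whatever object defines `extend_with_ms`, and the FO identifiers and statement strings are made up:

req = {}
sms_dict = {
    'https://fo.example.org': 'eyJhbGciOiJSUzI1NiJ9...',          # inline signed statement
    'https://other-fo.example.org': 'https://example.com/sms1',   # uri pointing to one
}
req = svc.extend_with_ms(req, sms_dict)
# The inline statement lands under 'metadata_statements', the https:// value
# under 'metadata_statement_uris', each wrapped in a Message keyed by FO ID.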
def set_pkg_summary(self, doc, text):
    """Sets the package summary.
    Raises CardinalityError if summary already set.
    Raises OrderError if no package previously defined.
    """
    self.assert_package_exists()
    if not self.package_summary_set:
        self.package_summary_set = True
        doc.package.summary = text
    else:
        raise CardinalityError('Package::Summary')
def sparql(self, stringa):
    """Wrapper around a SPARQL query."""
    qres = self.rdfgraph.query(stringa)
    return list(qres)
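A minimal usage sketch, assuming the enclosing object (called `onto` here) holds an rdflib Graph on `self.rdfgraph`; the query and variable names are illustrative only:

rows = onto.sparql("""
    SELECT ?s ?label
    WHERE { ?s rdfs:label ?label }
    LIMIT 5
""")
for s, label in rows:   # each row is an rdflib result tuple
    print(s, label)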
def apply_new_outcome_name(self, path, new_name):
    """Apply the newly entered outcome name if it was changed

    :param str path: The path string of the renderer
    :param str new_name: Newly entered outcome name
    """
    # Don't do anything if outcome name didn't change
    if new_name == self.list_store[path][self.NAME_STORAGE_ID]:
        return

    outcome = self.list_store[path][self.CORE_STORAGE_ID]
    try:
        outcome.name = new_name
        logger.debug("Outcome name changed to '{0}'".format(outcome.name))
    except (ValueError, TypeError) as e:
        logger.warning("The name of the outcome could not be changed: {0}".format(e))
    self.list_store[path][self.NAME_STORAGE_ID] = outcome.name
def get_favourite_accounts(self) -> List[Account]:
    """ Provides a list of favourite accounts """
    from gnucash_portfolio.lib.settings import Settings

    settings = Settings()
    favourite_accts = settings.favourite_accounts
    accounts = self.get_list(favourite_accts)
    return accounts
def metadata_index_json(self):
    """str: Path to the INDEX_JSON file."""
    # Note: os.path.join never touches the filesystem, so the original
    # try/except FileNotFoundError could never fall through to the second
    # name; check for the file explicitly instead.
    index_json = op.join(self.metadata_dir, 'INDEX.json')
    if op.isfile(index_json):
        return index_json
    return op.join(self.metadata_dir, 'INDEX_JSON')
def load_plugins(self):
    """Refresh the list of available collectors and auditors

    Returns:
        `None`
    """
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.collectors']['plugins']:
        cls = entry_point.load()
        if cls.enabled():
            self.log.debug('Collector loaded: {} in module {}'.format(cls.__name__, cls.__module__))
            self.collectors.setdefault(cls.type, []).append(Worker(
                cls.name,
                cls.interval,
                {
                    'name': entry_point.name,
                    'module_name': entry_point.module_name,
                    'attrs': entry_point.attrs
                }
            ))
        else:
            self.log.debug('Collector disabled: {} in module {}'.format(cls.__name__, cls.__module__))

    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auditors']['plugins']:
        cls = entry_point.load()
        if cls.enabled():
            self.log.debug('Auditor loaded: {} in module {}'.format(cls.__name__, cls.__module__))
            self.auditors.append(Worker(
                cls.name,
                cls.interval,
                {
                    'name': entry_point.name,
                    'module_name': entry_point.module_name,
                    'attrs': entry_point.attrs
                }
            ))
        else:
            self.log.debug('Auditor disabled: {} in module {}'.format(cls.__name__, cls.__module__))

    collector_count = sum(len(x) for x in self.collectors.values())
    auditor_count = len(self.auditors)
    if collector_count + auditor_count == 0:
        raise Exception('No auditors or collectors loaded, aborting scheduler')

    self.log.info('Scheduler loaded {} collectors and {} auditors'.format(collector_count, auditor_count))
def DirectoryStimuliFactory(loader):
    """
    Takes an input path to the images folder of an experiment and generates
    automatically the category - filenumber list needed to construct an
    appropriate _categories object.

    Parameters:
        loader : Loader object which contains
            impath : string
                path to the input, i.e. image-, files of the experiment. All
                subfolders in that path will be treated as categories. If no
                subfolders are present, category 1 will be assigned and all
                files in the folder are considered input images. Images have
                to end in '.png'.
            ftrpath : string
                path to the feature folder. It is expected that the folder
                structure corresponds to the structure in impath, i.e.
                ftrpath/category/featurefolder/featuremap.mat
                Furthermore, features are assumed to be the same for all
                categories.
    """
    impath = loader.impath
    ftrpath = loader.ftrpath
    # check whether the user has read permission for the paths
    assert os.access(impath, os.R_OK)
    assert os.access(ftrpath, os.R_OK)

    # EXTRACTING IMAGE NAMES
    img_per_cat = {}
    # extract only directories in the given folder
    subfolders = [name for name in os.listdir(impath) if os.path.isdir(
        os.path.join(impath, name))]
    # if there are no subfolders, walk through files. Take 1 as key for the
    # categories object
    if not subfolders:
        [_, _, files] = next(os.walk(os.path.join(impath)))
        # this only takes entries that end with '.png'
        entries = {1: [int(cur_file[cur_file.find('_') + 1:-4]) for cur_file
                       in files if cur_file.endswith('.png')]}
        img_per_cat.update(entries)
        subfolders = ['']
    # if there are subfolders, walk through them
    else:
        for directory in subfolders:
            [_, _, files] = next(os.walk(os.path.join(impath, directory)))
            # this only takes entries that end with '.png'. Strips the ending
            # and considers everything after the first '_' as the image number
            imagenumbers = [int(cur_file[cur_file.find('_') + 1:-4])
                            for cur_file in files
                            if cur_file.endswith('.png') and len(cur_file) > 4]
            entries = {int(directory): imagenumbers}
            img_per_cat.update(entries)
        del directory
        del imagenumbers
    # in case subfolders do not exist, '' is appended here.
    _, features, files = next(os.walk(os.path.join(ftrpath, subfolders[0])))
    return Categories(loader, img_per_cat=img_per_cat, features=features)
def strnum(prefix: str, num: int, suffix: str = "") -> str:
    """
    Makes a string of the format ``<prefix><number><suffix>``.
    """
    return "{}{}{}".format(prefix, num, suffix)
def proper_path(path):
    """
    Clean up the path specification so it has the canonical form

        "./" <path> "/"
    """
    if path.startswith("./"):
        pass
    elif path.startswith("/"):
        path = ".%s" % path
    elif path.startswith("."):
        while path.startswith("."):
            path = path[1:]
        if path.startswith("/"):
            path = ".%s" % path
        else:
            path = "./%s" % path
    else:
        path = "./%s" % path

    if not path.endswith("/"):
        path += "/"

    return path
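A few worked examples, derived by tracing the branches above (not taken from the source):

assert proper_path("foo")      == "./foo/"
assert proper_path("/foo/bar") == "./foo/bar/"
assert proper_path("./foo/")   == "./foo/"
assert proper_path("..foo")    == "./foo/"   # leading dots are stripped first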
def record(self, person, event, properties=None, timestamp=None,
           path=KISSmetrics.RECORD_PATH):
    """Record `event` for `person` with any `properties`.

    :param person: the individual performing the `event`
    :param event: the `event` name that was performed
    :param properties: any additional data to include
    :type properties: dict
    :param timestamp: when the `event` was performed; optional for
        back-dating
    :param path: HTTP endpoint to use; defaults to
        ``KISSmetrics.RECORD_PATH``

    :returns: an HTTP response for the request
    :rtype: `urllib3.response.HTTPResponse`
    """
    this_request = request.record(self.key, person, event,
                                  timestamp=timestamp,
                                  properties=properties,
                                  scheme=self.trk_scheme,
                                  host=self.trk_host, path=path)
    return self._request(this_request)
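A hedged usage sketch; treat the import and `Client` constructor as assumptions about the surrounding KISSmetrics client library, since only the method itself appears above:

import KISSmetrics

client = KISSmetrics.Client(key='my-api-key')   # assumed constructor
resp = client.record('user@example.com', 'Signed Up',
                     properties={'plan': 'pro'})
print(resp.status)   # HTTP status of the urllib3 response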
def _processHandler(self, securityHandler, param_dict):
    """processes the handler and returns the cookiejar"""
    cj = None
    handler = None
    if securityHandler is None:
        cj = cookiejar.CookieJar()
    elif securityHandler.method.lower() == "token" or \
            securityHandler.method.lower() == "oauth":
        param_dict['token'] = securityHandler.token
        if hasattr(securityHandler, 'cookiejar'):
            cj = securityHandler.cookiejar
        if hasattr(securityHandler, 'handler'):
            handler = securityHandler.handler
    elif securityHandler.method.lower() == "handler":
        handler = securityHandler.handler
        cj = securityHandler.cookiejar
    if len(param_dict) > 0:
        for k, v in param_dict.items():
            if isinstance(v, bool):
                param_dict[k] = json.dumps(v)
    return param_dict, handler, cj
def find_jump_targets(self, debug):
    """
    Detect all offsets in a byte code which are jump targets
    where we might insert a COME_FROM instruction.

    Return the list of offsets. An instruction can be jumped to
    from multiple instructions.
    """
    code = self.code
    n = len(code)
    self.structs = [{'type': 'root', 'start': 0, 'end': n - 1}]

    # All loop entry points
    self.loops = []

    # Map fixed jumps to their real destination
    self.fixed_jumps = {}
    self.except_targets = {}
    self.ignore_if = set()
    self.build_statement_indices()
    self.else_start = {}

    # Containers filled by detect_control_flow()
    self.not_continue = set()
    self.return_end_ifs = set()
    self.setup_loop_targets = {}  # target given setup_loop offset
    self.setup_loops = {}  # setup_loop offset given target

    targets = {}
    for i, inst in enumerate(self.insts):
        offset = inst.offset
        op = inst.opcode

        # Determine structures and fix jumps in Python versions
        # since 2.3
        self.detect_control_flow(offset, targets, i)

        if inst.has_arg:
            label = self.fixed_jumps.get(offset)
            oparg = inst.arg
            if (self.version >= 3.6 and
                    self.code[offset] == self.opc.EXTENDED_ARG):
                j = xdis.next_offset(op, self.opc, offset)
                next_offset = xdis.next_offset(op, self.opc, j)
            else:
                next_offset = xdis.next_offset(op, self.opc, offset)

            if label is None:
                if op in self.opc.hasjrel and op != self.opc.FOR_ITER:
                    label = next_offset + oparg
                elif op in self.opc.hasjabs:
                    if op in self.jump_if_pop:
                        if oparg > offset:
                            label = oparg

            if label is not None and label != -1:
                targets[label] = targets.get(label, []) + [offset]
        elif op == self.opc.END_FINALLY and offset in self.fixed_jumps:
            label = self.fixed_jumps[offset]
            targets[label] = targets.get(label, []) + [offset]
            pass
        pass  # for loop

    # DEBUG:
    if debug in ('both', 'after'):
        import pprint as pp
        pp.pprint(self.structs)

    return targets
def xmoe2_v1_l4k():
    """With sequence length 4096."""
    hparams = xmoe2_v1()
    hparams.batch_size = 32
    hparams.max_length = 4096
    hparams.split_to_length = 4096
    hparams.reshape_logits_hack = True
    return hparams
def all_files_in_directory(path):
    """
    Recursively list all files under a directory
    """
    file_list = []
    for dirname, dirnames, filenames in os.walk(path):
        for filename in filenames:
            file_list.append(os.path.join(dirname, filename))
    return file_list
def comicPageLink(self, comic, url, prevUrl):
    """Write previous link into JSON."""
    pageInfo = self.getPageInfo(comic, url)
    pageInfo['prev'] = prevUrl