Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
389,200 | def FromSpec(self, spec):
if isinstance(spec, Parameter):
self.name = spec.name
self.caption = spec.caption if spec.caption is not None else spec.name
self.toolTip = spec.toolTip if spec.toolTip is not None else ""
self.type = spec.type if spec.type is not None else type(spec.value) if spec.value is not None else int
self.value = spec.value
else:
self.name, d = spec
self.caption = d.get("caption", self.name)
self.toolTip = d.get("toolTip", "")
t = self.type = d.get("type", type(d["value"]) if "value" in d else int)
if t not in (int, float, bool, str, list):
raise TypeError("Invalid type: {}".format(t.__name__))
self.value = d.get("value")
if self.value is None:
self.value = 0 if self.type == int else \
0. if self.type == float else \
False if self.type == bool else "" | Args:
spec: (name, {...}), or Parameter object
Dict keys:
"caption" -- (optional) text for label in editor. Defaults to the
keyword argument name
"toolTip" (optional)
"type" -- (optional, defaults to type("value") or int if "value" is
not specified. Accepts:
- int
- float
- str
- bool
- list
"value" -- (optional) defaults to 1 if numeric, False if bool,
"" if str |
389,201 | def from_ndarray(cls, a_ndarray, bigdl_type="float"):
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape if a_ndarray.shape else (a_ndarray.size,),  # wrap scalar size in a 1-tuple
bigdl_type) | Convert an ndarray to a DenseTensor to be used on the Java side.
>>> import numpy as np
>>> from bigdl.util.common import JTensor
>>> from bigdl.util.common import callBigDlFunc
>>> np.random.seed(123)
>>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
>>> result = JTensor.from_ndarray(data)
>>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
>>> expected_shape = np.array([2, 3])
>>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
>>> np.testing.assert_allclose(result.shape, expected_shape)
>>> data_back = result.to_ndarray()
>>> (data == data_back).all()
True
>>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data)) # noqa
>>> array_from_tensor = tensor1.to_ndarray()
>>> (array_from_tensor == data).all()
True |
389,202 | def _contribute_to_class(self, mcs_args: McsArgs):
self._mcs_args = mcs_args
Meta = mcs_args.clsdict.pop('Meta', None)
base_classes_meta = mcs_args.getattr('Meta', None)
mcs_args.clsdict['Meta'] = self
self._fill_from_meta(Meta, base_classes_meta, mcs_args)
for option in self._get_meta_options():
option_value = getattr(self, option.name, None)
option.contribute_to_class(mcs_args, option_value) | Where the magic happens. Takes one parameter, the :class:`McsArgs` of the
class-under-construction, and processes the declared ``class Meta`` from
it (if any). We fill ourself with the declared meta options' name/value pairs,
give the declared meta options a chance to also contribute to the class-under-
construction, and finally replace the class-under-construction's ``class Meta``
with this populated factory instance (aka ``self``). |
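A toy metaclass sketch of the pop-and-replace flow the docstring describes; this is illustrative, not the library's implementation:

class ToyMeta(type):
    def __new__(mcs, name, bases, clsdict):
        declared = clsdict.pop('Meta', None)  # pop the declared class Meta, if any
        options = ({k: v for k, v in vars(declared).items() if not k.startswith('_')}
                   if declared else {})
        cls = super().__new__(mcs, name, bases, clsdict)
        cls.Meta = options  # replace class Meta with the processed option name/value pairs
        return cls

class Widget(metaclass=ToyMeta):
    class Meta:
        table_name = 'widgets'

print(Widget.Meta)  # {'table_name': 'widgets'}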
389,203 | def argparse(argv, parser, arguments):
def add_arg(parser, arg_spec):
parser.add_argument(arg_spec.name, help=arg_spec.help)
return parser
parse_request = parser \
.map(lambda i: ArgumentParser(description=i.description)) \
.combine_latest(arguments, lambda parser, arg_def: add_arg(parser, arg_def)) \
.last() \
.combine_latest(argv.to_list(), lambda parser, args: (parser, args))
def subscribe(observer):
def on_next(value):
parser, args = value
try:
args = parser.parse_args(args)
for key, value in vars(args).items():
observer.on_next(Argument(key=key, value=value))
except SystemExit as exc:  # ArgumentParser.parse_args() raises SystemExit on bad args
observer.on_error("{}\n{}".format(exc, parser.format_help()))
return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)
return AnonymousObservable(subscribe) | A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items. |
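A hedged usage sketch, assuming RxPY 1.x (matching the AnonymousObservable/combine_latest style above) and namedtuple-shaped Parser/ArgumentDef items whose fields are inferred from the attribute accesses in the code:

from collections import namedtuple
from rx import Observable

Parser = namedtuple('Parser', ['description'])             # item shape assumed
ArgumentDef = namedtuple('ArgumentDef', ['name', 'help'])  # item shape assumed

argv = Observable.from_(['hello.txt'])
parser = Observable.just(Parser(description='demo'))
arguments = Observable.from_([ArgumentDef(name='filename', help='input file')])

argparse(argv, parser, arguments).subscribe(
    lambda arg: print(arg.key, arg.value))  # -> filename hello.txt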
389,204 | def prep_bootstrap(mpt):
bs_ = __salt__['config.gather_bootstrap_script']()  # key assumed; string literals in this row were lost in extraction
fpd_ = os.path.join(mpt, 'tmp', "{0}".format(  # 'tmp' subdirectory assumed
uuid.uuid4()))
if not os.path.exists(fpd_):
os.makedirs(fpd_)
os.chmod(fpd_, 0o700)
fp_ = os.path.join(fpd_, os.path.basename(bs_))
shutil.copy(bs_, fp_)
tmppath = fpd_.replace(mpt, '')  # path relative to the mount point
return fp_, tmppath | Copy the bootstrap script to a random path under the mount point
CLI Example:
.. code-block:: bash
salt '*' seed.prep_bootstrap /tmp |
389,205 | def pop(self):
dfa, state, node = self.stack.pop()
if self.stack:
self.stack[-1][2].children.append(node)
else:
self.root = node | Pop an entry off the stack and make its node a child of the last. |
389,206 | def from_tabledata(self, value, is_overwrite_table_name=True):
super(ExcelTableWriter, self).from_tabledata(value)
if self.is_opened():
self.make_worksheet(self.table_name) | Set following attributes from |TableData|
- :py:attr:`~.table_name`.
- :py:attr:`~.headers`.
- :py:attr:`~.value_matrix`.
And create a worksheet named from :py:attr:`~.table_name`,
if it does not exist yet.
:param tabledata.TableData value: Input table data. |
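A usage sketch, assuming pytablewriter's Excel writer and the tabledata package; the TableData(name, headers, rows) call shape is our assumption:

from pytablewriter import ExcelXlsxTableWriter
from tabledata import TableData

writer = ExcelXlsxTableWriter()
writer.open("sample.xlsx")
writer.from_tabledata(TableData("prices", ["item", "cost"], [["apple", 1.2], ["pear", 0.9]]))
writer.write_table()
writer.close()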
389,207 | def find(max_depth=3):
i = 0
for c, d, f in walk_up(os.getcwd()):
i += 1
if i < max_depth:
p = os.path.join(c, 'Pipfile')
if os.path.isfile(p):
return p
raise RuntimeError('No Pipfile found!') | Returns the path of a Pipfile in parent directories. |
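find() leans on a walk_up helper that is not shown; a minimal sketch of what such a helper could look like (the real implementation may differ):

import os

def walk_up(bottom):
    # Yield (directory, subdirs, files) tuples walking *up* toward the root.
    bottom = os.path.realpath(bottom)
    while True:
        names = os.listdir(bottom)
        dirs = [n for n in names if os.path.isdir(os.path.join(bottom, n))]
        files = [n for n in names if not os.path.isdir(os.path.join(bottom, n))]
        yield bottom, dirs, files
        parent = os.path.dirname(bottom)
        if parent == bottom:  # reached the filesystem root
            return
        bottom = parent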
389,208 | def extend_with_ms(self, req, sms_dict):
_ms_uri = {}
_ms = {}
for fo, sms in sms_dict.items():
if sms.startswith('http://') or sms.startswith('https://'):
_ms_uri[fo] = sms
else:
_ms[fo] = sms
if _ms:
req['metadata_statements'] = Message(**_ms)  # request keys assumed; literals lost in extraction
if _ms_uri:
req['metadata_statement_uris'] = Message(**_ms_uri)
return req | Add signed metadata statements to a request
:param req: The request
:param sms_dict: A dictionary with FO IDs as keys and signed metadata
statements (sms) or uris pointing to sms as values.
:return: The updated request |
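An example of the sms_dict shape the docstring describes; the FO identifiers and values below are illustrative:

sms_dict = {
    "https://fo.example.org": "eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOi...",  # inline signed JWT (truncated)
    "https://other.fo.example.org": "https://example.com/sms.jwt",   # URI pointing to an sms
}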
389,209 | def set_pkg_summary(self, doc, text):
self.assert_package_exists()
if not self.package_summary_set:
self.package_summary_set = True
doc.package.summary = text
else:
raise CardinalityError('Package::Summary') | Sets the package summary.
Raises CardinalityError if summary already set.
Raises OrderError if no package previously defined. |
389,210 | def sparql(self, stringa):
qres = self.rdfgraph.query(stringa)
return list(qres) | Wrapper around a SPARQL query. |
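Usage sketch, assuming self.rdfgraph is an rdflib Graph (the obj instance is hypothetical):

rows = obj.sparql("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 5")
for s, p, o in rows:
    print(s, p, o)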
389,211 | def apply_new_outcome_name(self, path, new_name):
if new_name == self.list_store[path][self.NAME_STORAGE_ID]:
return
outcome = self.list_store[path][self.CORE_STORAGE_ID]
try:
outcome.name = new_name
logger.debug("Outcome name changed to {0}".format(outcome.name))
except (ValueError, TypeError) as e:
logger.warning("The name of the outcome could not be changed: {0}".format(e))
self.list_store[path][self.NAME_STORAGE_ID] = outcome.name | Apply the newly entered outcome name if it was changed
:param str path: The path string of the renderer
:param str new_name: Newly entered outcome name |
389,212 | def get_favourite_accounts(self) -> List[Account]:
from gnucash_portfolio.lib.settings import Settings
settings = Settings()
favourite_accts = settings.favourite_accounts
accounts = self.get_list(favourite_accts)
return accounts | Provides a list of favourite accounts |
389,213 | def metadata_index_json(self):
try:
return op.join(self.metadata_dir, 'INDEX_JSON')  # filenames in this property assumed; literals lost in extraction
except FileNotFoundError:
return op.join(self.metadata_dir, 'INDEX_JSON.gz') | str: Path to the INDEX_JSON file. |
389,214 | def load_plugins(self):
# NOTE: the quoted literals below (entry-point namespaces, worker dict keys,
# log and error messages) were lost in extraction and are reconstructed
# assumptions, not verbatim source.
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.collectors']['plugins']:
cls = entry_point.load()
if cls.enabled():
self.log.debug('Loaded collector {} from {}'.format(cls.__name__, cls.__module__))
self.collectors.setdefault(cls.type, []).append(Worker(
cls.name,
cls.interval,
{
'name': entry_point.name,
'module': entry_point.module_name,
'class': entry_point.attrs
}
))
else:
self.log.debug('Collector {} in {} is disabled'.format(cls.__name__, cls.__module__))
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auditors']['plugins']:
cls = entry_point.load()
if cls.enabled():
self.log.debug('Loaded auditor {} from {}'.format(cls.__name__, cls.__module__))
self.auditors.append(Worker(
cls.name,
cls.interval,
{
'name': entry_point.name,
'module': entry_point.module_name,
'class': entry_point.attrs
}
))
else:
self.log.debug('Auditor {} in {} is disabled'.format(cls.__name__, cls.__module__))
collector_count = sum(len(x) for x in self.collectors.values())
auditor_count = len(self.auditors)
if collector_count + auditor_count == 0:
raise Exception('No collectors or auditors loaded')
self.log.info('Loaded {} collectors and {} auditors'.format(collector_count, auditor_count)) | Refresh the list of available collectors and auditors
Returns:
`None` |
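For context, this is how a plugin package would typically register itself for this kind of pkg_resources discovery; the namespace strings mirror the reconstructed keys above and are assumptions:

from setuptools import setup

setup(
    name='my-cinq-plugin',
    version='0.1',
    packages=['my_plugin'],
    entry_points={
        'cloud_inquisitor.plugins.collectors': [
            'my_collector = my_plugin.collectors:MyCollector',
        ],
        'cloud_inquisitor.plugins.auditors': [
            'my_auditor = my_plugin.auditors:MyAuditor',
        ],
    },
)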
389,215 | def DirectoryStimuliFactory(loader):
impath = loader.impath
ftrpath = loader.ftrpath
assert os.access(impath, os.R_OK)
assert os.access(ftrpath, os.R_OK)
img_per_cat = {}
subfolders = [name for name in os.listdir(impath) if os.path.isdir(
os.path.join(impath, name))]
if not subfolders:
[_, _, files] = next(os.walk(os.path.join(impath)))
entries = {1:
[int(cur_file[cur_file.find('_')+1:-4]) for cur_file  # '_' separator assumed; literal lost in extraction
in files if cur_file.endswith('.png')]}
img_per_cat.update(entries)
subfolders = []
else:
for directory in subfolders:
[_, _, files] = next(os.walk(os.path.join(impath, directory)))
imagenumbers = [int(cur_file[cur_file.find('_')+1:-4])
for cur_file in files
if (cur_file.endswith('.png') and (len(cur_file) > 4))]
entries = {int(directory): imagenumbers}
img_per_cat.update(entries)
del directory
del imagenumbers
_, features, files = next(os.walk(os.path.join(ftrpath,
subfolders[0])))
return Categories(loader, img_per_cat = img_per_cat, features = features) | Takes an input path to the images folder of an experiment and generates
automatically the category - filenumber list needed to construct an
appropriate _categories object.
Parameters :
loader : Loader object which contains
impath : string
path to the input, i.e. image-, files of the experiment. All
subfolders in that path will be treated as categories. If no
subfolders are present, category 1 will be assigned and all
files in the folder are considered input images.
Images have to end in '.png'.
ftrpath : string
path to the feature folder. It is expected that the folder
structure corresponds to the structure in impath, i.e.
ftrpath/category/featurefolder/featuremap.mat
Furthermore, features are assumed to be the same for all
categories. |
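An illustrative directory layout this factory expects; the file names are assumed, matching the `<prefix>_<number>.png` pattern the parsing implies:

impath/
    1/
        image_1.png
        image_2.png
    2/
        image_7.png
ftrpath/
    1/
        intensity/
            intensity.mat
    2/
        intensity/
            intensity.mat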
389,216 | def strnum(prefix: str, num: int, suffix: str = "") -> str:
return "{}{}{}".format(prefix, num, suffix) | Makes a string of the format ``<prefix><number><suffix>``. |
389,217 | def proper_path(path):
if path.startswith("./"):
pass
elif path.startswith("/"):
path = ".%s" % path
elif path.startswith("."):
while path.startswith("."):
path = path[1:]
if path.startswith("/"):
path = ".%s" % path
else:
path = "./%s" % path
if not path.endswith("/"):
path += "/"
return path | Clean up the path specification so it looks like something I could use.
"./" <path> "/" |
389,218 | def record(self, person, event, properties=None, timestamp=None,
path=KISSmetrics.RECORD_PATH):
this_request = request.record(self.key, person, event,
timestamp=timestamp,
properties=properties,
scheme=self.trk_scheme,
host=self.trk_host, path=path)
return self._request(this_request) | Record `event` for `person` with any `properties`.
:param person: the individual performing the `event`
:param event: the `event` name that was performed
:param properties: any additional data to include
:type properties: dict
:param timestamp: when the `event` was performed; optional for
back-dating
:param path: HTTP endpoint to use; defaults to
``KISSmetrics.RECORD_PATH``
:returns: an HTTP response for the request
:rtype: `urllib3.response.HTTPResponse` |
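Usage sketch; the client construction (API key, tracking hosts) is assumed to have happened elsewhere:

resp = client.record('user@example.com', 'signed up', properties={'plan': 'free'})
print(resp.status)  # urllib3 HTTPResponse status code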
389,219 | def _processHandler(self, securityHandler, param_dict):
cj = None
handler = None
if securityHandler is None:
cj = cookiejar.CookieJar()
elif securityHandler.method.lower() == "token" or \
securityHandler.method.lower() == "oauth":
param_dict['token'] = securityHandler.token
if hasattr(securityHandler, 'cookiejar'):
cj = securityHandler.cookiejar
if hasattr(securityHandler, 'handler'):
handler = securityHandler.handler
elif securityHandler.method.lower() == "handler":
handler = securityHandler.handler
cj = securityHandler.cookiejar
if len(param_dict) > 0:
for k,v in param_dict.items():
if isinstance(v, bool):
param_dict[k] = json.dumps(v)
return param_dict, handler, cj | Processes the handler and returns the cookiejar. |
389,220 | def find_jump_targets(self, debug):
code = self.code
n = len(code)
self.structs = [{'type': 'root',
'start': 0,
'end': n-1}]
self.loops = []
self.fixed_jumps = {}
self.except_targets = {}
self.ignore_if = set()
self.build_statement_indices()
self.else_start = {}
self.not_continue = set()
self.return_end_ifs = set()
self.setup_loop_targets = {}
self.setup_loops = {}
targets = {}
for i, inst in enumerate(self.insts):
offset = inst.offset
op = inst.opcode
self.detect_control_flow(offset, targets, i)
if inst.has_arg:
label = self.fixed_jumps.get(offset)
oparg = inst.arg
if (self.version >= 3.6 and
self.code[offset] == self.opc.EXTENDED_ARG):
j = xdis.next_offset(op, self.opc, offset)
next_offset = xdis.next_offset(op, self.opc, j)
else:
next_offset = xdis.next_offset(op, self.opc, offset)
if label is None:
if op in self.opc.hasjrel and op != self.opc.FOR_ITER:
label = next_offset + oparg
elif op in self.opc.hasjabs:
if op in self.jump_if_pop:
if oparg > offset:
label = oparg
if label is not None and label != -1:
targets[label] = targets.get(label, []) + [offset]
elif op == self.opc.END_FINALLY and offset in self.fixed_jumps:
label = self.fixed_jumps[offset]
targets[label] = targets.get(label, []) + [offset]
pass
pass
if debug in ('both', 'after'):  # debug-flag values assumed; literals lost in extraction
import pprint as pp
pp.pprint(self.structs)
return targets | Detect all offsets in a byte code which are jump targets
where we might insert a COME_FROM instruction.
Return the list of offsets. An instruction can be jumped
to from multiple instructions. |
389,221 | def xmoe2_v1_l4k():
hparams = xmoe2_v1()
hparams.batch_size = 32
hparams.max_length = 4096
hparams.split_to_length = 4096
hparams.reshape_logits_hack = True
return hparams | With sequence length 4096. |
389,222 | def all_files_in_directory(path):
file_list = []
for dirname, dirnames, filenames in os.walk(path):
for filename in filenames:
file_list.append(os.path.join(dirname, filename))
return file_list | Recursively list all files under a directory. |
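For comparison, an equivalent pathlib one-liner:

from pathlib import Path
file_list = [str(p) for p in Path(path).rglob('*') if p.is_file()]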
389,223 | def comicPageLink(self, comic, url, prevUrl):
pageInfo = self.getPageInfo(comic, url)
pageInfo['prev'] = prevUrl | Write previous link into JSON. |