code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def generate_data(self, data_dir, tmp_dir, task_id=-1):
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
split_paths = [(split["split"], filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=True))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir, split),
paths,
cycle_every_n=self.total_number_of_frames // len(paths))
else:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir,
problem.DatasetSplit.TRAIN),
all_paths,
cycle_every_n=self.total_number_of_frames // len(all_paths)) | The function generating the data. | ### Input:
The function generating the data.
### Response:
def generate_data(self, data_dir, tmp_dir, task_id=-1):
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
split_paths = [(split["split"], filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=True))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir, split),
paths,
cycle_every_n=self.total_number_of_frames // len(paths))
else:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir,
problem.DatasetSplit.TRAIN),
all_paths,
cycle_every_n=self.total_number_of_frames // len(all_paths)) |
def off(self, event, handler):
event_hook = self.get_or_create(event)
event_hook.unsubscribe(handler)
return self | Detaches the handler from the specified event.
@param event: event to detach the handler from. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param handler: event handler.
@return: self, so calls like this can be chained together. | ### Input:
Detaches the handler from the specified event.
@param event: event to detach the handler from. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param handler: event handler.
@return: self, so calls like this can be chained together.
### Response:
def off(self, event, handler):
event_hook = self.get_or_create(event)
event_hook.unsubscribe(handler)
return self |
def list_presubscriptions(self, **kwargs):
api = self._get_api(mds.SubscriptionsApi)
resp = api.get_pre_subscriptions(**kwargs)
return [Presubscription(p) for p in resp] | Get a list of pre-subscription data
:returns: a list of `Presubscription` objects
:rtype: list of mbed_cloud.presubscription.Presubscription | ### Input:
Get a list of pre-subscription data
:returns: a list of `Presubscription` objects
:rtype: list of mbed_cloud.presubscription.Presubscription
### Response:
def list_presubscriptions(self, **kwargs):
api = self._get_api(mds.SubscriptionsApi)
resp = api.get_pre_subscriptions(**kwargs)
return [Presubscription(p) for p in resp] |
def get_list_display(self, request):
list_display = []
for field_name in self.list_display:
try:
db_field = self.model._meta.get_field(field_name)
if isinstance(db_field, BooleanField):
field_name = boolean_switch_field(db_field)
except FieldDoesNotExist:
pass
list_display.append(field_name)
return list_display | Return a sequence containing the fields to be displayed on the
changelist. | ### Input:
Return a sequence containing the fields to be displayed on the
changelist.
### Response:
def get_list_display(self, request):
list_display = []
for field_name in self.list_display:
try:
db_field = self.model._meta.get_field(field_name)
if isinstance(db_field, BooleanField):
field_name = boolean_switch_field(db_field)
except FieldDoesNotExist:
pass
list_display.append(field_name)
return list_display |
def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):
if action == :
_, record = deposit.fetch_published()
index_record.delay(str(record.id)) | Index the record after publishing.
.. note:: if the record is not published, it doesn't index.
:param sender: Who send the signal.
:param action: Action executed by the sender. (Default: ``None``)
:param pid: PID object. (Default: ``None``)
:param deposit: Deposit object. (Default: ``None``) | ### Input:
Index the record after publishing.
.. note:: if the record is not published, it doesn't index.
:param sender: Who send the signal.
:param action: Action executed by the sender. (Default: ``None``)
:param pid: PID object. (Default: ``None``)
:param deposit: Deposit object. (Default: ``None``)
### Response:
def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):
if action == :
_, record = deposit.fetch_published()
index_record.delay(str(record.id)) |
def _Open(self, path_spec, mode=):
if not path_spec.HasParent():
raise errors.PathSpecError(
)
range_offset = getattr(path_spec, , None)
if range_offset is None:
raise errors.PathSpecError(
)
range_size = getattr(path_spec, , None)
if range_size is None:
raise errors.PathSpecError(
)
self._range_offset = range_offset
self._range_size = range_size | Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | ### Input:
Opens the file system defined by path specification.
Args:
path_spec (PathSpec): a path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
### Response:
def _Open(self, path_spec, mode=):
if not path_spec.HasParent():
raise errors.PathSpecError(
)
range_offset = getattr(path_spec, , None)
if range_offset is None:
raise errors.PathSpecError(
)
range_size = getattr(path_spec, , None)
if range_size is None:
raise errors.PathSpecError(
)
self._range_offset = range_offset
self._range_size = range_size |
def _importAndCheckStack(importName):
try:
return __import__(importName)
except ImportError:
excType, excValue, excTraceback = sys.exc_info()
while excTraceback:
execName = excTraceback.tb_frame.f_globals["__name__"]
if execName is None or execName == importName:
reraise(excValue, excTraceback)
excTraceback = excTraceback.tb_next
raise _NoModuleFound() | Import the given name as a module, then walk the stack to determine whether
the failure was the module not existing, or some code in the module (for
example a dependent import) failing. This can be helpful to determine
whether any actual application code was run. For example, to distinguish
administrative error (entering the wrong module name), from programmer
error (writing buggy code in a module that fails to import).
@param importName: The name of the module to import.
@type importName: C{str}
@raise Exception: if something bad happens. This can be any type of
exception, since nobody knows what loading some arbitrary code might
do.
@raise _NoModuleFound: if no module was found. | ### Input:
Import the given name as a module, then walk the stack to determine whether
the failure was the module not existing, or some code in the module (for
example a dependent import) failing. This can be helpful to determine
whether any actual application code was run. For example, to distinguish
administrative error (entering the wrong module name), from programmer
error (writing buggy code in a module that fails to import).
@param importName: The name of the module to import.
@type importName: C{str}
@raise Exception: if something bad happens. This can be any type of
exception, since nobody knows what loading some arbitrary code might
do.
@raise _NoModuleFound: if no module was found.
### Response:
def _importAndCheckStack(importName):
try:
return __import__(importName)
except ImportError:
excType, excValue, excTraceback = sys.exc_info()
while excTraceback:
execName = excTraceback.tb_frame.f_globals["__name__"]
if execName is None or execName == importName:
reraise(excValue, excTraceback)
excTraceback = excTraceback.tb_next
raise _NoModuleFound() |
def iodp_samples_srm(df, spec_file=,samp_file="samples.txt",site_file="sites.txt",dir_path=,
input_dir_path=,comp_depth_key="",lat="",lon=""):
spec_reqd_columns=[,,,,,\
,]
samp_reqd_columns=[,,,,\
,,,,,,]
site_reqd_columns=[,,,,,,,\
,,\
,,,\
,,,,]
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
spec_out = os.path.join(output_dir_path, spec_file)
samp_out = os.path.join(output_dir_path, samp_file)
site_out = os.path.join(output_dir_path, site_file)
specimens_df=pd.DataFrame(columns = spec_reqd_columns)
samples_df=pd.DataFrame(columns = samp_reqd_columns)
sites_df=pd.DataFrame(columns = site_reqd_columns)
depth_key = "Depth CSF-A (m)"
text_key = "Text ID"
date_key = "Date sample logged"
volume_key =
holes,specimens=iodp_sample_names(df)
specimens_df[]=specimens
specimens_df[]=specimens
samples_df[]=specimens
samples_df[]=specimens
samples_df[]=df[depth_key]
if comp_depth_key: samples_df[]=df[comp_depth_key]
sites_df[]=specimens
sites_df[]=df[depth_key]
sites_df[]=lat
sites_df[]=lon
sites_df[]=
sites_df[]=
sites_df[]=
sites_df[]=holes[0]
sites_df[]="This study"
if comp_depth_key: sites_df[]=df[comp_depth_key]
samples_df[]=
samples_df[]=specimens
samples_df[]=
samples_df[]=
samples_df[]=
samples_df[]=
samples_df[]=
specimens_df[]=
specimens_df[]=
specimens_df[]=
specimens_df.fillna("",inplace=True)
samples_df.fillna("",inplace=True)
sites_df.fillna("",inplace=True)
spec_dicts = specimens_df.to_dict()
pmag.magic_write(spec_out, spec_dicts, )
samp_dicts = samples_df.to_dict()
pmag.magic_write(samp_out, samp_dicts, )
site_dicts = sites_df.to_dict()
pmag.magic_write(site_out, site_dicts, )
return holes[0],specimens | Convert IODP samples data generated from the SRM measurements file into datamodel 3.0 MagIC samples file.
Default is to overwrite the output files in your working directory.
Parameters
----------
df : str
Pandas DataFrame of SRM Archive data
dir_path : str
working directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
spec_file : str
output specimens.txt file name
samp_file : str
output samples.txt file name
site_file : str
output sites.txt file name
comp_depth_key : str
if not "", there is a composite depth model, for example 'Depth CSF-B (m)'
lat : float
latitude of hole location
lon : float
longitude of hole location
NOTE: all output files will overwrite existing files.
Returns
--------
type - Tuple : (True or False indicating if conversion was successful, file names written) | ### Input:
Convert IODP samples data generated from the SRM measurements file into datamodel 3.0 MagIC samples file.
Default is to overwrite the output files in your working directory.
Parameters
----------
df : str
Pandas DataFrame of SRM Archive data
dir_path : str
working directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
spec_file : str
output specimens.txt file name
samp_file : str
output samples.txt file name
site_file : str
output sites.txt file name
comp_depth_key : str
if not "", there is a composite depth model, for example 'Depth CSF-B (m)'
lat : float
latitude of hole location
lon : float
longitude of hole location
NOTE: all output files will overwrite existing files.
Returns
--------
type - Tuple : (True or False indicating if conversion was successful, file names written)
### Response:
def iodp_samples_srm(df, spec_file=,samp_file="samples.txt",site_file="sites.txt",dir_path=,
input_dir_path=,comp_depth_key="",lat="",lon=""):
spec_reqd_columns=[,,,,,\
,]
samp_reqd_columns=[,,,,\
,,,,,,]
site_reqd_columns=[,,,,,,,\
,,\
,,,\
,,,,]
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
spec_out = os.path.join(output_dir_path, spec_file)
samp_out = os.path.join(output_dir_path, samp_file)
site_out = os.path.join(output_dir_path, site_file)
specimens_df=pd.DataFrame(columns = spec_reqd_columns)
samples_df=pd.DataFrame(columns = samp_reqd_columns)
sites_df=pd.DataFrame(columns = site_reqd_columns)
depth_key = "Depth CSF-A (m)"
text_key = "Text ID"
date_key = "Date sample logged"
volume_key =
holes,specimens=iodp_sample_names(df)
specimens_df[]=specimens
specimens_df[]=specimens
samples_df[]=specimens
samples_df[]=specimens
samples_df[]=df[depth_key]
if comp_depth_key: samples_df[]=df[comp_depth_key]
sites_df[]=specimens
sites_df[]=df[depth_key]
sites_df[]=lat
sites_df[]=lon
sites_df[]=
sites_df[]=
sites_df[]=
sites_df[]=holes[0]
sites_df[]="This study"
if comp_depth_key: sites_df[]=df[comp_depth_key]
samples_df[]=
samples_df[]=specimens
samples_df[]=
samples_df[]=
samples_df[]=
samples_df[]=
samples_df[]=
specimens_df[]=
specimens_df[]=
specimens_df[]=
specimens_df.fillna("",inplace=True)
samples_df.fillna("",inplace=True)
sites_df.fillna("",inplace=True)
spec_dicts = specimens_df.to_dict()
pmag.magic_write(spec_out, spec_dicts, )
samp_dicts = samples_df.to_dict()
pmag.magic_write(samp_out, samp_dicts, )
site_dicts = sites_df.to_dict()
pmag.magic_write(site_out, site_dicts, )
return holes[0],specimens |
def find_filepath(
filename,
basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, , , os.path.join(, ), )):
if os.path.isfile(filename):
return filename
for basedir in basepaths:
fullpath = expand_filepath(os.path.join(basedir, filename))
if os.path.isfile(fullpath):
return fullpath
return False | Given a filename or path see if it exists in any of the common places datafiles might be
>>> p = find_filepath('iq_test.csv')
>>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
True
>>> p[-len('iq_test.csv'):]
'iq_test.csv'
>>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
False | ### Input:
Given a filename or path see if it exists in any of the common places datafiles might be
>>> p = find_filepath('iq_test.csv')
>>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
True
>>> p[-len('iq_test.csv'):]
'iq_test.csv'
>>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
False
### Response:
def find_filepath(
filename,
basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, , , os.path.join(, ), )):
if os.path.isfile(filename):
return filename
for basedir in basepaths:
fullpath = expand_filepath(os.path.join(basedir, filename))
if os.path.isfile(fullpath):
return fullpath
return False |
def setExtension(self, ext):
if ext[0] != ".":
ext = "." + ext
self._ext = utils.asString(ext) | Set a new file extension for the sequence.
Note:
A leading period will be added if none is provided.
Args:
ext (str): the new file extension | ### Input:
Set a new file extension for the sequence.
Note:
A leading period will be added if none is provided.
Args:
ext (str): the new file extension
### Response:
def setExtension(self, ext):
if ext[0] != ".":
ext = "." + ext
self._ext = utils.asString(ext) |
def get_mappings(cls, index_name, doc_type):
return cache.get(cls.get_cache_item_name(index_name, doc_type), {}) | fetch mapped-items structure from cache | ### Input:
fetch mapped-items structure from cache
### Response:
def get_mappings(cls, index_name, doc_type):
return cache.get(cls.get_cache_item_name(index_name, doc_type), {}) |
def _find_immediately(self, locator, search_object=None):
search_object = self.driver if search_object is None else search_object
elements = self.locator_handler.find_by_locator(search_object, locator, True)
return [WebElementWrapper.WebElementWrapper(self, locator, element) for element in elements] | Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
@return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True | ### Input:
Attempts to immediately find elements on the page without waiting
@type locator: webdriverwrapper.support.locator.Locator
@param locator: Locator object describing
@type search_object: webdriverwrapper.WebElementWrapper
@param search_object: Optional WebElement to start search with. If null, search will be on self.driver
@return: Single WebElementWrapper if find_all is False,
list of WebElementWrappers if find_all is True
### Response:
def _find_immediately(self, locator, search_object=None):
search_object = self.driver if search_object is None else search_object
elements = self.locator_handler.find_by_locator(search_object, locator, True)
return [WebElementWrapper.WebElementWrapper(self, locator, element) for element in elements] |
def bottom(self):
if self.vMerge is not None:
tc_below = self._tc_below
if tc_below is not None and tc_below.vMerge == ST_Merge.CONTINUE:
return tc_below.bottom
return self._tr_idx + 1 | The row index that marks the bottom extent of the vertical span of
this cell. This is one greater than the index of the bottom-most row
of the span, similar to how a slice of the cell's rows would be
specified. | ### Input:
The row index that marks the bottom extent of the vertical span of
this cell. This is one greater than the index of the bottom-most row
of the span, similar to how a slice of the cell's rows would be
specified.
### Response:
def bottom(self):
if self.vMerge is not None:
tc_below = self._tc_below
if tc_below is not None and tc_below.vMerge == ST_Merge.CONTINUE:
return tc_below.bottom
return self._tr_idx + 1 |
def is_address_commited(self, address):
try:
mbi = self.mquery(address)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror == win32.ERROR_INVALID_PARAMETER:
return False
raise
return mbi.is_commited() | Determines if an address belongs to a commited page.
@note: Returns always C{False} for kernel mode addresses.
@type address: int
@param address: Memory address to query.
@rtype: bool
@return: C{True} if the address belongs to a committed page.
@raise WindowsError: An exception is raised on error. | ### Input:
Determines if an address belongs to a committed page.
@note: Returns always C{False} for kernel mode addresses.
@type address: int
@param address: Memory address to query.
@rtype: bool
@return: C{True} if the address belongs to a committed page.
@raise WindowsError: An exception is raised on error.
### Response:
def is_address_commited(self, address):
try:
mbi = self.mquery(address)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror == win32.ERROR_INVALID_PARAMETER:
return False
raise
return mbi.is_commited() |
def delete_audit_sink(self, name, **kwargs):
kwargs[] = True
if kwargs.get():
return self.delete_audit_sink_with_http_info(name, **kwargs)
else:
(data) = self.delete_audit_sink_with_http_info(name, **kwargs)
return data | delete an AuditSink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_audit_sink(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the AuditSink (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | ### Input:
delete an AuditSink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_audit_sink(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the AuditSink (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_audit_sink(self, name, **kwargs):
kwargs[] = True
if kwargs.get():
return self.delete_audit_sink_with_http_info(name, **kwargs)
else:
(data) = self.delete_audit_sink_with_http_info(name, **kwargs)
return data |
def thread_function(self):
self.__subscribed = True
url = SUBSCRIBE_ENDPOINT + "?token=" + self._session_token
data = self._session.query(url, method=, raw=True, stream=True)
if not data or not data.ok:
_LOGGER.debug("Did not receive a valid response. Aborting..")
return None
self.__sseclient = sseclient.SSEClient(data)
try:
for event in (self.__sseclient).events():
if not self.__subscribed:
break
data = json.loads(event.data)
if data.get() == "connected":
_LOGGER.debug("Successfully subscribed this base station")
elif data.get():
action = data.get()
resource = data.get()
if action == "logout":
_LOGGER.debug("Logged out by some other entity")
self.__subscribed = False
break
elif action == "is" and "subscriptions/" not in resource:
self.__events.append(data)
self.__event_handle.set()
except TypeError as error:
_LOGGER.debug("Got unexpected error: %s", error)
return None
return True | Thread function. | ### Input:
Thread function.
### Response:
def thread_function(self):
self.__subscribed = True
url = SUBSCRIBE_ENDPOINT + "?token=" + self._session_token
data = self._session.query(url, method=, raw=True, stream=True)
if not data or not data.ok:
_LOGGER.debug("Did not receive a valid response. Aborting..")
return None
self.__sseclient = sseclient.SSEClient(data)
try:
for event in (self.__sseclient).events():
if not self.__subscribed:
break
data = json.loads(event.data)
if data.get() == "connected":
_LOGGER.debug("Successfully subscribed this base station")
elif data.get():
action = data.get()
resource = data.get()
if action == "logout":
_LOGGER.debug("Logged out by some other entity")
self.__subscribed = False
break
elif action == "is" and "subscriptions/" not in resource:
self.__events.append(data)
self.__event_handle.set()
except TypeError as error:
_LOGGER.debug("Got unexpected error: %s", error)
return None
return True |
def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
doc = load(input_stream)
verbose = doc.get_metadata(, False)
if search_dirs is None:
search_dirs = doc.get_metadata(, [])
if type(search_dirs) != list:
search_dirs = [search_dirs]
if in search_dirs:
data_dir = True
if in search_dirs:
sys_path = False
search_dirs = [dir_ for dir_ in search_dirs
if dir_ not in (, )]
if verbose:
debug(.format(data_dir, sys_path))
search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
if not panfl_:
search_dirs.append()
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += sys.path
else:
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += reduced_sys_path
msg = doc.get_metadata(, False)
if msg:
debug(msg)
if filters is None:
filters = doc.get_metadata(, [])
if type(filters) != list:
filters = [filters]
if filters:
if verbose:
msg = "panflute: will run the following filters:"
debug(msg, .join(filters))
doc = autorun_filters(filters, doc, search_dirs, verbose)
elif verbose:
debug("panflute: no filters were provided")
dump(doc, output_stream) | Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None | ### Input:
Reads JSON from stdin and second CLI argument:
``sys.argv[1]``. Dumps JSON doc to the stdout.
:param filters: Union[List[str], None]
if None then read from metadata
:param search_dirs: Union[List[str], None]
if None then read from metadata
:param data_dir: bool
:param sys_path: bool
:param panfl_: bool
:param input_stream: io.StringIO or None
for debug purpose
:param output_stream: io.StringIO or None
for debug purpose
:return: None
### Response:
def stdio(filters=None, search_dirs=None, data_dir=True, sys_path=True, panfl_=False, input_stream=None, output_stream=None):
doc = load(input_stream)
verbose = doc.get_metadata(, False)
if search_dirs is None:
search_dirs = doc.get_metadata(, [])
if type(search_dirs) != list:
search_dirs = [search_dirs]
if in search_dirs:
data_dir = True
if in search_dirs:
sys_path = False
search_dirs = [dir_ for dir_ in search_dirs
if dir_ not in (, )]
if verbose:
debug(.format(data_dir, sys_path))
search_dirs = [p.normpath(p.expanduser(p.expandvars(dir_))) for dir_ in search_dirs]
if not panfl_:
search_dirs.append()
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += sys.path
else:
if data_dir:
search_dirs.append(get_filter_dir())
if sys_path:
search_dirs += reduced_sys_path
msg = doc.get_metadata(, False)
if msg:
debug(msg)
if filters is None:
filters = doc.get_metadata(, [])
if type(filters) != list:
filters = [filters]
if filters:
if verbose:
msg = "panflute: will run the following filters:"
debug(msg, .join(filters))
doc = autorun_filters(filters, doc, search_dirs, verbose)
elif verbose:
debug("panflute: no filters were provided")
dump(doc, output_stream) |
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
max_retries=None, interval_start=2, interval_step=2,
interval_max=30):
retries = 0
interval_range = __fxrange(interval_start,
interval_max + interval_start,
interval_step, repeatlast=True)
for retries in count():
try:
return fun(*args, **kwargs)
except catch as exc:
if max_retries and retries >= max_retries:
raise
tts = float(errback(exc, interval_range, retries) if errback
else next(interval_range))
if tts:
sleep(tts) | Retry the function over and over until max retries is exceeded.
For each retry we sleep a for a while before we try again, this interval
is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries. | ### Input:
Retry the function over and over until max retries is exceeded.
For each retry we sleep a for a while before we try again, this interval
is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
### Response:
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
max_retries=None, interval_start=2, interval_step=2,
interval_max=30):
retries = 0
interval_range = __fxrange(interval_start,
interval_max + interval_start,
interval_step, repeatlast=True)
for retries in count():
try:
return fun(*args, **kwargs)
except catch as exc:
if max_retries and retries >= max_retries:
raise
tts = float(errback(exc, interval_range, retries) if errback
else next(interval_range))
if tts:
sleep(tts) |
def start(self):
napalm_logs_server_messages_received = Counter(
"napalm_logs_server_messages_received",
"Count of messages received from listener processes"
)
napalm_logs_server_skipped_buffered_messages = Counter(
,
,
[]
)
napalm_logs_server_messages_with_identified_os = Counter(
"napalm_logs_server_messages_with_identified_os",
"Count of messages with positive os identification",
[]
)
napalm_logs_server_messages_without_identified_os = Counter(
"napalm_logs_server_messages_without_identified_os",
"Count of messages with negative os identification"
)
napalm_logs_server_messages_failed_device_queuing = Counter(
"napalm_logs_server_messages_failed_device_queuing",
"Count of messages per device os that fail to be queued to a device process",
[]
)
napalm_logs_server_messages_device_queued = Counter(
"napalm_logs_server_messages_device_queued",
"Count of messages queued to device processes",
[]
)
napalm_logs_server_messages_unknown_queued = Counter(
"napalm_logs_server_messages_unknown_queued",
"Count of messages queued as unknown"
)
self._setup_ipc()
cleanup = threading.Thread(target=self._cleanup_buffer)
cleanup.start()
thread = threading.Thread(target=self._suicide_when_without_parent, args=(os.getppid(),))
thread.start()
signal.signal(signal.SIGTERM, self._exit_gracefully)
self.__up = True
while self.__up:
try:
bin_obj = self.sub.recv()
msg, address = umsgpack.unpackb(bin_obj, use_list=False)
except zmq.ZMQError as error:
if self.__up is False:
log.info()
return
else:
log.error(error, exc_info=True)
raise NapalmLogsExit(error)
if six.PY3:
msg = str(msg, )
else:
msg = msg.encode()
log.debug(, address, msg, time.time())
napalm_logs_server_messages_received.inc()
os_list = self._identify_os(msg)
for dev_os, msg_dict in os_list:
if dev_os and dev_os in self.started_os_proc:
log.debug(, dev_os)
log.debug(, dev_os)
if six.PY3:
dev_os = bytes(dev_os, )
if self._buffer:
message = .format(dev_os=dev_os,
host=msg_dict[],
msg=msg_dict[])
message_key = base64.b64encode(message)
if self._buffer[message_key]:
log.info(, msg_dict[])
napalm_logs_server_skipped_buffered_messages.labels(device_os=dev_os).inc()
continue
log.debug(, msg_dict[])
self._buffer[message_key] = 1
self.pub.send_multipart([dev_os,
umsgpack.packb((msg_dict, address))])
napalm_logs_server_messages_with_identified_os.labels(device_os=dev_os).inc()
napalm_logs_server_messages_device_queued.labels(device_os=dev_os).inc()
elif dev_os and dev_os not in self.started_os_proc:
log.info(, dev_os)
napalm_logs_server_messages_with_identified_os.labels(device_os=dev_os).inc()
napalm_logs_server_messages_failed_device_queuing.labels(device_os=dev_os).inc()
elif not dev_os and self.opts[]:
log.debug()
to_publish = {
: address,
: ,
: int(time.time()),
: msg_dict,
: UNKNOWN_DEVICE_NAME,
: ,
:
}
self.publisher_pub.send(umsgpack.packb(to_publish))
napalm_logs_server_messages_unknown_queued.inc()
napalm_logs_server_messages_without_identified_os.inc() | Take the messages from the queue,
inspect and identify the operating system,
then queue the message correspondingly. | ### Input:
Take the messages from the queue,
inspect and identify the operating system,
then queue the message correspondingly.
### Response:
def start(self):
napalm_logs_server_messages_received = Counter(
"napalm_logs_server_messages_received",
"Count of messages received from listener processes"
)
napalm_logs_server_skipped_buffered_messages = Counter(
,
,
[]
)
napalm_logs_server_messages_with_identified_os = Counter(
"napalm_logs_server_messages_with_identified_os",
"Count of messages with positive os identification",
[]
)
napalm_logs_server_messages_without_identified_os = Counter(
"napalm_logs_server_messages_without_identified_os",
"Count of messages with negative os identification"
)
napalm_logs_server_messages_failed_device_queuing = Counter(
"napalm_logs_server_messages_failed_device_queuing",
"Count of messages per device os that fail to be queued to a device process",
[]
)
napalm_logs_server_messages_device_queued = Counter(
"napalm_logs_server_messages_device_queued",
"Count of messages queued to device processes",
[]
)
napalm_logs_server_messages_unknown_queued = Counter(
"napalm_logs_server_messages_unknown_queued",
"Count of messages queued as unknown"
)
self._setup_ipc()
cleanup = threading.Thread(target=self._cleanup_buffer)
cleanup.start()
thread = threading.Thread(target=self._suicide_when_without_parent, args=(os.getppid(),))
thread.start()
signal.signal(signal.SIGTERM, self._exit_gracefully)
self.__up = True
while self.__up:
try:
bin_obj = self.sub.recv()
msg, address = umsgpack.unpackb(bin_obj, use_list=False)
except zmq.ZMQError as error:
if self.__up is False:
log.info()
return
else:
log.error(error, exc_info=True)
raise NapalmLogsExit(error)
if six.PY3:
msg = str(msg, )
else:
msg = msg.encode()
log.debug(, address, msg, time.time())
napalm_logs_server_messages_received.inc()
os_list = self._identify_os(msg)
for dev_os, msg_dict in os_list:
if dev_os and dev_os in self.started_os_proc:
log.debug(, dev_os)
log.debug(, dev_os)
if six.PY3:
dev_os = bytes(dev_os, )
if self._buffer:
message = .format(dev_os=dev_os,
host=msg_dict[],
msg=msg_dict[])
message_key = base64.b64encode(message)
if self._buffer[message_key]:
log.info(, msg_dict[])
napalm_logs_server_skipped_buffered_messages.labels(device_os=dev_os).inc()
continue
log.debug(, msg_dict[])
self._buffer[message_key] = 1
self.pub.send_multipart([dev_os,
umsgpack.packb((msg_dict, address))])
napalm_logs_server_messages_with_identified_os.labels(device_os=dev_os).inc()
napalm_logs_server_messages_device_queued.labels(device_os=dev_os).inc()
elif dev_os and dev_os not in self.started_os_proc:
log.info(, dev_os)
napalm_logs_server_messages_with_identified_os.labels(device_os=dev_os).inc()
napalm_logs_server_messages_failed_device_queuing.labels(device_os=dev_os).inc()
elif not dev_os and self.opts[]:
log.debug()
to_publish = {
: address,
: ,
: int(time.time()),
: msg_dict,
: UNKNOWN_DEVICE_NAME,
: ,
:
}
self.publisher_pub.send(umsgpack.packb(to_publish))
napalm_logs_server_messages_unknown_queued.inc()
napalm_logs_server_messages_without_identified_os.inc() |
def saturation(self, value):
value = clean_float(value)
if value is None:
return
try:
unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
unit_moisture_volume = unit_moisture_weight / self._pw
saturation = unit_moisture_volume / self._calc_unit_void_volume()
if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
raise ModelError("New saturation (%.3f) is inconsistent "
"with calculated value (%.3f)" % (value, saturation))
except TypeError:
pass
old_value = self.saturation
self._saturation = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("saturation", value)
except ModelError as e:
self._saturation = old_value
raise ModelError(e) | Volume of water to volume of voids | ### Input:
Volume of water to volume of voids
### Response:
def saturation(self, value):
value = clean_float(value)
if value is None:
return
try:
unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
unit_moisture_volume = unit_moisture_weight / self._pw
saturation = unit_moisture_volume / self._calc_unit_void_volume()
if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
raise ModelError("New saturation (%.3f) is inconsistent "
"with calculated value (%.3f)" % (value, saturation))
except TypeError:
pass
old_value = self.saturation
self._saturation = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("saturation", value)
except ModelError as e:
self._saturation = old_value
raise ModelError(e) |
def oldest_frame(self, raw=False):
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :] | Get the oldest frame in the panel. | ### Input:
Get the oldest frame in the panel.
### Response:
def oldest_frame(self, raw=False):
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :] |
def get_file_details(filepath, hash_algorithms=[]):
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
securesystemslib.formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)
file_hashes = {}
if not os.path.exists(filepath):
raise securesystemslib.exceptions.Error( + repr(filepath) +
)
filepath = os.path.abspath(filepath)
file_length = os.path.getsize(filepath)
for algorithm in hash_algorithms:
digest_object = securesystemslib.hash.digest_filename(filepath, algorithm)
file_hashes.update({algorithm: digest_object.hexdigest()})
securesystemslib.formats.HASHDICT_SCHEMA.check_match(file_hashes)
return file_length, file_hashes | <Purpose>
To get file's length and hash information. The hash is computed using the
sha256 algorithm. This function is used in the signerlib.py and updater.py
modules.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.Error: If 'filepath' does not exist.
<Returns>
A tuple (length, hashes) describing 'filepath'. | ### Input:
<Purpose>
To get file's length and hash information. The hash is computed using the
sha256 algorithm. This function is used in the signerlib.py and updater.py
modules.
<Arguments>
filepath:
Absolute file path of a file.
hash_algorithms:
<Exceptions>
securesystemslib.exceptions.FormatError: If hash of the file does not match
HASHDICT_SCHEMA.
securesystemslib.exceptions.Error: If 'filepath' does not exist.
<Returns>
A tuple (length, hashes) describing 'filepath'.
### Response:
def get_file_details(filepath, hash_algorithms=[]):
securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
securesystemslib.formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)
file_hashes = {}
if not os.path.exists(filepath):
raise securesystemslib.exceptions.Error( + repr(filepath) +
)
filepath = os.path.abspath(filepath)
file_length = os.path.getsize(filepath)
for algorithm in hash_algorithms:
digest_object = securesystemslib.hash.digest_filename(filepath, algorithm)
file_hashes.update({algorithm: digest_object.hexdigest()})
securesystemslib.formats.HASHDICT_SCHEMA.check_match(file_hashes)
return file_length, file_hashes |
def config_mode(self, config_command="config", pattern=">config"):
return super(RadETXBase, self).config_mode(
config_command=config_command, pattern=pattern
) | Enter into configuration mode on remote device. | ### Input:
Enter into configuration mode on remote device.
### Response:
def config_mode(self, config_command="config", pattern=">config"):
return super(RadETXBase, self).config_mode(
config_command=config_command, pattern=pattern
) |
def save(self, calc, session):
checksum = calc.get_checksum()
try:
existing_calc = session.query(model.Calculation).filter(model.Calculation.checksum == checksum).one()
except NoResultFound:
pass
else:
del calc
return None, "This calculation already exists!"
if not calc.download_size:
for f in calc.related_files:
calc.download_size += os.stat(f).st_size
ormcalc = model.Calculation(checksum = checksum)
if calc._calcset:
ormcalc.meta_data = model.Metadata(chemical_formula = calc.info[], download_size = calc.download_size)
for child in session.query(model.Calculation).filter(model.Calculation.checksum.in_(calc._calcset)).all():
ormcalc.children.append(child)
ormcalc.siblings_count = len(ormcalc.children)
ormcalc.nested_depth = calc._nested_depth
else:
if calc.phonons[]:
phonons_json = []
for bzpoint, frqset in calc.phonons[].items():
for i in range(0, len(calc.phonons[][bzpoint])):
for j in range(0, len(calc.phonons[][bzpoint][i])//3):
eigv = array([calc.phonons[][bzpoint][i][j*3], calc.phonons[][bzpoint][i][j*3+1], calc.phonons[][bzpoint][i][j*3+2]])
R = dot( eigv, calc.structures[-1].cell ).tolist()
calc.phonons[][bzpoint][i][j*3], calc.phonons[][bzpoint][i][j*3+1], calc.phonons[][bzpoint][i][j*3+2] = [round(x, 3) for x in R]
try: irreps = calc.phonons[][bzpoint]
except KeyError:
empty = []
for i in range(len(frqset)):
empty.append()
irreps = empty
phonons_json.append({ :bzpoint, :frqset, :irreps, :calc.phonons[][bzpoint] })
if bzpoint == :
phonons_json[-1][] = calc.phonons[]
phonons_json[-1][] = calc.phonons[]
if calc.phonons[]:
phonons_json[-1][] = calc.phonons[][bzpoint]
ormcalc.phonons = model.Phonons()
ormcalc.spectra.append( model.Spectra(kind = model.Spectra.PHONON, eigenvalues = json.dumps(phonons_json)) )
for task in [, ]:
if calc.electrons[task]:
calc.electrons[task] = calc.electrons[task].todict()
if calc.electrons[] or calc.electrons[]:
ormcalc.electrons = model.Electrons(gap = calc.info[])
if in calc.info:
ormcalc.electrons.is_direct = 1 if calc.info[] == else -1
ormcalc.spectra.append(model.Spectra(
kind = model.Spectra.ELECTRON,
dos = json.dumps(calc.electrons[]),
bands = json.dumps(calc.electrons[]),
projected = json.dumps(calc.electrons[]),
eigenvalues = json.dumps(calc.electrons[])
))
calc.related_files = list(map(virtualize_path, calc.related_files))
ormcalc.meta_data = model.Metadata(location = calc.info[], finished = calc.info[], raw_input = calc.info[], modeling_time = calc.info[], chemical_formula = html_formula(calc.info[]), download_size = calc.download_size, filenames = json.dumps(calc.related_files))
codefamily = model.Codefamily.as_unique(session, content = calc.info[])
codeversion = model.Codeversion.as_unique(session, content = calc.info[])
codeversion.instances.append( ormcalc.meta_data )
codefamily.versions.append( codeversion )
pot = model.Pottype.as_unique(session, name = calc.info[])
pot.instances.append(ormcalc)
ormcalc.recipinteg = model.Recipinteg(kgrid = calc.info[], kshift = calc.info[], smearing = calc.info[], smeartype = calc.info[])
ormcalc.basis = model.Basis(kind = calc.info[], content = json.dumps(calc.electrons[]) if calc.electrons[] else None)
ormcalc.energy = model.Energy(convergence = json.dumps(calc.convergence), total = calc.info[])
ormcalc.spacegroup = model.Spacegroup(n=calc.info[])
ormcalc.struct_ratios = model.Struct_ratios(chemical_formula=calc.info[], formula_units=calc.info[], nelem=calc.info[], dimensions=calc.info[])
if len(calc.tresholds) > 1:
ormcalc.struct_optimisation = model.Struct_optimisation(tresholds=json.dumps(calc.tresholds), ncycles=json.dumps(calc.ncycles))
for n, ase_repr in enumerate(calc.structures):
is_final = True if n == len(calc.structures)-1 else False
struct = model.Structure(step = n, final = is_final)
s = cell_to_cellpar(ase_repr.cell)
struct.lattice = model.Lattice(a=s[0], b=s[1], c=s[2], alpha=s[3], beta=s[4], gamma=s[5], a11=ase_repr.cell[0][0], a12=ase_repr.cell[0][1], a13=ase_repr.cell[0][2], a21=ase_repr.cell[1][0], a22=ase_repr.cell[1][1], a23=ase_repr.cell[1][2], a31=ase_repr.cell[2][0], a32=ase_repr.cell[2][1], a33=ase_repr.cell[2][2])
charges = ase_repr.get_array() if in ase_repr.arrays else [None for j in range(len(ase_repr))]
magmoms = ase_repr.get_array() if in ase_repr.arrays else [None for j in range(len(ase_repr))]
for n, i in enumerate(ase_repr):
struct.atoms.append( model.Atom( number=chemical_symbols.index(i.symbol), x=i.x, y=i.y, z=i.z, charge=charges[n], magmom=magmoms[n] ) )
ormcalc.structures.append(struct)
ormcalc.uigrid = model.Grid(info=json.dumps(calc.info))
uitopics = []
for entity in self.hierarchy:
if not entity[]:
continue
if entity[] or calc._calcset:
for item in calc.info.get( entity[], [] ):
uitopics.append( model.topic(cid=entity[], topic=item) )
else:
topic = calc.info.get(entity[])
if topic or not entity[]:
uitopics.append( model.topic(cid=entity[], topic=topic) )
uitopics = [model.Topic.as_unique(session, cid=x.cid, topic="%s" % x.topic) for x in uitopics]
ormcalc.uitopics.extend(uitopics)
if calc._calcset:
session.add(ormcalc)
else:
session.add_all([codefamily, codeversion, pot, ormcalc])
session.commit()
del calc, ormcalc
return checksum, None | Saves tilde_obj into the database
NB: this is the PUBLIC method
@returns checksum, error | ### Input:
Saves tilde_obj into the database
NB: this is the PUBLIC method
@returns checksum, error
### Response:
def save(self, calc, session):
checksum = calc.get_checksum()
try:
existing_calc = session.query(model.Calculation).filter(model.Calculation.checksum == checksum).one()
except NoResultFound:
pass
else:
del calc
return None, "This calculation already exists!"
if not calc.download_size:
for f in calc.related_files:
calc.download_size += os.stat(f).st_size
ormcalc = model.Calculation(checksum = checksum)
if calc._calcset:
ormcalc.meta_data = model.Metadata(chemical_formula = calc.info[], download_size = calc.download_size)
for child in session.query(model.Calculation).filter(model.Calculation.checksum.in_(calc._calcset)).all():
ormcalc.children.append(child)
ormcalc.siblings_count = len(ormcalc.children)
ormcalc.nested_depth = calc._nested_depth
else:
if calc.phonons[]:
phonons_json = []
for bzpoint, frqset in calc.phonons[].items():
for i in range(0, len(calc.phonons[][bzpoint])):
for j in range(0, len(calc.phonons[][bzpoint][i])//3):
eigv = array([calc.phonons[][bzpoint][i][j*3], calc.phonons[][bzpoint][i][j*3+1], calc.phonons[][bzpoint][i][j*3+2]])
R = dot( eigv, calc.structures[-1].cell ).tolist()
calc.phonons[][bzpoint][i][j*3], calc.phonons[][bzpoint][i][j*3+1], calc.phonons[][bzpoint][i][j*3+2] = [round(x, 3) for x in R]
try: irreps = calc.phonons[][bzpoint]
except KeyError:
empty = []
for i in range(len(frqset)):
empty.append()
irreps = empty
phonons_json.append({ :bzpoint, :frqset, :irreps, :calc.phonons[][bzpoint] })
if bzpoint == :
phonons_json[-1][] = calc.phonons[]
phonons_json[-1][] = calc.phonons[]
if calc.phonons[]:
phonons_json[-1][] = calc.phonons[][bzpoint]
ormcalc.phonons = model.Phonons()
ormcalc.spectra.append( model.Spectra(kind = model.Spectra.PHONON, eigenvalues = json.dumps(phonons_json)) )
for task in [, ]:
if calc.electrons[task]:
calc.electrons[task] = calc.electrons[task].todict()
if calc.electrons[] or calc.electrons[]:
ormcalc.electrons = model.Electrons(gap = calc.info[])
if in calc.info:
ormcalc.electrons.is_direct = 1 if calc.info[] == else -1
ormcalc.spectra.append(model.Spectra(
kind = model.Spectra.ELECTRON,
dos = json.dumps(calc.electrons[]),
bands = json.dumps(calc.electrons[]),
projected = json.dumps(calc.electrons[]),
eigenvalues = json.dumps(calc.electrons[])
))
calc.related_files = list(map(virtualize_path, calc.related_files))
ormcalc.meta_data = model.Metadata(location = calc.info[], finished = calc.info[], raw_input = calc.info[], modeling_time = calc.info[], chemical_formula = html_formula(calc.info[]), download_size = calc.download_size, filenames = json.dumps(calc.related_files))
codefamily = model.Codefamily.as_unique(session, content = calc.info[])
codeversion = model.Codeversion.as_unique(session, content = calc.info[])
codeversion.instances.append( ormcalc.meta_data )
codefamily.versions.append( codeversion )
pot = model.Pottype.as_unique(session, name = calc.info[])
pot.instances.append(ormcalc)
ormcalc.recipinteg = model.Recipinteg(kgrid = calc.info[], kshift = calc.info[], smearing = calc.info[], smeartype = calc.info[])
ormcalc.basis = model.Basis(kind = calc.info[], content = json.dumps(calc.electrons[]) if calc.electrons[] else None)
ormcalc.energy = model.Energy(convergence = json.dumps(calc.convergence), total = calc.info[])
ormcalc.spacegroup = model.Spacegroup(n=calc.info[])
ormcalc.struct_ratios = model.Struct_ratios(chemical_formula=calc.info[], formula_units=calc.info[], nelem=calc.info[], dimensions=calc.info[])
if len(calc.tresholds) > 1:
ormcalc.struct_optimisation = model.Struct_optimisation(tresholds=json.dumps(calc.tresholds), ncycles=json.dumps(calc.ncycles))
for n, ase_repr in enumerate(calc.structures):
is_final = True if n == len(calc.structures)-1 else False
struct = model.Structure(step = n, final = is_final)
s = cell_to_cellpar(ase_repr.cell)
struct.lattice = model.Lattice(a=s[0], b=s[1], c=s[2], alpha=s[3], beta=s[4], gamma=s[5], a11=ase_repr.cell[0][0], a12=ase_repr.cell[0][1], a13=ase_repr.cell[0][2], a21=ase_repr.cell[1][0], a22=ase_repr.cell[1][1], a23=ase_repr.cell[1][2], a31=ase_repr.cell[2][0], a32=ase_repr.cell[2][1], a33=ase_repr.cell[2][2])
charges = ase_repr.get_array() if in ase_repr.arrays else [None for j in range(len(ase_repr))]
magmoms = ase_repr.get_array() if in ase_repr.arrays else [None for j in range(len(ase_repr))]
for n, i in enumerate(ase_repr):
struct.atoms.append( model.Atom( number=chemical_symbols.index(i.symbol), x=i.x, y=i.y, z=i.z, charge=charges[n], magmom=magmoms[n] ) )
ormcalc.structures.append(struct)
ormcalc.uigrid = model.Grid(info=json.dumps(calc.info))
uitopics = []
for entity in self.hierarchy:
if not entity[]:
continue
if entity[] or calc._calcset:
for item in calc.info.get( entity[], [] ):
uitopics.append( model.topic(cid=entity[], topic=item) )
else:
topic = calc.info.get(entity[])
if topic or not entity[]:
uitopics.append( model.topic(cid=entity[], topic=topic) )
uitopics = [model.Topic.as_unique(session, cid=x.cid, topic="%s" % x.topic) for x in uitopics]
ormcalc.uitopics.extend(uitopics)
if calc._calcset:
session.add(ormcalc)
else:
session.add_all([codefamily, codeversion, pot, ormcalc])
session.commit()
del calc, ormcalc
return checksum, None |
def clean(self, ):
if self.startframe > self.endframe:
raise ValidationError("Shot starts before it ends: Framerange(%s - %s)" % (self.startframe, self.endframe)) | Reimplemented from :class:`models.Model`. Check if startframe is before endframe
:returns: None
:rtype: None
:raises: ValidationError | ### Input:
Reimplemented from :class:`models.Model`. Check if startframe is before endframe
:returns: None
:rtype: None
:raises: ValidationError
### Response:
def clean(self, ):
if self.startframe > self.endframe:
raise ValidationError("Shot starts before it ends: Framerange(%s - %s)" % (self.startframe, self.endframe)) |
def quadrant(xcoord, ycoord):
xneg = bool(xcoord < 0)
yneg = bool(ycoord < 0)
if xneg is True:
if yneg is False:
return 2
return 3
if yneg is False:
return 1
return 4 | Find the quadrant a pair of coordinates are located in
:type xcoord: integer
:param xcoord: The x coordinate to find the quadrant for
:type ycoord: integer
:param ycoord: The y coordinate to find the quadrant for | ### Input:
Find the quadrant a pair of coordinates are located in
:type xcoord: integer
:param xcoord: The x coordinate to find the quadrant for
:type ycoord: integer
:param ycoord: The y coordinate to find the quadrant for
### Response:
def quadrant(xcoord, ycoord):
xneg = bool(xcoord < 0)
yneg = bool(ycoord < 0)
if xneg is True:
if yneg is False:
return 2
return 3
if yneg is False:
return 1
return 4 |
def object_len(node, context=None):
from astroid.objects import FrozenSet
inferred_node = safe_infer(node, context=context)
if inferred_node is None or inferred_node is util.Uninferable:
raise exceptions.InferenceError(node=node)
if isinstance(inferred_node, nodes.Const) and isinstance(
inferred_node.value, (bytes, str)
):
return len(inferred_node.value)
if isinstance(inferred_node, (nodes.List, nodes.Set, nodes.Tuple, FrozenSet)):
return len(inferred_node.elts)
if isinstance(inferred_node, nodes.Dict):
return len(inferred_node.items)
try:
node_type = object_type(inferred_node, context=context)
len_call = next(node_type.igetattr("__len__", context=context))
except exceptions.AttributeInferenceError:
raise exceptions.AstroidTypeError(
"object of type has no len()".format(len_call.pytype())
)
result_of_len = next(len_call.infer_call_result(node, context))
if (
isinstance(result_of_len, nodes.Const)
and result_of_len.pytype() == "builtins.int"
):
return result_of_len.value
raise exceptions.AstroidTypeError(
" object cannot be interpreted as an integer".format(result_of_len)
) | Infer length of given node object
:param Union[nodes.ClassDef, nodes.Instance] node:
:param node: Node to infer length of
:raises AstroidTypeError: If an invalid node is returned
from __len__ method or no __len__ method exists
:raises InferenceError: If the given node cannot be inferred
or if multiple nodes are inferred
:rtype int: Integer length of node | ### Input:
Infer length of given node object
:param Union[nodes.ClassDef, nodes.Instance] node:
:param node: Node to infer length of
:raises AstroidTypeError: If an invalid node is returned
from __len__ method or no __len__ method exists
:raises InferenceError: If the given node cannot be inferred
or if multiple nodes are inferred
:rtype int: Integer length of node
### Response:
def object_len(node, context=None):
from astroid.objects import FrozenSet
inferred_node = safe_infer(node, context=context)
if inferred_node is None or inferred_node is util.Uninferable:
raise exceptions.InferenceError(node=node)
if isinstance(inferred_node, nodes.Const) and isinstance(
inferred_node.value, (bytes, str)
):
return len(inferred_node.value)
if isinstance(inferred_node, (nodes.List, nodes.Set, nodes.Tuple, FrozenSet)):
return len(inferred_node.elts)
if isinstance(inferred_node, nodes.Dict):
return len(inferred_node.items)
try:
node_type = object_type(inferred_node, context=context)
len_call = next(node_type.igetattr("__len__", context=context))
except exceptions.AttributeInferenceError:
raise exceptions.AstroidTypeError(
"object of type has no len()".format(len_call.pytype())
)
result_of_len = next(len_call.infer_call_result(node, context))
if (
isinstance(result_of_len, nodes.Const)
and result_of_len.pytype() == "builtins.int"
):
return result_of_len.value
raise exceptions.AstroidTypeError(
" object cannot be interpreted as an integer".format(result_of_len)
) |
def canonicalize(self, path):
if os.path.isabs(path):
out = os.path.normpath(path)
else:
out = os.path.normpath( + path)
if sys.platform == :
out = out.replace(, )
return out | Return the canonical form of a path on the server. For example,
if the server's home folder is C{/home/foo}, the path
C{"../betty"} would be canonicalized to C{"/home/betty"}. Note
the obvious security issues: if you're serving files only from a
specific folder, you probably don't want this method to reveal path
names outside that folder.
You may find the python methods in C{os.path} useful, especially
C{os.path.normpath} and C{os.path.realpath}.
The default implementation returns C{os.path.normpath('/' + path)}. | ### Input:
Return the canonical form of a path on the server. For example,
if the server's home folder is C{/home/foo}, the path
C{"../betty"} would be canonicalized to C{"/home/betty"}. Note
the obvious security issues: if you're serving files only from a
specific folder, you probably don't want this method to reveal path
names outside that folder.
You may find the python methods in C{os.path} useful, especially
C{os.path.normpath} and C{os.path.realpath}.
The default implementation returns C{os.path.normpath('/' + path)}.
### Response:
def canonicalize(self, path):
if os.path.isabs(path):
out = os.path.normpath(path)
else:
out = os.path.normpath( + path)
if sys.platform == :
out = out.replace(, )
return out |
def old_indices(self, names, axis=None):
warnings.warn("Matrix.old_indices() is deprecated - only here for testing. Use Matrix.indices()",PyemuWarning)
row_idxs, col_idxs = [], []
for name in names:
if name.lower() not in self.col_names \
and name.lower() not in self.row_names:
raise Exception( + name)
if name.lower() in self.col_names:
col_idxs.append(self.col_names.index(name))
if name.lower() in self.row_names:
row_idxs.append(self.row_names.index(name))
if axis is None:
return np.array(row_idxs, dtype=np.int32),\
np.array(col_idxs, dtype=np.int32)
elif axis == 0:
if len(row_idxs) != len(names):
raise Exception("Matrix.indices(): " +
"not all names found in row_names")
return np.array(row_idxs, dtype=np.int32)
elif axis == 1:
if len(col_idxs) != len(names):
raise Exception("Matrix.indices(): " +
"not all names found in col_names")
return np.array(col_idxs, dtype=np.int32)
else:
raise Exception("Matrix.indices(): " +
"axis argument must 0 or 1, not:" + str(axis)) | get the row and col indices of names. If axis is None, two ndarrays
are returned, corresponding the indices of names for each axis
Parameters
----------
names : iterable
column and/or row names
axis : (int) (optional)
the axis to search.
Returns
-------
numpy.ndarray : numpy.ndarray
indices of names. | ### Input:
get the row and col indices of names. If axis is None, two ndarrays
are returned, corresponding the indices of names for each axis
Parameters
----------
names : iterable
column and/or row names
axis : (int) (optional)
the axis to search.
Returns
-------
numpy.ndarray : numpy.ndarray
indices of names.
### Response:
def old_indices(self, names, axis=None):
warnings.warn("Matrix.old_indices() is deprecated - only here for testing. Use Matrix.indices()",PyemuWarning)
row_idxs, col_idxs = [], []
for name in names:
if name.lower() not in self.col_names \
and name.lower() not in self.row_names:
raise Exception( + name)
if name.lower() in self.col_names:
col_idxs.append(self.col_names.index(name))
if name.lower() in self.row_names:
row_idxs.append(self.row_names.index(name))
if axis is None:
return np.array(row_idxs, dtype=np.int32),\
np.array(col_idxs, dtype=np.int32)
elif axis == 0:
if len(row_idxs) != len(names):
raise Exception("Matrix.indices(): " +
"not all names found in row_names")
return np.array(row_idxs, dtype=np.int32)
elif axis == 1:
if len(col_idxs) != len(names):
raise Exception("Matrix.indices(): " +
"not all names found in col_names")
return np.array(col_idxs, dtype=np.int32)
else:
raise Exception("Matrix.indices(): " +
"axis argument must 0 or 1, not:" + str(axis)) |
def sparse_angular_random_projection_split(inds, indptr, data, indices, rng_state):
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
left_inds = inds[indptr[left] : indptr[left + 1]]
left_data = data[indptr[left] : indptr[left + 1]]
right_inds = inds[indptr[right] : indptr[right + 1]]
right_data = data[indptr[right] : indptr[right + 1]]
left_norm = norm(left_data)
right_norm = norm(right_data)
if abs(left_norm) < EPS:
left_norm = 1.0
if abs(right_norm) < EPS:
right_norm = 1.0
normalized_left_data = left_data / left_norm
normalized_right_data = right_data / right_norm
hyperplane_inds, hyperplane_data = sparse_diff(
left_inds, normalized_left_data, right_inds, normalized_right_data
)
hyperplane_norm = norm(hyperplane_data)
if abs(hyperplane_norm) < EPS:
hyperplane_norm = 1.0
for d in range(hyperplane_data.shape[0]):
hyperplane_data[d] = hyperplane_data[d] / hyperplane_norm
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = 0.0
i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]]
i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]]
mul_inds, mul_data = sparse_mul(
hyperplane_inds, hyperplane_data, i_inds, i_data
)
for d in range(mul_data.shape[0]):
margin += mul_data[d]
if abs(margin) < EPS:
side[i] = tau_rand_int(rng_state) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
indices_left = np.empty(n_left, dtype=np.int64)
indices_right = np.empty(n_right, dtype=np.int64)
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
hyperplane = np.vstack((hyperplane_inds, hyperplane_data))
return indices_left, indices_right, hyperplane, None | Given a set of ``indices`` for data points from a sparse data set
presented in csr sparse format as inds, indptr and data, create
a random hyperplane to split the data, returning two arrays indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses cosine distance to determine the hyperplane
and which side each data sample falls on.
Parameters
----------
inds: array
CSR format index array of the matrix
indptr: array
CSR format index pointer array of the matrix
data: array
CSR format data array of the matrix
indices: array of shape (tree_node_size,)
The indices of the elements in the ``data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
The elements of ``indices`` that fall on the "left" side of the
random hyperplane. | ### Input:
Given a set of ``indices`` for data points from a sparse data set
presented in csr sparse format as inds, indptr and data, create
a random hyperplane to split the data, returning two arrays indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses cosine distance to determine the hyperplane
and which side each data sample falls on.
Parameters
----------
inds: array
CSR format index array of the matrix
indptr: array
CSR format index pointer array of the matrix
data: array
CSR format data array of the matrix
indices: array of shape (tree_node_size,)
The indices of the elements in the ``data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
The elements of ``indices`` that fall on the "left" side of the
random hyperplane.
### Response:
def sparse_angular_random_projection_split(inds, indptr, data, indices, rng_state):
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
left_inds = inds[indptr[left] : indptr[left + 1]]
left_data = data[indptr[left] : indptr[left + 1]]
right_inds = inds[indptr[right] : indptr[right + 1]]
right_data = data[indptr[right] : indptr[right + 1]]
left_norm = norm(left_data)
right_norm = norm(right_data)
if abs(left_norm) < EPS:
left_norm = 1.0
if abs(right_norm) < EPS:
right_norm = 1.0
normalized_left_data = left_data / left_norm
normalized_right_data = right_data / right_norm
hyperplane_inds, hyperplane_data = sparse_diff(
left_inds, normalized_left_data, right_inds, normalized_right_data
)
hyperplane_norm = norm(hyperplane_data)
if abs(hyperplane_norm) < EPS:
hyperplane_norm = 1.0
for d in range(hyperplane_data.shape[0]):
hyperplane_data[d] = hyperplane_data[d] / hyperplane_norm
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = 0.0
i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]]
i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]]
mul_inds, mul_data = sparse_mul(
hyperplane_inds, hyperplane_data, i_inds, i_data
)
for d in range(mul_data.shape[0]):
margin += mul_data[d]
if abs(margin) < EPS:
side[i] = tau_rand_int(rng_state) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
indices_left = np.empty(n_left, dtype=np.int64)
indices_right = np.empty(n_right, dtype=np.int64)
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
hyperplane = np.vstack((hyperplane_inds, hyperplane_data))
return indices_left, indices_right, hyperplane, None |
def warning(self, flag_message="Warning", padding=None, force=False):
if self.should_log(self.WARNING) or force:
self._print_message(
flag_message=flag_message, color=colors.warning_color,
padding=padding) | Log Level: :attr:WARNING
@flag_message: #str flags the message with the given text
using :func:flag
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
@color: #str colorizes @flag_message using :func:colorize
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("World").warning("Hello")
# (Hello) World
logg("Hello world").warning()
# (Warning) Hello world
.. | ### Input:
Log Level: :attr:WARNING
@flag_message: #str flags the message with the given text
using :func:flag
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
@color: #str colorizes @flag_message using :func:colorize
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("World").warning("Hello")
# (Hello) World
logg("Hello world").warning()
# (Warning) Hello world
..
### Response:
def warning(self, flag_message="Warning", padding=None, force=False):
if self.should_log(self.WARNING) or force:
self._print_message(
flag_message=flag_message, color=colors.warning_color,
padding=padding) |
def createCategoryFilter(self, positiveExamples):
categoryFilter = self._fullClient.createCategoryFilter("CategoryFilter", positiveExamples)
return categoryFilter.positions | Creates a filter fingerprint.
Args:
positiveExamples, list(str): The list of positive example texts.
Returns:
list of int: the positions representing the filter representing the texts
Raises:
CorticalioException: if the request was not successful | ### Input:
Creates a filter fingerprint.
Args:
positiveExamples, list(str): The list of positive example texts.
Returns:
list of int: the positions representing the filter representing the texts
Raises:
CorticalioException: if the request was not successful
### Response:
def createCategoryFilter(self, positiveExamples):
categoryFilter = self._fullClient.createCategoryFilter("CategoryFilter", positiveExamples)
return categoryFilter.positions |
def _run__cherrypy(app, config, mode):
assert mode == "cherrypy-wsgiserver"
try:
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
_logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
_logger.warning(
" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
)
_logger.warning(" was moved to the cheroot project.")
_logger.warning(" Consider using --server=cheroot.")
except ImportError:
_logger.error("*" * 78)
_logger.error("ERROR: Could not import cherrypy.wsgiserver.")
_logger.error(
"Try `pip install cherrypy` or specify another server using the --server option."
)
_logger.error("Note that starting with CherryPy 9.0, the server was moved to")
_logger.error(
"the cheroot project, so it is recommended to use `-server=cheroot`"
)
_logger.error("and run `pip install cheroot` instead.")
_logger.error("*" * 78)
raise
server_name = "WsgiDAV/{} {} Python/{}".format(
__version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
)
wsgiserver.CherryPyWSGIServer.version = server_name
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
protocol = "http"
if ssl_certificate:
assert ssl_private_key
wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
ssl_certificate, ssl_private_key, ssl_certificate_chain
)
protocol = "https"
_logger.info("SSL / HTTPS enabled.")
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
"server_name": server_name,
}
server_args.update(config.get("server_args", {}))
server = wsgiserver.CherryPyWSGIServer(**server_args)
startup_event = config.get("startup_event")
if startup_event:
def _patched_tick():
server.tick = org_tick
org_tick()
_logger.info("CherryPyWSGIServer is ready")
startup_event.set()
org_tick = server.tick
server.tick = _patched_tick
try:
server.start()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
finally:
server.stop()
return | Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed. | ### Input:
Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.
### Response:
def _run__cherrypy(app, config, mode):
assert mode == "cherrypy-wsgiserver"
try:
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
_logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
_logger.warning(
" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
)
_logger.warning(" was moved to the cheroot project.")
_logger.warning(" Consider using --server=cheroot.")
except ImportError:
_logger.error("*" * 78)
_logger.error("ERROR: Could not import cherrypy.wsgiserver.")
_logger.error(
"Try `pip install cherrypy` or specify another server using the --server option."
)
_logger.error("Note that starting with CherryPy 9.0, the server was moved to")
_logger.error(
"the cheroot project, so it is recommended to use `-server=cheroot`"
)
_logger.error("and run `pip install cheroot` instead.")
_logger.error("*" * 78)
raise
server_name = "WsgiDAV/{} {} Python/{}".format(
__version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
)
wsgiserver.CherryPyWSGIServer.version = server_name
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
protocol = "http"
if ssl_certificate:
assert ssl_private_key
wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
ssl_certificate, ssl_private_key, ssl_certificate_chain
)
protocol = "https"
_logger.info("SSL / HTTPS enabled.")
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
"server_name": server_name,
}
server_args.update(config.get("server_args", {}))
server = wsgiserver.CherryPyWSGIServer(**server_args)
startup_event = config.get("startup_event")
if startup_event:
def _patched_tick():
server.tick = org_tick
org_tick()
_logger.info("CherryPyWSGIServer is ready")
startup_event.set()
org_tick = server.tick
server.tick = _patched_tick
try:
server.start()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
finally:
server.stop()
return |
def varOr(population, toolbox, lambda_, cxpb, mutpb):
offspring = []
for _ in range(lambda_):
op_choice = np.random.random()
if op_choice < cxpb:
ind1, ind2 = pick_two_individuals_eligible_for_crossover(population)
if ind1 is not None:
ind1, _ = toolbox.mate(ind1, ind2)
del ind1.fitness.values
else:
ind1 = mutate_random_individual(population, toolbox)
offspring.append(ind1)
elif op_choice < cxpb + mutpb:
ind = mutate_random_individual(population, toolbox)
offspring.append(ind)
else:
idx = np.random.randint(0, len(population))
offspring.append(toolbox.clone(population[idx]))
return offspring | Part of an evolutionary algorithm applying only the variation part
(crossover, mutation **or** reproduction). The modified individuals have
their fitness invalidated. The individuals are cloned so returned
population is independent of the input population.
:param population: A list of individuals to vary.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param lambda\_: The number of children to produce
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
The variation goes as follow. On each of the *lambda_* iteration, it
selects one of the three operations; crossover, mutation or reproduction.
In the case of a crossover, two individuals are selected at random from
the parental population :math:`P_\mathrm{p}`, those individuals are cloned
using the :meth:`toolbox.clone` method and then mated using the
:meth:`toolbox.mate` method. Only the first child is appended to the
offspring population :math:`P_\mathrm{o}`, the second child is discarded.
In the case of a mutation, one individual is selected at random from
:math:`P_\mathrm{p}`, it is cloned and then mutated using using the
:meth:`toolbox.mutate` method. The resulting mutant is appended to
:math:`P_\mathrm{o}`. In the case of a reproduction, one individual is
selected at random from :math:`P_\mathrm{p}`, cloned and appended to
:math:`P_\mathrm{o}`.
This variation is named *Or* beceause an offspring will never result from
both operations crossover and mutation. The sum of both probabilities
shall be in :math:`[0, 1]`, the reproduction probability is
1 - *cxpb* - *mutpb*. | ### Input:
Part of an evolutionary algorithm applying only the variation part
(crossover, mutation **or** reproduction). The modified individuals have
their fitness invalidated. The individuals are cloned so returned
population is independent of the input population.
:param population: A list of individuals to vary.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param lambda\_: The number of children to produce
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
:returns: The final population
:returns: A class:`~deap.tools.Logbook` with the statistics of the
evolution
The variation goes as follow. On each of the *lambda_* iteration, it
selects one of the three operations; crossover, mutation or reproduction.
In the case of a crossover, two individuals are selected at random from
the parental population :math:`P_\mathrm{p}`, those individuals are cloned
using the :meth:`toolbox.clone` method and then mated using the
:meth:`toolbox.mate` method. Only the first child is appended to the
offspring population :math:`P_\mathrm{o}`, the second child is discarded.
In the case of a mutation, one individual is selected at random from
:math:`P_\mathrm{p}`, it is cloned and then mutated using using the
:meth:`toolbox.mutate` method. The resulting mutant is appended to
:math:`P_\mathrm{o}`. In the case of a reproduction, one individual is
selected at random from :math:`P_\mathrm{p}`, cloned and appended to
:math:`P_\mathrm{o}`.
This variation is named *Or* beceause an offspring will never result from
both operations crossover and mutation. The sum of both probabilities
shall be in :math:`[0, 1]`, the reproduction probability is
1 - *cxpb* - *mutpb*.
### Response:
def varOr(population, toolbox, lambda_, cxpb, mutpb):
offspring = []
for _ in range(lambda_):
op_choice = np.random.random()
if op_choice < cxpb:
ind1, ind2 = pick_two_individuals_eligible_for_crossover(population)
if ind1 is not None:
ind1, _ = toolbox.mate(ind1, ind2)
del ind1.fitness.values
else:
ind1 = mutate_random_individual(population, toolbox)
offspring.append(ind1)
elif op_choice < cxpb + mutpb:
ind = mutate_random_individual(population, toolbox)
offspring.append(ind)
else:
idx = np.random.randint(0, len(population))
offspring.append(toolbox.clone(population[idx]))
return offspring |
def list_all(course, single=None):
def bs(val):
return "●" if val else " "
def bc(val):
return as_success("✔") if val else as_error("✘")
def format_line(exercise):
return "{0} │ {1} │ {2} │ {3} │ {4}".format(exercise.tid,
bs(exercise.is_selected),
bc(exercise.is_downloaded),
bc(exercise.is_completed),
exercise.menuname())
print("ID{0}│ S │ D │ C │ Name".format(
(len(str(course.exercises[0].tid)) - 1) * " "
))
if single:
print(format_line(single))
return
for exercise in course.exercises:
print(format_line(exercise)) | Lists all of the exercises in the current course. | ### Input:
Lists all of the exercises in the current course.
### Response:
def list_all(course, single=None):
def bs(val):
return "●" if val else " "
def bc(val):
return as_success("✔") if val else as_error("✘")
def format_line(exercise):
return "{0} │ {1} │ {2} │ {3} │ {4}".format(exercise.tid,
bs(exercise.is_selected),
bc(exercise.is_downloaded),
bc(exercise.is_completed),
exercise.menuname())
print("ID{0}│ S │ D │ C │ Name".format(
(len(str(course.exercises[0].tid)) - 1) * " "
))
if single:
print(format_line(single))
return
for exercise in course.exercises:
print(format_line(exercise)) |
def convert(self):
try:
local_file = self.getter.get()
except (exceptions.NoSuchPackageException, OSError) as e:
logger.error(
"Failed and exiting:", exc_info=True)
logger.info("Pyp2rpm failed. See log for more info.")
sys.exit(e)
self.name, self.version = self.getter.get_name_version()
self.local_file = local_file
data = self.metadata_extractor.extract_data(self.client)
logger.debug("Extracted metadata:")
logger.debug(pprint.pformat(data.data))
self.merge_versions(data)
jinja_env = jinja2.Environment(loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader([]),
jinja2.PackageLoader(, ), ]))
for filter in filters.__all__:
jinja_env.filters[filter.__name__] = filter
try:
jinja_template = jinja_env.get_template(
os.path.abspath(self.template))
except jinja2.exceptions.TemplateNotFound:
logger.warn(
.format(
self.template, os.path.abspath(self.template)))
jinja_template = jinja_env.get_template(self.template)
logger.info(.format(self.template))
ret = jinja_template.render(data=data, name_convertor=name_convertor)
return re.sub(r, "\n", ret) | Returns RPM SPECFILE.
Returns:
rendered RPM SPECFILE. | ### Input:
Returns RPM SPECFILE.
Returns:
rendered RPM SPECFILE.
### Response:
def convert(self):
try:
local_file = self.getter.get()
except (exceptions.NoSuchPackageException, OSError) as e:
logger.error(
"Failed and exiting:", exc_info=True)
logger.info("Pyp2rpm failed. See log for more info.")
sys.exit(e)
self.name, self.version = self.getter.get_name_version()
self.local_file = local_file
data = self.metadata_extractor.extract_data(self.client)
logger.debug("Extracted metadata:")
logger.debug(pprint.pformat(data.data))
self.merge_versions(data)
jinja_env = jinja2.Environment(loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader([]),
jinja2.PackageLoader(, ), ]))
for filter in filters.__all__:
jinja_env.filters[filter.__name__] = filter
try:
jinja_template = jinja_env.get_template(
os.path.abspath(self.template))
except jinja2.exceptions.TemplateNotFound:
logger.warn(
.format(
self.template, os.path.abspath(self.template)))
jinja_template = jinja_env.get_template(self.template)
logger.info(.format(self.template))
ret = jinja_template.render(data=data, name_convertor=name_convertor)
return re.sub(r, "\n", ret) |
def extract_locals(trcback):
output = []
stack = extract_stack(get_inner_most_frame(trcback))
for frame, file_name, line_number, name, context, index in stack:
args_names, nameless, keyword = extract_arguments(frame)
arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {}
for key, data in frame.f_locals.iteritems():
if key == nameless:
nameless_args = map(repr, frame.f_locals.get(nameless, ()))
elif key == keyword:
keyword_args = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())
elif key in args_names:
arguments[key] = repr(data)
else:
locals[key] = repr(data)
output.append(((name, file_name, line_number), (arguments, nameless_args, keyword_args, locals)))
return output | Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list | ### Input:
Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list
### Response:
def extract_locals(trcback):
output = []
stack = extract_stack(get_inner_most_frame(trcback))
for frame, file_name, line_number, name, context, index in stack:
args_names, nameless, keyword = extract_arguments(frame)
arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {}
for key, data in frame.f_locals.iteritems():
if key == nameless:
nameless_args = map(repr, frame.f_locals.get(nameless, ()))
elif key == keyword:
keyword_args = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())
elif key in args_names:
arguments[key] = repr(data)
else:
locals[key] = repr(data)
output.append(((name, file_name, line_number), (arguments, nameless_args, keyword_args, locals)))
return output |
def buy_product(self, product_pk):
if self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists() \
or self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists():
return True
else:
return False | determina si el customer ha comprado un producto | ### Input:
determina si el customer ha comprado un producto
### Response:
def buy_product(self, product_pk):
if self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists() \
or self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists():
return True
else:
return False |
def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
ncbi = NCBIGene(self.graph_type, self.are_bnodes_skized)
gene_group = ncbi.files[]
self.fetch_from_url(
gene_group[], .join((ncbi.rawdir, gene_group[])), False)
self.omim_type = self.find_omim_type()
return | :param is_dl_forced:
:return: | ### Input:
:param is_dl_forced:
:return:
### Response:
def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
ncbi = NCBIGene(self.graph_type, self.are_bnodes_skized)
gene_group = ncbi.files[]
self.fetch_from_url(
gene_group[], .join((ncbi.rawdir, gene_group[])), False)
self.omim_type = self.find_omim_type()
return |
def upgrade():
config_paths = context.config.get_main_option()
filenames = fedmsg.config._gather_configs_in(config_paths)
config = fedmsg.config.load_config(filenames=filenames)
make_processors(**config)
engine = op.get_bind().engine
m.init(engine=engine)
for msg in _page(m.Message.query.order_by(m.Message.timestamp)):
print("processing %s %s" % (msg.timestamp, msg.topic))
if msg.users and msg.packages:
continue
changed = False
if not msg.users:
new_usernames = msg2usernames(msg.__json__(), **config)
print("Updating users to %r" % new_usernames)
changed = changed or new_usernames
for new_username in new_usernames:
new_user = m.User.get_or_create(new_username)
msg.users.append(new_user)
if not msg.packages:
new_packagenames = msg2packages(msg.__json__(), **config)
print("Updating packages to %r" % new_packagenames)
changed = changed or new_usernames
for new_packagename in new_packagenames:
new_package = m.Package.get_or_create(new_packagename)
msg.packages.append(new_package)
if changed and random.random() < 0.01:
print(" * Saving!")
m.session.commit()
m.session.commit() | This takes a *really* long time. Like, hours. | ### Input:
This takes a *really* long time. Like, hours.
### Response:
def upgrade():
config_paths = context.config.get_main_option()
filenames = fedmsg.config._gather_configs_in(config_paths)
config = fedmsg.config.load_config(filenames=filenames)
make_processors(**config)
engine = op.get_bind().engine
m.init(engine=engine)
for msg in _page(m.Message.query.order_by(m.Message.timestamp)):
print("processing %s %s" % (msg.timestamp, msg.topic))
if msg.users and msg.packages:
continue
changed = False
if not msg.users:
new_usernames = msg2usernames(msg.__json__(), **config)
print("Updating users to %r" % new_usernames)
changed = changed or new_usernames
for new_username in new_usernames:
new_user = m.User.get_or_create(new_username)
msg.users.append(new_user)
if not msg.packages:
new_packagenames = msg2packages(msg.__json__(), **config)
print("Updating packages to %r" % new_packagenames)
changed = changed or new_usernames
for new_packagename in new_packagenames:
new_package = m.Package.get_or_create(new_packagename)
msg.packages.append(new_package)
if changed and random.random() < 0.01:
print(" * Saving!")
m.session.commit()
m.session.commit() |
def insert(self, index: int, item: object) -> None:
self._blueprints.insert(index, item) | The Abstract class `MutableSequence` leverages this insert method to
perform the `BlueprintGroup.append` operation.
:param index: Index to use for removing a new Blueprint item
:param item: New `Blueprint` object.
:return: None | ### Input:
The Abstract class `MutableSequence` leverages this insert method to
perform the `BlueprintGroup.append` operation.
:param index: Index to use for removing a new Blueprint item
:param item: New `Blueprint` object.
:return: None
### Response:
def insert(self, index: int, item: object) -> None:
self._blueprints.insert(index, item) |
def get_html_string(self):
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree) | Generates if need be and returns a simpler html string with
extracted text | ### Input:
Generates if need be and returns a simpler html string with
extracted text
### Response:
def get_html_string(self):
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree) |
def list_holds(pattern=__HOLD_PATTERN, full=True):
r**
_check_versionlock()
out = __salt__[]([_yum(), , ],
python_shell=False)
ret = []
for line in salt.utils.itertools.split(out, ):
match = _get_hold(line, pattern=pattern, full=full)
if match is not None:
ret.append(match)
return ret | r'''
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
Function renamed from ``pkg.get_locked_pkgs`` to ``pkg.list_holds``.
List information on locked packages
.. note::
Requires the appropriate ``versionlock`` plugin package to be installed:
- On RHEL 5: ``yum-versionlock``
- On RHEL 6 & 7: ``yum-plugin-versionlock``
- On Fedora: ``python-dnf-plugins-extras-versionlock``
pattern : \w+(?:[.-][^-]+)*
Regular expression used to match the package name
full : True
Show the full hold definition including version and epoch. Set to
``False`` to return just the name of the package(s) being held.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_holds
salt '*' pkg.list_holds full=False | ### Input:
r'''
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
Function renamed from ``pkg.get_locked_pkgs`` to ``pkg.list_holds``.
List information on locked packages
.. note::
Requires the appropriate ``versionlock`` plugin package to be installed:
- On RHEL 5: ``yum-versionlock``
- On RHEL 6 & 7: ``yum-plugin-versionlock``
- On Fedora: ``python-dnf-plugins-extras-versionlock``
pattern : \w+(?:[.-][^-]+)*
Regular expression used to match the package name
full : True
Show the full hold definition including version and epoch. Set to
``False`` to return just the name of the package(s) being held.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_holds
salt '*' pkg.list_holds full=False
### Response:
def list_holds(pattern=__HOLD_PATTERN, full=True):
r**
_check_versionlock()
out = __salt__[]([_yum(), , ],
python_shell=False)
ret = []
for line in salt.utils.itertools.split(out, ):
match = _get_hold(line, pattern=pattern, full=full)
if match is not None:
ret.append(match)
return ret |
def get_statuses(self):
return github.PaginatedList.PaginatedList(
github.CommitStatus.CommitStatus,
self._requester,
self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
None
) | :calls: `GET /repos/:owner/:repo/statuses/:ref <http://developer.github.com/v3/repos/statuses>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitStatus.CommitStatus` | ### Input:
:calls: `GET /repos/:owner/:repo/statuses/:ref <http://developer.github.com/v3/repos/statuses>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitStatus.CommitStatus`
### Response:
def get_statuses(self):
return github.PaginatedList.PaginatedList(
github.CommitStatus.CommitStatus,
self._requester,
self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
None
) |
def set_attrs(self, **attrs):
self.attrs.update(attrs)
self._backend.set_attrs(**attrs) | Set model attributes, e.g. input resistance of a cell. | ### Input:
Set model attributes, e.g. input resistance of a cell.
### Response:
def set_attrs(self, **attrs):
self.attrs.update(attrs)
self._backend.set_attrs(**attrs) |
def delete_instance_if_removed(self, port):
instance_type = self.get_instance_type(port)
if not instance_type:
return
if not db_lib.instance_provisioned(port[]):
i_res = MechResource(port[], instance_type,
a_const.DELETE)
self.provision_queue.put(i_res) | Enqueue instance delete if it's no longer in the db | ### Input:
Enqueue instance delete if it's no longer in the db
### Response:
def delete_instance_if_removed(self, port):
instance_type = self.get_instance_type(port)
if not instance_type:
return
if not db_lib.instance_provisioned(port[]):
i_res = MechResource(port[], instance_type,
a_const.DELETE)
self.provision_queue.put(i_res) |
def elemd(item, inset):
assert isinstance(inset, stypes.SpiceCell)
assert inset.dtype == 1
item = ctypes.c_double(item)
return bool(libspice.elemd_c(item, ctypes.byref(inset))) | Determine whether an item is an element of a double precision set.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/elemd_c.html
:param item: Item to be tested.
:type item: float
:param inset: Set to be tested.
:type inset: spiceypy.utils.support_types.SpiceCell
:return: True if item is an element of set.
:rtype: bool | ### Input:
Determine whether an item is an element of a double precision set.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/elemd_c.html
:param item: Item to be tested.
:type item: float
:param inset: Set to be tested.
:type inset: spiceypy.utils.support_types.SpiceCell
:return: True if item is an element of set.
:rtype: bool
### Response:
def elemd(item, inset):
assert isinstance(inset, stypes.SpiceCell)
assert inset.dtype == 1
item = ctypes.c_double(item)
return bool(libspice.elemd_c(item, ctypes.byref(inset))) |
def has_attribute(self, name, alias=False):
prop_dict = merge_dicts(self.__attributes__,
self.__fields__,
self.__relations__)
if alias:
prop_dict.update({v.alias : v for v in prop_dict.values() if v.alias is not None})
return name in prop_dict | Check if the entity contains the attribute *name* | ### Input:
Check if the entity contains the attribute *name*
### Response:
def has_attribute(self, name, alias=False):
prop_dict = merge_dicts(self.__attributes__,
self.__fields__,
self.__relations__)
if alias:
prop_dict.update({v.alias : v for v in prop_dict.values() if v.alias is not None})
return name in prop_dict |
def add_scalar_summary(self, x, tag=None):
if not self.summary_collections:
return
with self.g.as_default():
tag = tag or _tag_for(x.name)
summary = (tf.summary.scalar(
tag, x, collections=self.summary_collections))
return summary | Adds a scalar summary for x. | ### Input:
Adds a scalar summary for x.
### Response:
def add_scalar_summary(self, x, tag=None):
if not self.summary_collections:
return
with self.g.as_default():
tag = tag or _tag_for(x.name)
summary = (tf.summary.scalar(
tag, x, collections=self.summary_collections))
return summary |
def load_pickled_model(filename, dirname=None):
if dirname is None:
pkg_filename = pkgutil.get_loader().get_filename()
pkg_dirname = os.path.dirname(pkg_filename)
dirname = os.path.join(pkg_dirname, , model_path)
filepath = os.path.join(dirname, filename)
return joblib.load(filepath) | Load a pickled ``Extractor`` model from disk.
Args:
filename (str): Name of pickled model file under ``dirname``.
dirname (str): Name of directory on disk containing the pickled model.
If None, dragnet's default pickled model directory is used:
/path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION]
Returns:
:class:`dragnet.extractor.Extractor` | ### Input:
Load a pickled ``Extractor`` model from disk.
Args:
filename (str): Name of pickled model file under ``dirname``.
dirname (str): Name of directory on disk containing the pickled model.
If None, dragnet's default pickled model directory is used:
/path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION]
Returns:
:class:`dragnet.extractor.Extractor`
### Response:
def load_pickled_model(filename, dirname=None):
if dirname is None:
pkg_filename = pkgutil.get_loader().get_filename()
pkg_dirname = os.path.dirname(pkg_filename)
dirname = os.path.join(pkg_dirname, , model_path)
filepath = os.path.join(dirname, filename)
return joblib.load(filepath) |
def noisy_and(self, num_classes, trainable=True):
assert self.input.get_shape()[3] == num_classes
scope =
with tf.variable_scope(scope):
a = self.const_variable(name=, shape=[1], value=1.0, trainable=trainable)
b = self.const_variable(name=, shape=[1, num_classes], value=0.0, trainable=trainable)
mean = tf.reduce_mean(self.input, axis=[1, 2])
self.input = (tf.nn.sigmoid(a * (mean - b)) - tf.nn.sigmoid(-a * b)) / (
tf.sigmoid(a * (1 - b)) - tf.sigmoid(-a * b))
print(scope + + str(self.input.get_shape())) | Multiple Instance Learning (MIL), flexible pooling function
:param num_classes: int, determine number of output maps | ### Input:
Multiple Instance Learning (MIL), flexible pooling function
:param num_classes: int, determine number of output maps
### Response:
def noisy_and(self, num_classes, trainable=True):
assert self.input.get_shape()[3] == num_classes
scope =
with tf.variable_scope(scope):
a = self.const_variable(name=, shape=[1], value=1.0, trainable=trainable)
b = self.const_variable(name=, shape=[1, num_classes], value=0.0, trainable=trainable)
mean = tf.reduce_mean(self.input, axis=[1, 2])
self.input = (tf.nn.sigmoid(a * (mean - b)) - tf.nn.sigmoid(-a * b)) / (
tf.sigmoid(a * (1 - b)) - tf.sigmoid(-a * b))
print(scope + + str(self.input.get_shape())) |
def rescan_all(host):
*
if os.path.isdir(.format(host)):
cmd = .format(host)
else:
return .format(host)
return __salt__[](cmd).splitlines() | List scsi devices
CLI Example:
.. code-block:: bash
salt '*' scsi.rescan_all 0 | ### Input:
List scsi devices
CLI Example:
.. code-block:: bash
salt '*' scsi.rescan_all 0
### Response:
def rescan_all(host):
*
if os.path.isdir(.format(host)):
cmd = .format(host)
else:
return .format(host)
return __salt__[](cmd).splitlines() |
def shutdown(self):
if not self._exited:
self._exited = True
if self._task_runner.is_alive():
self._task_runner.terminate()
if self._command_server.is_alive():
if self._task_runner.is_alive():
self._task_runner.join()
_shutdown_pipe(self._pipe)
self._task.stop() | Shuts down the daemon process. | ### Input:
Shuts down the daemon process.
### Response:
def shutdown(self):
if not self._exited:
self._exited = True
if self._task_runner.is_alive():
self._task_runner.terminate()
if self._command_server.is_alive():
if self._task_runner.is_alive():
self._task_runner.join()
_shutdown_pipe(self._pipe)
self._task.stop() |
def list_addresses(self, tag_values=None):
title = % self.__class__.__name__
input_fields = {
: tag_values
}
for key, value in input_fields.items():
if value:
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
kw_args = {}
tag_text =
if tag_values:
kw_args = {
: [ { : , : tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value =
if len(tag_values) > 1:
plural_value =
tag_text = % (plural_value, join_words(tag_values))
self.iam.printer( % (self.iam.region_name, tag_text))
address_list = []
try:
response = self.connection.describe_addresses(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response[]
for address in response_list:
address_list.append(address[])
return address_list | a method to list elastic ip addresses associated with account on AWS
:param tag_values: [optional] list of tag values
:return: list of strings with ip addresses | ### Input:
a method to list elastic ip addresses associated with account on AWS
:param tag_values: [optional] list of tag values
:return: list of strings with ip addresses
### Response:
def list_addresses(self, tag_values=None):
title = % self.__class__.__name__
input_fields = {
: tag_values
}
for key, value in input_fields.items():
if value:
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
kw_args = {}
tag_text =
if tag_values:
kw_args = {
: [ { : , : tag_values } ]
}
from labpack.parsing.grammar import join_words
plural_value =
if len(tag_values) > 1:
plural_value =
tag_text = % (plural_value, join_words(tag_values))
self.iam.printer( % (self.iam.region_name, tag_text))
address_list = []
try:
response = self.connection.describe_addresses(**kw_args)
except:
raise AWSConnectionError(title)
response_list = response[]
for address in response_list:
address_list.append(address[])
return address_list |
def _class_defining_method(meth):
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return .format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split(, 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit(, 1)[0])
if isinstance(cls, type):
return .format(cls.__module__, cls.__name__) | Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545. | ### Input:
Gets the name of the class that defines meth.
Adapted from
http://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545.
### Response:
def _class_defining_method(meth):
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return .format(cls.__module__, cls.__name__)
meth = meth.__func__
if inspect.isfunction(meth):
module = meth.__qualname__.split(, 1)[0]
cls = getattr(inspect.getmodule(meth), module.rsplit(, 1)[0])
if isinstance(cls, type):
return .format(cls.__module__, cls.__name__) |
def set_features(self, filter_type):
elements_to_split = {: self.allpsms, : self.allpeps}
self.features = self.splitfunc(elements_to_split, self.ns, filter_type) | Calls splitter to split percolator output into target/decoy
elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway. | ### Input:
Calls splitter to split percolator output into target/decoy
elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
### Response:
def set_features(self, filter_type):
elements_to_split = {: self.allpsms, : self.allpeps}
self.features = self.splitfunc(elements_to_split, self.ns, filter_type) |
def main():
usage = (
)
parser = optparse.OptionParser(usage=usage)
parser.add_option(, default=None,
help=
)
parser.add_option(, type=, default=None,
help=
)
parser.add_option(, dest=, default=,
help=
)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close() | Main function. | ### Input:
Main function.
### Response:
def main():
usage = (
)
parser = optparse.OptionParser(usage=usage)
parser.add_option(, default=None,
help=
)
parser.add_option(, type=, default=None,
help=
)
parser.add_option(, dest=, default=,
help=
)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
sys.exit(1)
xml_path = args[0]
client = MongoClient(options.host, options.port)
germanet_db = client[options.database_name]
lex_files, gn_rels_file, wiktionary_files, ili_files = \
find_germanet_xml_files(xml_path)
insert_lexical_information(germanet_db, lex_files)
insert_relation_information(germanet_db, gn_rels_file)
insert_paraphrase_information(germanet_db, wiktionary_files)
insert_lemmatisation_data(germanet_db)
insert_infocontent_data(germanet_db)
compute_max_min_depth(germanet_db)
client.close() |
def new_nick(self):
old = self.nick
self.nick = % (self.base_nick, random.randint(1, 1000))
self.logger.warn( % (old, self.nick))
self.register_nick()
self.handle_nick_change(old, self.nick) | \
Generates a new nickname based on original nickname followed by a
random number | ### Input:
\
Generates a new nickname based on original nickname followed by a
random number
### Response:
def new_nick(self):
old = self.nick
self.nick = % (self.base_nick, random.randint(1, 1000))
self.logger.warn( % (old, self.nick))
self.register_nick()
self.handle_nick_change(old, self.nick) |
def __read_single_fasta_query_lines(f):
def readline():
l = f.readline()
if l == :
raise EOFError()
return l
rec = None
try:
l = readline()
assert l.startswith()
rec = [l]
while True:
pos = f.tell()
l = readline()
if l.startswith():
f.seek(pos, 0)
break
rec += [l]
except EOFError:
pass
return rec | Read and return sequence of lines (including newlines) that
represent a single FASTA query record. The provided file is
expected to be blocking.
Returns None if there are no more query sequences in the file. | ### Input:
Read and return sequence of lines (including newlines) that
represent a single FASTA query record. The provided file is
expected to be blocking.
Returns None if there are no more query sequences in the file.
### Response:
def __read_single_fasta_query_lines(f):
def readline():
l = f.readline()
if l == :
raise EOFError()
return l
rec = None
try:
l = readline()
assert l.startswith()
rec = [l]
while True:
pos = f.tell()
l = readline()
if l.startswith():
f.seek(pos, 0)
break
rec += [l]
except EOFError:
pass
return rec |
def is_touch(self):
if self in {type(self).TOUCH_DOWN, type(self).TOUCH_UP,
type(self).TOUCH_MOTION, type(self).TOUCH_CANCEL,
type(self).TOUCH_FRAME}:
return True
else:
return False | Macro to check if this event is
a :class:`~libinput.event.TouchEvent`. | ### Input:
Macro to check if this event is
a :class:`~libinput.event.TouchEvent`.
### Response:
def is_touch(self):
if self in {type(self).TOUCH_DOWN, type(self).TOUCH_UP,
type(self).TOUCH_MOTION, type(self).TOUCH_CANCEL,
type(self).TOUCH_FRAME}:
return True
else:
return False |
def getOutputDevice(self, textureType):
fn = self.function_table.getOutputDevice
pnDevice = c_uint64()
pInstance = VkInstance_T()
fn(byref(pnDevice), textureType, byref(pInstance))
return pnDevice.value, pInstance | * Returns platform- and texture-type specific adapter identification so that applications and the
compositor are creating textures and swap chains on the same GPU. If an error occurs the device
will be set to 0.
pInstance is an optional parameter that is required only when textureType is TextureType_Vulkan.
[D3D10/11/12 Only (D3D9 Not Supported)]
Returns the adapter LUID that identifies the GPU attached to the HMD. The user should
enumerate all adapters using IDXGIFactory::EnumAdapters and IDXGIAdapter::GetDesc to find
the adapter with the matching LUID, or use IDXGIFactory4::EnumAdapterByLuid.
The discovered IDXGIAdapter should be used to create the device and swap chain.
[Vulkan Only]
Returns the VkPhysicalDevice that should be used by the application.
pInstance must be the instance the application will use to query for the VkPhysicalDevice. The application
must create the VkInstance with extensions returned by IVRCompositor::GetVulkanInstanceExtensionsRequired enabled.
[macOS Only]
For TextureType_IOSurface returns the id<MTLDevice> that should be used by the application.
On 10.13+ for TextureType_OpenGL returns the 'registryId' of the renderer which should be used
by the application. See Apple Technical Q&A QA1168 for information on enumerating GL Renderers, and the
new kCGLRPRegistryIDLow and kCGLRPRegistryIDHigh CGLRendererProperty values in the 10.13 SDK.
Pre 10.13 for TextureType_OpenGL returns 0, as there is no dependable way to correlate the HMDs MTLDevice
with a GL Renderer. | ### Input:
* Returns platform- and texture-type specific adapter identification so that applications and the
compositor are creating textures and swap chains on the same GPU. If an error occurs the device
will be set to 0.
pInstance is an optional parameter that is required only when textureType is TextureType_Vulkan.
[D3D10/11/12 Only (D3D9 Not Supported)]
Returns the adapter LUID that identifies the GPU attached to the HMD. The user should
enumerate all adapters using IDXGIFactory::EnumAdapters and IDXGIAdapter::GetDesc to find
the adapter with the matching LUID, or use IDXGIFactory4::EnumAdapterByLuid.
The discovered IDXGIAdapter should be used to create the device and swap chain.
[Vulkan Only]
Returns the VkPhysicalDevice that should be used by the application.
pInstance must be the instance the application will use to query for the VkPhysicalDevice. The application
must create the VkInstance with extensions returned by IVRCompositor::GetVulkanInstanceExtensionsRequired enabled.
[macOS Only]
For TextureType_IOSurface returns the id<MTLDevice> that should be used by the application.
On 10.13+ for TextureType_OpenGL returns the 'registryId' of the renderer which should be used
by the application. See Apple Technical Q&A QA1168 for information on enumerating GL Renderers, and the
new kCGLRPRegistryIDLow and kCGLRPRegistryIDHigh CGLRendererProperty values in the 10.13 SDK.
Pre 10.13 for TextureType_OpenGL returns 0, as there is no dependable way to correlate the HMDs MTLDevice
with a GL Renderer.
### Response:
def getOutputDevice(self, textureType):
fn = self.function_table.getOutputDevice
pnDevice = c_uint64()
pInstance = VkInstance_T()
fn(byref(pnDevice), textureType, byref(pInstance))
return pnDevice.value, pInstance |
def render(self, program: moderngl.Program, mode=None, vertices=-1, first=0, instances=1):
vao = self.instance(program)
if mode is None:
mode = self.mode
vao.render(mode, vertices=vertices, first=first, instances=instances) | Render the VAO.
Args:
program: The ``moderngl.Program``
Keyword Args:
mode: Override the draw mode (``TRIANGLES`` etc)
vertices (int): The number of vertices to transform
first (int): The index of the first vertex to start with
instances (int): The number of instances | ### Input:
Render the VAO.
Args:
program: The ``moderngl.Program``
Keyword Args:
mode: Override the draw mode (``TRIANGLES`` etc)
vertices (int): The number of vertices to transform
first (int): The index of the first vertex to start with
instances (int): The number of instances
### Response:
def render(self, program: moderngl.Program, mode=None, vertices=-1, first=0, instances=1):
vao = self.instance(program)
if mode is None:
mode = self.mode
vao.render(mode, vertices=vertices, first=first, instances=instances) |
def read_json_flag(fobj):
if isinstance(fobj, string_types):
with open(fobj, ) as fobj2:
return read_json_flag(fobj2)
txt = fobj.read()
if isinstance(txt, bytes):
txt = txt.decode()
data = json.loads(txt)
name = .format(**data)
out = DataQualityFlag(name, active=data[],
known=data[])
try:
out.description = data[].get(, None)
except KeyError:
return out | Read a `DataQualityFlag` from a segments-web.ligo.org JSON file | ### Input:
Read a `DataQualityFlag` from a segments-web.ligo.org JSON file
### Response:
def read_json_flag(fobj):
if isinstance(fobj, string_types):
with open(fobj, ) as fobj2:
return read_json_flag(fobj2)
txt = fobj.read()
if isinstance(txt, bytes):
txt = txt.decode()
data = json.loads(txt)
name = .format(**data)
out = DataQualityFlag(name, active=data[],
known=data[])
try:
out.description = data[].get(, None)
except KeyError:
return out |
def parse_json(self, req, name, field):
if not (req.body and is_json_request(req)):
return core.missing
json_data = req.json
if json_data is None:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True) | Pull a json value from the request. | ### Input:
Pull a json value from the request.
### Response:
def parse_json(self, req, name, field):
if not (req.body and is_json_request(req)):
return core.missing
json_data = req.json
if json_data is None:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True) |
def init_publisher(app):
@app.context_processor
def inject_links():
return {
: stack.top.websub_self_url,
: stack.top.websub_hub_url,
: stack.top.websub_self_link,
: stack.top.websub_hub_link,
} | Calling this with your flask app as argument is required for the
publisher decorator to work. | ### Input:
Calling this with your flask app as argument is required for the
publisher decorator to work.
### Response:
def init_publisher(app):
@app.context_processor
def inject_links():
return {
: stack.top.websub_self_url,
: stack.top.websub_hub_url,
: stack.top.websub_self_link,
: stack.top.websub_hub_link,
} |
def paintEvent(self, event):
painter = QPainter(self)
painter.fillRect(event.rect(), self.palette().color(QPalette.Window))
block = self._qpart.firstVisibleBlock()
blockBoundingGeometry = self._qpart.blockBoundingGeometry(block).translated(self._qpart.contentOffset())
top = blockBoundingGeometry.top()
bottom = top + blockBoundingGeometry.height()
for block in qutepart.iterateBlocksFrom(block):
height = self._qpart.blockBoundingGeometry(block).height()
if top > event.rect().bottom():
break
if block.isVisible() and \
bottom >= event.rect().top():
if block.blockNumber() in self._qpart.lintMarks:
msgType, msgText = self._qpart.lintMarks[block.blockNumber()]
pixMap = self._lintPixmaps[msgType]
yPos = top + ((height - pixMap.height()) / 2)
painter.drawPixmap(0, yPos, pixMap)
if self.isBlockMarked(block):
yPos = top + ((height - self._bookmarkPixmap.height()) / 2)
painter.drawPixmap(0, yPos, self._bookmarkPixmap)
top += height | QWidget.paintEvent() implementation
Draw markers | ### Input:
QWidget.paintEvent() implementation
Draw markers
### Response:
def paintEvent(self, event):
painter = QPainter(self)
painter.fillRect(event.rect(), self.palette().color(QPalette.Window))
block = self._qpart.firstVisibleBlock()
blockBoundingGeometry = self._qpart.blockBoundingGeometry(block).translated(self._qpart.contentOffset())
top = blockBoundingGeometry.top()
bottom = top + blockBoundingGeometry.height()
for block in qutepart.iterateBlocksFrom(block):
height = self._qpart.blockBoundingGeometry(block).height()
if top > event.rect().bottom():
break
if block.isVisible() and \
bottom >= event.rect().top():
if block.blockNumber() in self._qpart.lintMarks:
msgType, msgText = self._qpart.lintMarks[block.blockNumber()]
pixMap = self._lintPixmaps[msgType]
yPos = top + ((height - pixMap.height()) / 2)
painter.drawPixmap(0, yPos, pixMap)
if self.isBlockMarked(block):
yPos = top + ((height - self._bookmarkPixmap.height()) / 2)
painter.drawPixmap(0, yPos, self._bookmarkPixmap)
top += height |
def _run_raw(self, cmd, ignore_errors=False):
result = os.system(cmd)
if result != 0:
if ignore_errors:
self.log(f"command ({cmd}) failed.")
assert False, "_run_raw failed" | Runs command directly, skipping tmux interface | ### Input:
Runs command directly, skipping tmux interface
### Response:
def _run_raw(self, cmd, ignore_errors=False):
result = os.system(cmd)
if result != 0:
if ignore_errors:
self.log(f"command ({cmd}) failed.")
assert False, "_run_raw failed" |
def delete_object(self, obj, view_kwargs):
if obj is None:
url_field = getattr(self, , )
filter_value = view_kwargs[url_field]
raise ObjectNotFound(.format(self.model.__name__, filter_value),
source={: url_field})
self.before_delete_object(obj, view_kwargs)
self.session.delete(obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Delete object error: " + str(e))
self.after_delete_object(obj, view_kwargs) | Delete an object through sqlalchemy
:param DeclarativeMeta item: an item from sqlalchemy
:param dict view_kwargs: kwargs from the resource view | ### Input:
Delete an object through sqlalchemy
:param DeclarativeMeta item: an item from sqlalchemy
:param dict view_kwargs: kwargs from the resource view
### Response:
def delete_object(self, obj, view_kwargs):
if obj is None:
url_field = getattr(self, , )
filter_value = view_kwargs[url_field]
raise ObjectNotFound(.format(self.model.__name__, filter_value),
source={: url_field})
self.before_delete_object(obj, view_kwargs)
self.session.delete(obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Delete object error: " + str(e))
self.after_delete_object(obj, view_kwargs) |
def expiration_maintenance(self):
if self.cellarray[self.refresh_head] != 0:
self.cellarray[self.refresh_head] -= 1
self.refresh_head = (self.refresh_head + 1) % self.nbr_bits | Decrement cell value if not zero
This maintenance process need to executed each self.compute_refresh_time() | ### Input:
Decrement cell value if not zero
This maintenance process need to executed each self.compute_refresh_time()
### Response:
def expiration_maintenance(self):
if self.cellarray[self.refresh_head] != 0:
self.cellarray[self.refresh_head] -= 1
self.refresh_head = (self.refresh_head + 1) % self.nbr_bits |
def main():
global PANDOCVERSION
global Image
fmt = args.fmt
doc = json.loads(STDIN.read())
PANDOCVERSION = pandocxnos.init(args.pandocversion, doc)
if PANDOCVERSION < :
Image = elt(, 2)
meta = doc[] if PANDOCVERSION >= else doc[0][]
blocks = doc[] if PANDOCVERSION >= else doc[1:]
process(meta)
attach_attrs_image = attach_attrs_factory(Image,
extract_attrs=_extract_attrs)
detach_attrs_image = detach_attrs_factory(Image)
insert_secnos = insert_secnos_factory(Image)
delete_secnos = delete_secnos_factory(Image)
filters = [insert_secnos, process_figures, delete_secnos] \
if PANDOCVERSION >= else \
[attach_attrs_image, insert_secnos, process_figures,
delete_secnos, detach_attrs_image]
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
filters, blocks)
process_refs = process_refs_factory(references.keys())
replace_refs = replace_refs_factory(references,
use_cleveref_default, False,
plusname if not capitalize else
[name.title() for name in plusname],
starname, )
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
[repair_refs, process_refs, replace_refs],
altered)
if fmt == :
rawblocks = []
if has_unnumbered_figures:
rawblocks += [RawBlock(, TEX0),
RawBlock(, TEX1),
RawBlock(, TEX2)]
if captionname != :
rawblocks += [RawBlock(, TEX3 % captionname)]
insert_rawblocks = insert_rawblocks_factory(rawblocks)
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
[insert_rawblocks], altered)
if PANDOCVERSION >= :
doc[] = altered
else:
doc = doc[:1] + altered
json.dump(doc, STDOUT)
STDOUT.flush() | Filters the document AST. | ### Input:
Filters the document AST.
### Response:
def main():
global PANDOCVERSION
global Image
fmt = args.fmt
doc = json.loads(STDIN.read())
PANDOCVERSION = pandocxnos.init(args.pandocversion, doc)
if PANDOCVERSION < :
Image = elt(, 2)
meta = doc[] if PANDOCVERSION >= else doc[0][]
blocks = doc[] if PANDOCVERSION >= else doc[1:]
process(meta)
attach_attrs_image = attach_attrs_factory(Image,
extract_attrs=_extract_attrs)
detach_attrs_image = detach_attrs_factory(Image)
insert_secnos = insert_secnos_factory(Image)
delete_secnos = delete_secnos_factory(Image)
filters = [insert_secnos, process_figures, delete_secnos] \
if PANDOCVERSION >= else \
[attach_attrs_image, insert_secnos, process_figures,
delete_secnos, detach_attrs_image]
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
filters, blocks)
process_refs = process_refs_factory(references.keys())
replace_refs = replace_refs_factory(references,
use_cleveref_default, False,
plusname if not capitalize else
[name.title() for name in plusname],
starname, )
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
[repair_refs, process_refs, replace_refs],
altered)
if fmt == :
rawblocks = []
if has_unnumbered_figures:
rawblocks += [RawBlock(, TEX0),
RawBlock(, TEX1),
RawBlock(, TEX2)]
if captionname != :
rawblocks += [RawBlock(, TEX3 % captionname)]
insert_rawblocks = insert_rawblocks_factory(rawblocks)
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
[insert_rawblocks], altered)
if PANDOCVERSION >= :
doc[] = altered
else:
doc = doc[:1] + altered
json.dump(doc, STDOUT)
STDOUT.flush() |
def split_and_save_datasets(X, Y, paths):
shuffled_idxs = np.random.permutation(np.arange(len(X)))
for i in range(len(paths)):
X_i = X[shuffled_idxs[i::len(paths)]]
Y_i = Y[shuffled_idxs[i::len(paths)]]
np.savez(paths[i], X=X_i, Y=Y_i) | Shuffle X and Y into n / len(paths) datasets, and save them
to disk at the locations provided in paths. | ### Input:
Shuffle X and Y into n / len(paths) datasets, and save them
to disk at the locations provided in paths.
### Response:
def split_and_save_datasets(X, Y, paths):
shuffled_idxs = np.random.permutation(np.arange(len(X)))
for i in range(len(paths)):
X_i = X[shuffled_idxs[i::len(paths)]]
Y_i = Y[shuffled_idxs[i::len(paths)]]
np.savez(paths[i], X=X_i, Y=Y_i) |
def mouse_move_event(self, event):
self.example.mouse_position_event(event.x(), event.y()) | Forward mouse cursor position events to the example | ### Input:
Forward mouse cursor position events to the example
### Response:
def mouse_move_event(self, event):
self.example.mouse_position_event(event.x(), event.y()) |
def apply_regardless(self, fn, *a, **kw):
if self.has_annotations(fn):
return self.apply(fn, *a, **kw)
return fn(*a, **kw) | Like `apply`, but applies if callable is not annotated. | ### Input:
Like `apply`, but applies if callable is not annotated.
### Response:
def apply_regardless(self, fn, *a, **kw):
if self.has_annotations(fn):
return self.apply(fn, *a, **kw)
return fn(*a, **kw) |
def tovalues(self, element_value):
if not isinstance(element_value, (six.integer_types, CIMInt)):
raise TypeError(
_format("The value for value-mapped {0} is not "
"integer-typed, but has Python type: {1}",
self._element_str(), type(element_value)))
try:
return self._b2v_single_dict[element_value]
except KeyError:
pass
for range_tuple in self._b2v_range_tuple_list:
lo, hi, values_str = range_tuple
if lo <= element_value <= hi:
return values_str
if self._b2v_unclaimed is not None:
return self._b2v_unclaimed
raise ValueError(
_format("The value for value-mapped {0} is outside of the set "
"defined by its ValueMap qualifier: {1!A}",
self._element_str(), element_value)) | Return the `Values` string for an element value, based upon this value
mapping.
Parameters:
element_value (:term:`integer` or :class:`~pywbem.CIMInt`):
The value of the CIM element (property, method, parameter).
Returns:
:term:`string`:
The `Values` string for the element value.
Raises:
ValueError: Element value outside of the set defined by `ValueMap`.
TypeError: Element value is not an integer type. | ### Input:
Return the `Values` string for an element value, based upon this value
mapping.
Parameters:
element_value (:term:`integer` or :class:`~pywbem.CIMInt`):
The value of the CIM element (property, method, parameter).
Returns:
:term:`string`:
The `Values` string for the element value.
Raises:
ValueError: Element value outside of the set defined by `ValueMap`.
TypeError: Element value is not an integer type.
### Response:
def tovalues(self, element_value):
if not isinstance(element_value, (six.integer_types, CIMInt)):
raise TypeError(
_format("The value for value-mapped {0} is not "
"integer-typed, but has Python type: {1}",
self._element_str(), type(element_value)))
try:
return self._b2v_single_dict[element_value]
except KeyError:
pass
for range_tuple in self._b2v_range_tuple_list:
lo, hi, values_str = range_tuple
if lo <= element_value <= hi:
return values_str
if self._b2v_unclaimed is not None:
return self._b2v_unclaimed
raise ValueError(
_format("The value for value-mapped {0} is outside of the set "
"defined by its ValueMap qualifier: {1!A}",
self._element_str(), element_value)) |
def _set_prefix(self):
if self.prefix is not None:
value = self.prefix.rstrip()
else:
module_path_parts = self.__module_path_split[:-1]
if module_path_parts[-1] == :
module_path_parts.pop()
value = .join(module_path_parts)
self._prefix = value.upper() | Called by ``__init()__`` to set the object's ``_prefix`` attribute,
which determines the prefix app users must use when overriding
settings associated with this helper. For example:
If the ``_prefix`` attribute were to be set to "YOURAPP", and there
exists an app setting called ``SETTING_NAME``, app users would override
that setting by adding a variable with the name ``YOURAPP_SETTING_NAME``
to their Django settings.
Developers can choose their own prefix by setting the ``prefix``
attribute on their helper class. If no value is specified, a deterministic
default value is generated, based on the where the helper class is defined.
For example:
A helper class defined in ``yourapp/conf/settings.py`` or
``yourapp/settings.py`` would be assigned the prefix: ``"YOURAPP"``.
A helper class is defined in ``yourapp/subapp/conf/settings.py`` or
``yourapp/subapp/settings.py`` would be assigned the prefix: ``"YOURAPP_SUBAPP"``. | ### Input:
Called by ``__init()__`` to set the object's ``_prefix`` attribute,
which determines the prefix app users must use when overriding
settings associated with this helper. For example:
If the ``_prefix`` attribute were to be set to "YOURAPP", and there
exists an app setting called ``SETTING_NAME``, app users would override
that setting by adding a variable with the name ``YOURAPP_SETTING_NAME``
to their Django settings.
Developers can choose their own prefix by setting the ``prefix``
attribute on their helper class. If no value is specified, a deterministic
default value is generated, based on the where the helper class is defined.
For example:
A helper class defined in ``yourapp/conf/settings.py`` or
``yourapp/settings.py`` would be assigned the prefix: ``"YOURAPP"``.
A helper class is defined in ``yourapp/subapp/conf/settings.py`` or
``yourapp/subapp/settings.py`` would be assigned the prefix: ``"YOURAPP_SUBAPP"``.
### Response:
def _set_prefix(self):
if self.prefix is not None:
value = self.prefix.rstrip()
else:
module_path_parts = self.__module_path_split[:-1]
if module_path_parts[-1] == :
module_path_parts.pop()
value = .join(module_path_parts)
self._prefix = value.upper() |
def timetuple(self):
return time.struct_time((
self.year,
self.month,
self.day,
0,
0,
0,
self.weekday(),
self.dayofyear(),
-1
)) | It's equivalent to:
>>> time.struct_time((d.year, d.month, d.day, d.hour, d.minute, d.second, d.weekday(), dayofyear, [-1|1|0])) # doctest: +SKIP
time.struct_time(tm_year=2015, tm_mon=7, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=1, tm_yday=209, tm_isdst=-1)
The tm_isdst flag of the result is set according to the dst() method: `tzinfo`
is None or dst() returns None, tm_isdst is set to -1; else if dst()
returns a non-zero value, tm_isdst is set to 1; else tm_isdst is set to 0.
:return: A :py:class:`time.struct_time` such as returned by time.localtime().
:rtype: :py:class:`time.struct_time` | ### Input:
It's equivalent to:
>>> time.struct_time((d.year, d.month, d.day, d.hour, d.minute, d.second, d.weekday(), dayofyear, [-1|1|0])) # doctest: +SKIP
time.struct_time(tm_year=2015, tm_mon=7, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=1, tm_yday=209, tm_isdst=-1)
The tm_isdst flag of the result is set according to the dst() method: `tzinfo`
is None or dst() returns None, tm_isdst is set to -1; else if dst()
returns a non-zero value, tm_isdst is set to 1; else tm_isdst is set to 0.
:return: A :py:class:`time.struct_time` such as returned by time.localtime().
:rtype: :py:class:`time.struct_time`
### Response:
def timetuple(self):
return time.struct_time((
self.year,
self.month,
self.day,
0,
0,
0,
self.weekday(),
self.dayofyear(),
-1
)) |
def _send_one(self, stream, handle, target_id, name):
if not stream:
return
data = str(target_id)
if name:
data = % (target_id, name)
stream.send(
mitogen.core.Message(
handle=handle,
data=data.encode(),
dst_id=stream.remote_id,
)
) | Compose and send an update message on a stream.
:param mitogen.core.Stream stream:
Stream to send it on.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
Context name or :data:`None`. | ### Input:
Compose and send an update message on a stream.
:param mitogen.core.Stream stream:
Stream to send it on.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
Context name or :data:`None`.
### Response:
def _send_one(self, stream, handle, target_id, name):
if not stream:
return
data = str(target_id)
if name:
data = % (target_id, name)
stream.send(
mitogen.core.Message(
handle=handle,
data=data.encode(),
dst_id=stream.remote_id,
)
) |
def _get_scope_with_symbol(self, name):
scope = self
while True:
parent = scope.get_enclosing_scope()
if parent is None:
return
if name in parent.symbols:
return parent
scope = parent | Return a scope containing passed name as a symbol name. | ### Input:
Return a scope containing passed name as a symbol name.
### Response:
def _get_scope_with_symbol(self, name):
scope = self
while True:
parent = scope.get_enclosing_scope()
if parent is None:
return
if name in parent.symbols:
return parent
scope = parent |
def _set_level2_into_level1(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=level2_into_level1.level2_into_level1, is_container=, presence=True, yang_name="level2-into-level1", rest_name="level-1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__level2_into_level1 = t
if hasattr(self, ):
self._set() | Setter method for level2_into_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/isis/level_2/into/level2_into_level1 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_level2_into_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_level2_into_level1() directly. | ### Input:
Setter method for level2_into_level1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/isis/level_2/into/level2_into_level1 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_level2_into_level1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_level2_into_level1() directly.
### Response:
def _set_level2_into_level1(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=level2_into_level1.level2_into_level1, is_container=, presence=True, yang_name="level2-into-level1", rest_name="level-1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__level2_into_level1 = t
if hasattr(self, ):
self._set() |
def data_log_likelihood(self, successes, trials, beta):
return binom.logpmf(successes, trials, 1.0 / (1 + np.exp(-beta))).sum() | Calculates the log-likelihood of a Polya tree bin given the beta values. | ### Input:
Calculates the log-likelihood of a Polya tree bin given the beta values.
### Response:
def data_log_likelihood(self, successes, trials, beta):
return binom.logpmf(successes, trials, 1.0 / (1 + np.exp(-beta))).sum() |
def mask_nonfinite(self):
self.mask = np.logical_and(self.mask, (np.isfinite(self.intensity))) | Extend the mask with the image elements where the intensity is NaN. | ### Input:
Extend the mask with the image elements where the intensity is NaN.
### Response:
def mask_nonfinite(self):
self.mask = np.logical_and(self.mask, (np.isfinite(self.intensity))) |
def get_datatypes(self):
datatypes = set()
for element in self.elements:
if isinstance(element, PolygonSet):
datatypes.update(element.datatypes)
elif isinstance(element, CellReference) or isinstance(
element, CellArray):
datatypes.update(element.ref_cell.get_datatypes())
return datatypes | Returns a set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell. | ### Input:
Returns a set of datatypes in this cell.
Returns
-------
out : set
Set of the datatypes used in this cell.
### Response:
def get_datatypes(self):
datatypes = set()
for element in self.elements:
if isinstance(element, PolygonSet):
datatypes.update(element.datatypes)
elif isinstance(element, CellReference) or isinstance(
element, CellArray):
datatypes.update(element.ref_cell.get_datatypes())
return datatypes |
def array_to_img(x, dim_ordering=(0, 1, 2), scale=True):
x = x.transpose(dim_ordering)
if scale:
x += max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x = x / x_max
x *= 255
if x.shape[2] == 3:
return PIL.Image.fromarray(x.astype(), )
elif x.shape[2] == 1:
return PIL.Image.fromarray(x[:, :, 0].astype(), )
else:
raise Exception(, x.shape[2]) | Converts a numpy array to PIL image object (uint8 format).
Parameters
----------
x : numpy.array
An image with dimension of 3 and channels of 1 or 3.
dim_ordering : tuple of 3 int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
scale : boolean
If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.
Returns
-------
PIL.image
An image.
References
-----------
`PIL Image.fromarray <http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=fromarray>`__ | ### Input:
Converts a numpy array to PIL image object (uint8 format).
Parameters
----------
x : numpy.array
An image with dimension of 3 and channels of 1 or 3.
dim_ordering : tuple of 3 int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
scale : boolean
If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.
Returns
-------
PIL.image
An image.
References
-----------
`PIL Image.fromarray <http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=fromarray>`__
### Response:
def array_to_img(x, dim_ordering=(0, 1, 2), scale=True):
x = x.transpose(dim_ordering)
if scale:
x += max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x = x / x_max
x *= 255
if x.shape[2] == 3:
return PIL.Image.fromarray(x.astype(), )
elif x.shape[2] == 1:
return PIL.Image.fromarray(x[:, :, 0].astype(), )
else:
raise Exception(, x.shape[2]) |
def fill_initial_broks(self, broker_name):
broker_uuid = None
logger.debug("My brokers: %s", self.my_daemon.brokers)
for broker_link in list(self.my_daemon.brokers.values()):
logger.debug("Searching broker: %s", broker_link)
if broker_name == broker_link.name:
broker_uuid = broker_link.uuid
logger.info("Filling initial broks for: %s (%s)", broker_name, broker_uuid)
break
else:
if self.pushed_conf:
logger.error("Requested initial broks for an unknown broker: %s", broker_name)
else:
logger.info("Requested initial broks for an unknown broker: %s", broker_name)
return 0
if self.my_daemon.brokers[broker_uuid].initialized:
logger.warning("The broker %s still got its initial broks...", broker_name)
return 0
initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
brok = self.get_program_status_brok()
self.add_brok(brok, broker_uuid)
self.add_brok(brok, broker_uuid)
final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
self.my_daemon.brokers[broker_uuid].initialized = True
self.send_broks_to_modules()
self.raised_initial_broks = True
logger.info("Created %d initial broks for %s",
final_broks_count - initial_broks_count, broker_name)
return final_broks_count - initial_broks_count | Create initial broks for a specific broker
:param broker_name: broker name
:type broker_name: str
:return: number of created broks | ### Input:
Create initial broks for a specific broker
:param broker_name: broker name
:type broker_name: str
:return: number of created broks
### Response:
def fill_initial_broks(self, broker_name):
broker_uuid = None
logger.debug("My brokers: %s", self.my_daemon.brokers)
for broker_link in list(self.my_daemon.brokers.values()):
logger.debug("Searching broker: %s", broker_link)
if broker_name == broker_link.name:
broker_uuid = broker_link.uuid
logger.info("Filling initial broks for: %s (%s)", broker_name, broker_uuid)
break
else:
if self.pushed_conf:
logger.error("Requested initial broks for an unknown broker: %s", broker_name)
else:
logger.info("Requested initial broks for an unknown broker: %s", broker_name)
return 0
if self.my_daemon.brokers[broker_uuid].initialized:
logger.warning("The broker %s still got its initial broks...", broker_name)
return 0
initial_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
brok = self.get_program_status_brok()
self.add_brok(brok, broker_uuid)
self.add_brok(brok, broker_uuid)
final_broks_count = len(self.my_daemon.brokers[broker_uuid].broks)
self.my_daemon.brokers[broker_uuid].initialized = True
self.send_broks_to_modules()
self.raised_initial_broks = True
logger.info("Created %d initial broks for %s",
final_broks_count - initial_broks_count, broker_name)
return final_broks_count - initial_broks_count |
def catch(cls, catch_exception, config=):
def wrap(method):
@functools.wraps(method)
def wrapped_method(self, *args, **kwargs):
assert isinstance(self, HA)
delay_policy = self.ha_get_delay_policy(config)
max_retries = self.ha_get_config(config).max_retries
for retries in itertools.count():
try:
return method(self, *args, **kwargs)
except catch_exception as e:
res = self.ha_on_error(method, e, args, kwargs)
if res is not None:
args, kwargs = res
if max_retries and retries >= max_retries:
raise
tts = next(delay_policy)
time.sleep(tts)
return wrapped_method
return wrap | Decorator class method catching exceptions raised by the wrapped
member function. When exception is caught, the decorator waits
for an amount of time specified in the `ha_config`.
:param catch_exception: Exception class or tuple of exception classes. | ### Input:
Decorator class method catching exceptions raised by the wrapped
member function. When exception is caught, the decorator waits
for an amount of time specified in the `ha_config`.
:param catch_exception: Exception class or tuple of exception classes.
### Response:
def catch(cls, catch_exception, config=):
def wrap(method):
@functools.wraps(method)
def wrapped_method(self, *args, **kwargs):
assert isinstance(self, HA)
delay_policy = self.ha_get_delay_policy(config)
max_retries = self.ha_get_config(config).max_retries
for retries in itertools.count():
try:
return method(self, *args, **kwargs)
except catch_exception as e:
res = self.ha_on_error(method, e, args, kwargs)
if res is not None:
args, kwargs = res
if max_retries and retries >= max_retries:
raise
tts = next(delay_policy)
time.sleep(tts)
return wrapped_method
return wrap |
def exec_args(args, in_data=, chdir=None, shell=None, emulate_tty=False):
LOG.debug(, args, chdir)
assert isinstance(args, list)
if emulate_tty:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
proc = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
stderr=stderr,
stdin=subprocess.PIPE,
cwd=chdir,
)
stdout, stderr = proc.communicate(in_data)
if emulate_tty:
stdout = stdout.replace(b(), b())
return proc.returncode, stdout, stderr or b() | Run a command in a subprocess, emulating the argument handling behaviour of
SSH.
:param list[str]:
Argument vector.
:param bytes in_data:
Optional standard input for the command.
:param bool emulate_tty:
If :data:`True`, arrange for stdout and stderr to be merged into the
stdout pipe and for LF to be translated into CRLF, emulating the
behaviour of a TTY.
:return:
(return code, stdout bytes, stderr bytes) | ### Input:
Run a command in a subprocess, emulating the argument handling behaviour of
SSH.
:param list[str]:
Argument vector.
:param bytes in_data:
Optional standard input for the command.
:param bool emulate_tty:
If :data:`True`, arrange for stdout and stderr to be merged into the
stdout pipe and for LF to be translated into CRLF, emulating the
behaviour of a TTY.
:return:
(return code, stdout bytes, stderr bytes)
### Response:
def exec_args(args, in_data=, chdir=None, shell=None, emulate_tty=False):
LOG.debug(, args, chdir)
assert isinstance(args, list)
if emulate_tty:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
proc = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
stderr=stderr,
stdin=subprocess.PIPE,
cwd=chdir,
)
stdout, stderr = proc.communicate(in_data)
if emulate_tty:
stdout = stdout.replace(b(), b())
return proc.returncode, stdout, stderr or b() |
def update_policy(self, scaling_group, policy, name=None, policy_type=None,
cooldown=None, change=None, is_percent=False,
desired_capacity=None, args=None):
return self._manager.update_policy(scaling_group, policy, name=name,
policy_type=policy_type, cooldown=cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args) | Updates the specified policy. One or more of the parameters may be
specified. | ### Input:
Updates the specified policy. One or more of the parameters may be
specified.
### Response:
def update_policy(self, scaling_group, policy, name=None, policy_type=None,
cooldown=None, change=None, is_percent=False,
desired_capacity=None, args=None):
return self._manager.update_policy(scaling_group, policy, name=name,
policy_type=policy_type, cooldown=cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args) |
def parse_trial_data(content):
trial_records = []
for trial_data in content:
for phase_i in range(len(trial_data[])):
hparam = json.loads(trial_data[][phase_i])[]
hparam[] = trial_data[]
if in trial_data.keys() and phase_i < len(trial_data[]):
reward = json.loads(trial_data[][phase_i][])
if isinstance(reward, (float, int)):
dict_tmp = {**hparam, **{: reward}}
elif isinstance(reward, dict):
dict_tmp = {**hparam, **reward}
else:
raise ValueError("Invalid finalMetricsData format: {}/{}".format(type(reward), reward))
else:
dict_tmp = hparam
trial_records.append(dict_tmp)
return trial_records | output: List[Dict] | ### Input:
output: List[Dict]
### Response:
def parse_trial_data(content):
trial_records = []
for trial_data in content:
for phase_i in range(len(trial_data[])):
hparam = json.loads(trial_data[][phase_i])[]
hparam[] = trial_data[]
if in trial_data.keys() and phase_i < len(trial_data[]):
reward = json.loads(trial_data[][phase_i][])
if isinstance(reward, (float, int)):
dict_tmp = {**hparam, **{: reward}}
elif isinstance(reward, dict):
dict_tmp = {**hparam, **reward}
else:
raise ValueError("Invalid finalMetricsData format: {}/{}".format(type(reward), reward))
else:
dict_tmp = hparam
trial_records.append(dict_tmp)
return trial_records |
def strip_v2_elements(etree_obj):
if etree_obj.tag == v2_0_tag():
strip_logEntry(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_log(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_node(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_node_list(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_system_metadata(etree_obj)
else:
raise ValueError(.format(etree_obj.tag)) | In-place remove elements and attributes that are only valid in v2 types.
Args: etree_obj: ElementTree ElementTree holding one of the DataONE API types
that changed between v1 and v2. | ### Input:
In-place remove elements and attributes that are only valid in v2 types.
Args: etree_obj: ElementTree ElementTree holding one of the DataONE API types
that changed between v1 and v2.
### Response:
def strip_v2_elements(etree_obj):
if etree_obj.tag == v2_0_tag():
strip_logEntry(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_log(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_node(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_node_list(etree_obj)
elif etree_obj.tag == v2_0_tag():
strip_system_metadata(etree_obj)
else:
raise ValueError(.format(etree_obj.tag)) |
def _build_header(self):
logger.debug("===============")
logger.debug("Building header")
logger.debug("===============")
self.template += hs.header | Adds the header template to the master template string | ### Input:
Adds the header template to the master template string
### Response:
def _build_header(self):
logger.debug("===============")
logger.debug("Building header")
logger.debug("===============")
self.template += hs.header |
def preseed_package(pkg_name, preseed):
for q_name, _ in preseed.items():
q_type, q_answer = _
run_as_root( % locals()) | Enable unattended package installation by preseeding ``debconf``
parameters.
Example::
import burlap
# Unattended install of Postfix mail server
burlap.deb.preseed_package('postfix', {
'postfix/main_mailer_type': ('select', 'Internet Site'),
'postfix/mailname': ('string', 'example.com'),
'postfix/destinations': ('string', 'example.com, localhost.localdomain, localhost'),
})
burlap.deb.install('postfix') | ### Input:
Enable unattended package installation by preseeding ``debconf``
parameters.
Example::
import burlap
# Unattended install of Postfix mail server
burlap.deb.preseed_package('postfix', {
'postfix/main_mailer_type': ('select', 'Internet Site'),
'postfix/mailname': ('string', 'example.com'),
'postfix/destinations': ('string', 'example.com, localhost.localdomain, localhost'),
})
burlap.deb.install('postfix')
### Response:
def preseed_package(pkg_name, preseed):
for q_name, _ in preseed.items():
q_type, q_answer = _
run_as_root( % locals()) |
def cdf(data,mode=,**kwargs):
return ( np.linspace(0.0,1.0,len(data)), np.sort(data) ) | Return cumulative density.
:arguments:
**data** (``<numpy.ndarray>``)
Input data, to plot the distribution for.
:returns:
**P** (``<numpy.ndarray>``)
Cumulative probability.
**x** (``<numpy.ndarray>``)
Data points. | ### Input:
Return cumulative density.
:arguments:
**data** (``<numpy.ndarray>``)
Input data, to plot the distribution for.
:returns:
**P** (``<numpy.ndarray>``)
Cumulative probability.
**x** (``<numpy.ndarray>``)
Data points.
### Response:
def cdf(data,mode=,**kwargs):
return ( np.linspace(0.0,1.0,len(data)), np.sort(data) ) |
async def upload(self, files: Sequence[Union[str, Path]],
basedir: Union[str, Path] = None,
show_progress: bool = False):
params = {}
if self.owner_access_key:
params[] = self.owner_access_key
base_path = (Path.cwd() if basedir is None
else Path(basedir).resolve())
files = [Path(file).resolve() for file in files]
total_size = 0
for file_path in files:
total_size += file_path.stat().st_size
tqdm_obj = tqdm(desc=,
unit=, unit_scale=True,
total=total_size,
disable=not show_progress)
with tqdm_obj:
attachments = []
for file_path in files:
try:
attachments.append(AttachedFile(
str(file_path.relative_to(base_path)),
ProgressReportingReader(str(file_path),
tqdm_instance=tqdm_obj),
,
))
except ValueError:
msg = \
.format(file_path, base_path)
raise ValueError(msg) from None
rqst = Request(self.session,
, .format(self.kernel_id),
params=params)
rqst.attach_files(attachments)
async with rqst.fetch() as resp:
return resp | Uploads the given list of files to the compute session.
You may refer them in the batch-mode execution or from the code
executed in the server afterwards.
:param files: The list of file paths in the client-side.
If the paths include directories, the location of them in the compute
session is calculated from the relative path to *basedir* and all
intermediate parent directories are automatically created if not exists.
For example, if a file path is ``/home/user/test/data.txt`` (or
``test/data.txt``) where *basedir* is ``/home/user`` (or the current
working directory is ``/home/user``), the uploaded file is located at
``/home/work/test/data.txt`` in the compute session container.
:param basedir: The directory prefix where the files reside.
The default value is the current working directory.
:param show_progress: Displays a progress bar during uploads. | ### Input:
Uploads the given list of files to the compute session.
You may refer them in the batch-mode execution or from the code
executed in the server afterwards.
:param files: The list of file paths in the client-side.
If the paths include directories, the location of them in the compute
session is calculated from the relative path to *basedir* and all
intermediate parent directories are automatically created if not exists.
For example, if a file path is ``/home/user/test/data.txt`` (or
``test/data.txt``) where *basedir* is ``/home/user`` (or the current
working directory is ``/home/user``), the uploaded file is located at
``/home/work/test/data.txt`` in the compute session container.
:param basedir: The directory prefix where the files reside.
The default value is the current working directory.
:param show_progress: Displays a progress bar during uploads.
### Response:
async def upload(self, files: Sequence[Union[str, Path]],
basedir: Union[str, Path] = None,
show_progress: bool = False):
params = {}
if self.owner_access_key:
params[] = self.owner_access_key
base_path = (Path.cwd() if basedir is None
else Path(basedir).resolve())
files = [Path(file).resolve() for file in files]
total_size = 0
for file_path in files:
total_size += file_path.stat().st_size
tqdm_obj = tqdm(desc=,
unit=, unit_scale=True,
total=total_size,
disable=not show_progress)
with tqdm_obj:
attachments = []
for file_path in files:
try:
attachments.append(AttachedFile(
str(file_path.relative_to(base_path)),
ProgressReportingReader(str(file_path),
tqdm_instance=tqdm_obj),
,
))
except ValueError:
msg = \
.format(file_path, base_path)
raise ValueError(msg) from None
rqst = Request(self.session,
, .format(self.kernel_id),
params=params)
rqst.attach_files(attachments)
async with rqst.fetch() as resp:
return resp |
def align(self, alignraster, how=np.mean, cxsize=None, cysize=None):
return align_georasters(self, alignraster, how=how, cxsize=cxsize, cysize=cysize) | geo.align(geo2, how=np.mean)
Returns both georasters aligned and with the same pixelsize | ### Input:
geo.align(geo2, how=np.mean)
Returns both georasters aligned and with the same pixelsize
### Response:
def align(self, alignraster, how=np.mean, cxsize=None, cysize=None):
return align_georasters(self, alignraster, how=how, cxsize=cxsize, cysize=cysize) |
def get_specific_user():
user = get_user()
if salt.utils.platform.is_windows():
if _win_current_user_is_admin():
return .format(user)
else:
env_vars = (,)
if user == :
for evar in env_vars:
if evar in os.environ:
return .format(os.environ[evar])
return user | Get a user name for publishing. If you find the user is "root" attempt to be
more specific | ### Input:
Get a user name for publishing. If you find the user is "root" attempt to be
more specific
### Response:
def get_specific_user():
user = get_user()
if salt.utils.platform.is_windows():
if _win_current_user_is_admin():
return .format(user)
else:
env_vars = (,)
if user == :
for evar in env_vars:
if evar in os.environ:
return .format(os.environ[evar])
return user |
def lookup_family_by_name(name):
for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, ):
if ops.o_name == name:
return ops
return None | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106.
Positional arguments:
name -- string.
Returns:
genl_ops class instance or None. | ### Input:
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106.
Positional arguments:
name -- string.
Returns:
genl_ops class instance or None.
### Response:
def lookup_family_by_name(name):
for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, ):
if ops.o_name == name:
return ops
return None |
def anytype(self):
p, u = Namespace.xsdns
mp = self.root.findPrefix(u)
if mp is None:
mp = p
self.root.addPrefix(p, u)
return .join((mp, )) | create an xsd:anyType reference | ### Input:
create an xsd:anyType reference
### Response:
def anytype(self):
p, u = Namespace.xsdns
mp = self.root.findPrefix(u)
if mp is None:
mp = p
self.root.addPrefix(p, u)
return .join((mp, )) |
def send_peers(self, connection_id):
with self._lock:
peer_endpoints = list(self._peers.values())
if self._endpoint:
peer_endpoints.append(self._endpoint)
peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)
try:
self._network.send(
validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
peers_response.SerializeToString(),
connection_id,
one_way=True)
except ValueError:
LOGGER.debug("Connection disconnected: %s", connection_id) | Sends a message containing our peers to the
connection identified by connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket. | ### Input:
Sends a message containing our peers to the
connection identified by connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
### Response:
def send_peers(self, connection_id):
with self._lock:
peer_endpoints = list(self._peers.values())
if self._endpoint:
peer_endpoints.append(self._endpoint)
peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)
try:
self._network.send(
validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
peers_response.SerializeToString(),
connection_id,
one_way=True)
except ValueError:
LOGGER.debug("Connection disconnected: %s", connection_id) |
def shell_join(delim, it):
return ShellQuoted(delim.join(raw_shell(s) for s in it)) | Joins an iterable of ShellQuoted with a delimiter between each two | ### Input:
Joins an iterable of ShellQuoted with a delimiter between each two
### Response:
def shell_join(delim, it):
return ShellQuoted(delim.join(raw_shell(s) for s in it)) |
def _is_valid_value(self):
comp_str = self._encoded_value
value_pattern = []
value_pattern.append("^((")
value_pattern.append("~[")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+")
value_pattern.append(")|(")
value_pattern.append("[")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+(![")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+)*")
value_pattern.append("))$")
value_rxc = re.compile("".join(value_pattern))
return value_rxc.match(comp_str) is not None | Return True if the value of component in generic attribute is valid,
and otherwise False.
:returns: True if value is valid, False otherwise
:rtype: boolean | ### Input:
Return True if the value of component in generic attribute is valid,
and otherwise False.
:returns: True if value is valid, False otherwise
:rtype: boolean
### Response:
def _is_valid_value(self):
comp_str = self._encoded_value
value_pattern = []
value_pattern.append("^((")
value_pattern.append("~[")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+")
value_pattern.append(")|(")
value_pattern.append("[")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+(![")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+)*")
value_pattern.append("))$")
value_rxc = re.compile("".join(value_pattern))
return value_rxc.match(comp_str) is not None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.