code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def convert(self,label,units=None,conversion_function=convert_time):
label_no = self.get_label_no(label)
new_label, new_column = self.get_converted(label_no,units,conversion_function)
labels = [LabelDimension(l) for l in self.labels]
labels[label_no] = new_label
matrix = self.matrix.copy()
matrix[:,label_no] = new_column
return LabeledMatrix(matrix,labels) | converts a dimension in place | ### Input:
converts a dimension in place
### Response:
def convert(self,label,units=None,conversion_function=convert_time):
label_no = self.get_label_no(label)
new_label, new_column = self.get_converted(label_no,units,conversion_function)
labels = [LabelDimension(l) for l in self.labels]
labels[label_no] = new_label
matrix = self.matrix.copy()
matrix[:,label_no] = new_column
return LabeledMatrix(matrix,labels) |
def _render_rows(self):
_datas = getattr(self, , ())
headers = getattr(self, , ())
for index, row in enumerate(_datas):
row_number = index + 2
for col_num, value in enumerate(row):
cell = self.worksheet.cell(row=row_number, column=col_num + 1)
if value is not None:
cell.value = value
else:
cell.value = ""
if len(headers) > col_num:
header = headers[col_num]
format = get_cell_format(header)
if format is not None:
cell.number_format = format | Render the rows in the current stylesheet | ### Input:
Render the rows in the current stylesheet
### Response:
def _render_rows(self):
_datas = getattr(self, , ())
headers = getattr(self, , ())
for index, row in enumerate(_datas):
row_number = index + 2
for col_num, value in enumerate(row):
cell = self.worksheet.cell(row=row_number, column=col_num + 1)
if value is not None:
cell.value = value
else:
cell.value = ""
if len(headers) > col_num:
header = headers[col_num]
format = get_cell_format(header)
if format is not None:
cell.number_format = format |
def log_tensor_stats(self, tensor, name):
if (isinstance(tensor, tuple) or isinstance(tensor, list)):
while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (isinstance(tensor[0], tuple) or isinstance(tensor[0], list)):
tensor = [item for sublist in tensor for item in sublist]
tensor = torch.cat([t.view(-1) for t in tensor])
cls.__module__, cls.__name__))
history = self._history()
if history is None or not history.compute:
return
if isinstance(tensor, torch.HalfTensor):
tensor = tensor.clone().type(torch.FloatTensor).detach()
flat = tensor.view(-1)
if not hasattr(flat, "detach"):
tensor = flat.cpu().clone().numpy()
history.row.update({
name: wandb.Histogram(tensor)
})
return
if flat.is_cuda:
if self._is_cuda_histc_supported is None:
self._is_cuda_histc_supported = True
check = torch.cuda.FloatTensor(1).fill_(0)
try:
check = flat.histc(bins=self._num_bins)
except RuntimeError as e:
if str(e).startswith("_th_histc is not implemented"):
self._is_cuda_histc_supported = False
if not self._is_cuda_histc_supported:
flat = flat.cpu().clone().detach()
if isinstance(flat, torch.cuda.HalfTensor):
flat = flat.clone().type(torch.cuda.FloatTensor).detach()
if isinstance(flat, torch.HalfTensor):
flat = flat.clone().type(torch.FloatTensor).detach()
tmin = flat.min().item()
tmax = flat.max().item()
tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax)
tensor = tensor.cpu().clone().detach()
bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1)
history.row.update({
name: wandb.Histogram(np_histogram=(
tensor.tolist(), bins.tolist()))
}) | Add distribution statistics on a tensor's elements to the current History entry | ### Input:
Add distribution statistics on a tensor's elements to the current History entry
### Response:
def log_tensor_stats(self, tensor, name):
if (isinstance(tensor, tuple) or isinstance(tensor, list)):
while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (isinstance(tensor[0], tuple) or isinstance(tensor[0], list)):
tensor = [item for sublist in tensor for item in sublist]
tensor = torch.cat([t.view(-1) for t in tensor])
cls.__module__, cls.__name__))
history = self._history()
if history is None or not history.compute:
return
if isinstance(tensor, torch.HalfTensor):
tensor = tensor.clone().type(torch.FloatTensor).detach()
flat = tensor.view(-1)
if not hasattr(flat, "detach"):
tensor = flat.cpu().clone().numpy()
history.row.update({
name: wandb.Histogram(tensor)
})
return
if flat.is_cuda:
if self._is_cuda_histc_supported is None:
self._is_cuda_histc_supported = True
check = torch.cuda.FloatTensor(1).fill_(0)
try:
check = flat.histc(bins=self._num_bins)
except RuntimeError as e:
if str(e).startswith("_th_histc is not implemented"):
self._is_cuda_histc_supported = False
if not self._is_cuda_histc_supported:
flat = flat.cpu().clone().detach()
if isinstance(flat, torch.cuda.HalfTensor):
flat = flat.clone().type(torch.cuda.FloatTensor).detach()
if isinstance(flat, torch.HalfTensor):
flat = flat.clone().type(torch.FloatTensor).detach()
tmin = flat.min().item()
tmax = flat.max().item()
tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax)
tensor = tensor.cpu().clone().detach()
bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1)
history.row.update({
name: wandb.Histogram(np_histogram=(
tensor.tolist(), bins.tolist()))
}) |
def send_response_only(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message =
if self.request_version != :
if not hasattr(self, ):
self._headers_buffer = []
self._headers_buffer.append(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode(
, )) | Send the response header only. | ### Input:
Send the response header only.
### Response:
def send_response_only(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message =
if self.request_version != :
if not hasattr(self, ):
self._headers_buffer = []
self._headers_buffer.append(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode(
, )) |
def maximize(func, x0, nmr_observations, **kwargs):
wrapped_func = SimpleCLFunction.from_string( + func.get_cl_function_name() + + func.get_cl_function_name() + + str(nmr_observations) + , dependencies=[func])
kwargs[] = nmr_observations
return minimize(wrapped_func, x0, **kwargs) | Maximization of a function.
This wraps the objective function to take the negative of the computed values and passes it then on to one
of the minimization routines.
Args:
func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
The objective list needs to be filled when the provided pointer is not null. It should contain
the function values for each observation. This list is used by non-linear least-squares routines,
and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine.
x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p'
independent variables.
nmr_observations (int): the number of observations returned by the optimization function.
**kwargs: see :func:`minimize`. | ### Input:
Maximization of a function.
This wraps the objective function to take the negative of the computed values and passes it then on to one
of the minimization routines.
Args:
func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
The objective list needs to be filled when the provided pointer is not null. It should contain
the function values for each observation. This list is used by non-linear least-squares routines,
and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine.
x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p'
independent variables.
nmr_observations (int): the number of observations returned by the optimization function.
**kwargs: see :func:`minimize`.
### Response:
def maximize(func, x0, nmr_observations, **kwargs):
wrapped_func = SimpleCLFunction.from_string( + func.get_cl_function_name() + + func.get_cl_function_name() + + str(nmr_observations) + , dependencies=[func])
kwargs[] = nmr_observations
return minimize(wrapped_func, x0, **kwargs) |
def check_spec(self, pos_args, kwargs=None):
if kwargs is None:
kwargs = {}
if self.varargs is not None or self.kwargs is not None:
raise InternalError("check_spec cannot be called on a function that takes *args or **kwargs")
missing = object()
arg_vals = [missing]*len(self.arg_names)
kw_indices = {name: i for i, name in enumerate(self.arg_names)}
for i, arg in enumerate(pos_args):
if i >= len(arg_vals):
raise ArgumentError("Too many positional arguments, first excessive argument=%s" % str(arg))
arg_vals[i] = arg
for arg, val in kwargs.items():
index = kw_indices.get(arg)
if index is None:
raise ArgumentError("Cannot find argument by name: %s" % arg)
if arg_vals[index] is not missing:
raise ValidationError("Argument %s passed twice" % arg)
arg_vals[index] = val
if len(self.arg_defaults) > 0:
for i in range(0, len(self.arg_defaults)):
neg_index = -len(self.arg_defaults) + i
if arg_vals[neg_index] is missing:
arg_vals[neg_index] = self.arg_defaults[i]
if missing in arg_vals:
index = arg_vals.index(missing)
raise ArgumentError("Missing a required argument (position: %d, name: %s)" % (index, self.arg_names[index]))
return {name: val for name, val in zip(self.arg_names, arg_vals)} | Check if there are any missing or duplicate arguments.
Args:
pos_args (list): A list of arguments that will be passed as positional
arguments.
kwargs (dict): A dictionary of the keyword arguments that will be passed.
Returns:
dict: A dictionary of argument name to argument value, pulled from either
the value passed or the default value if no argument is passed.
Raises:
ArgumentError: If a positional or keyword argument does not fit in the spec.
ValidationError: If an argument is passed twice. | ### Input:
Check if there are any missing or duplicate arguments.
Args:
pos_args (list): A list of arguments that will be passed as positional
arguments.
kwargs (dict): A dictionary of the keyword arguments that will be passed.
Returns:
dict: A dictionary of argument name to argument value, pulled from either
the value passed or the default value if no argument is passed.
Raises:
ArgumentError: If a positional or keyword argument does not fit in the spec.
ValidationError: If an argument is passed twice.
### Response:
def check_spec(self, pos_args, kwargs=None):
if kwargs is None:
kwargs = {}
if self.varargs is not None or self.kwargs is not None:
raise InternalError("check_spec cannot be called on a function that takes *args or **kwargs")
missing = object()
arg_vals = [missing]*len(self.arg_names)
kw_indices = {name: i for i, name in enumerate(self.arg_names)}
for i, arg in enumerate(pos_args):
if i >= len(arg_vals):
raise ArgumentError("Too many positional arguments, first excessive argument=%s" % str(arg))
arg_vals[i] = arg
for arg, val in kwargs.items():
index = kw_indices.get(arg)
if index is None:
raise ArgumentError("Cannot find argument by name: %s" % arg)
if arg_vals[index] is not missing:
raise ValidationError("Argument %s passed twice" % arg)
arg_vals[index] = val
if len(self.arg_defaults) > 0:
for i in range(0, len(self.arg_defaults)):
neg_index = -len(self.arg_defaults) + i
if arg_vals[neg_index] is missing:
arg_vals[neg_index] = self.arg_defaults[i]
if missing in arg_vals:
index = arg_vals.index(missing)
raise ArgumentError("Missing a required argument (position: %d, name: %s)" % (index, self.arg_names[index]))
return {name: val for name, val in zip(self.arg_names, arg_vals)} |
def replace_line_magic(source, magic, template=):
filtered = []
for line in source.splitlines():
if line.strip().startswith(magic):
substitution = template.format(line=line.replace(magic, ))
filtered.append(substitution)
else:
filtered.append(line)
return .join(filtered) | Given a cell's source, replace line magics using a formatting
template, where {line} is the string that follows the magic. | ### Input:
Given a cell's source, replace line magics using a formatting
template, where {line} is the string that follows the magic.
### Response:
def replace_line_magic(source, magic, template=):
filtered = []
for line in source.splitlines():
if line.strip().startswith(magic):
substitution = template.format(line=line.replace(magic, ))
filtered.append(substitution)
else:
filtered.append(line)
return .join(filtered) |
def parse_fields_whois(self, response):
try:
temp = response.split()
ret = {: temp[4].strip()}
if ret[] not in self.rir_whois.keys():
raise ASNRegistryError(
.format(
ret[])
)
ret[] = temp[0].strip()
ret[] = temp[2].strip()
ret[] = temp[3].strip().upper()
ret[] = temp[5].strip()
ret[] = temp[6].strip()
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError(
.format(response, e)[:100])
return ret | The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed. | ### Input:
The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
### Response:
def parse_fields_whois(self, response):
try:
temp = response.split()
ret = {: temp[4].strip()}
if ret[] not in self.rir_whois.keys():
raise ASNRegistryError(
.format(
ret[])
)
ret[] = temp[0].strip()
ret[] = temp[2].strip()
ret[] = temp[3].strip().upper()
ret[] = temp[5].strip()
ret[] = temp[6].strip()
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError(
.format(response, e)[:100])
return ret |
def extract_ends(rec, sites, flank, fw, maxfragsize=800):
nsites = len(sites)
size = len(rec)
for i, s in enumerate(sites):
newid = "{0}:{1}".format(rec.name, s)
recs = []
if i == 0 or s - sites[i - 1] <= maxfragsize:
newidL = newid + "L"
left = max(s - flank, 0)
right = s
frag = rec.seq[left:right].strip("Nn")
recL = SeqRecord(frag, id=newidL, description="")
if i == 0 and s > maxfragsize:
pass
else:
recs.append(recL)
if i == nsites - 1 or sites[i + 1] - s <= maxfragsize:
newidR = newid + "R"
left = s
right = min(s + flank, size)
frag = rec.seq[left:right].strip("Nn")
recR = SeqRecord(frag, id=newidR, description="")
if i == nsites - 1 and size - s > maxfragsize:
pass
else:
recs.append(recR)
SeqIO.write(recs, fw, "fasta") | Extraction of ends of fragments above certain size. | ### Input:
Extraction of ends of fragments above certain size.
### Response:
def extract_ends(rec, sites, flank, fw, maxfragsize=800):
nsites = len(sites)
size = len(rec)
for i, s in enumerate(sites):
newid = "{0}:{1}".format(rec.name, s)
recs = []
if i == 0 or s - sites[i - 1] <= maxfragsize:
newidL = newid + "L"
left = max(s - flank, 0)
right = s
frag = rec.seq[left:right].strip("Nn")
recL = SeqRecord(frag, id=newidL, description="")
if i == 0 and s > maxfragsize:
pass
else:
recs.append(recL)
if i == nsites - 1 or sites[i + 1] - s <= maxfragsize:
newidR = newid + "R"
left = s
right = min(s + flank, size)
frag = rec.seq[left:right].strip("Nn")
recR = SeqRecord(frag, id=newidR, description="")
if i == nsites - 1 and size - s > maxfragsize:
pass
else:
recs.append(recR)
SeqIO.write(recs, fw, "fasta") |
def config_dict(config_file=None, auto_find=False, verify=True, **cfg_options):
if not config_file:
config_file = []
cfg_parser = ConfigParser.ConfigParser(**cfg_options)
cfg_files = []
if config_file:
if not isinstance(config_file, (list, tuple)):
if isinstance(config_file, str):
cfg_files.append(config_file)
else:
raise TypeError("config_files must be a list or a string")
else:
cfg_files.extend(config_file)
if auto_find:
cfg_files.extend(find_files_list(
current_root if isinstance(auto_find, bool) else auto_find,
ext=(".cfg", ".config", ".ini")))
logger.info("config files to be used: {0}".format(cfg_files))
if verify:
cfg_parser.read([cfg for cfg in cfg_files if os.path.exists(cfg)])
else:
cfg_parser.read(cfg_files)
return dict((section, dict(cfg_parser.items(section)))
for section in cfg_parser.sections()) | Return configuration options as dictionary. Accepts either a single
config file or a list of files. Auto find will search for all .cfg, .config
and .ini in the execution directory and package root (unsafe but handy).
.. code:: python
reusables.config_dict(os.path.join("test", "data", "test_config.ini"))
# {'General': {'example': 'A regular string'},
# 'Section 2': {'anint': '234',
# 'examplelist': '234,123,234,543',
# 'floatly': '4.4',
# 'my_bool': 'yes'}}
:param config_file: path or paths to the files location
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: dictionary of the config files | ### Input:
Return configuration options as dictionary. Accepts either a single
config file or a list of files. Auto find will search for all .cfg, .config
and .ini in the execution directory and package root (unsafe but handy).
.. code:: python
reusables.config_dict(os.path.join("test", "data", "test_config.ini"))
# {'General': {'example': 'A regular string'},
# 'Section 2': {'anint': '234',
# 'examplelist': '234,123,234,543',
# 'floatly': '4.4',
# 'my_bool': 'yes'}}
:param config_file: path or paths to the files location
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: dictionary of the config files
### Response:
def config_dict(config_file=None, auto_find=False, verify=True, **cfg_options):
if not config_file:
config_file = []
cfg_parser = ConfigParser.ConfigParser(**cfg_options)
cfg_files = []
if config_file:
if not isinstance(config_file, (list, tuple)):
if isinstance(config_file, str):
cfg_files.append(config_file)
else:
raise TypeError("config_files must be a list or a string")
else:
cfg_files.extend(config_file)
if auto_find:
cfg_files.extend(find_files_list(
current_root if isinstance(auto_find, bool) else auto_find,
ext=(".cfg", ".config", ".ini")))
logger.info("config files to be used: {0}".format(cfg_files))
if verify:
cfg_parser.read([cfg for cfg in cfg_files if os.path.exists(cfg)])
else:
cfg_parser.read(cfg_files)
return dict((section, dict(cfg_parser.items(section)))
for section in cfg_parser.sections()) |
def wrap(txt, width=80, ident=0):
ident = * ident
txt = (txt or ).replace(os.linesep, ).strip()
wrapper = textwrap.TextWrapper()
wrapper.fix_sentence_endings = False
wrapper.initial_indent = wrapper.subsequent_indent = ident
return wrapper.wrap(txt) | Wrap text to the required dimensions and clean it up, prepare for display.
:param txt:
:param width:
:return: | ### Input:
Wrap text to the required dimensions and clean it up, prepare for display.
:param txt:
:param width:
:return:
### Response:
def wrap(txt, width=80, ident=0):
ident = * ident
txt = (txt or ).replace(os.linesep, ).strip()
wrapper = textwrap.TextWrapper()
wrapper.fix_sentence_endings = False
wrapper.initial_indent = wrapper.subsequent_indent = ident
return wrapper.wrap(txt) |
def calcperc(b, perc=(0.1,99.9)):
b = checkma(b)
if b.count() > 0:
low = np.percentile(b.compressed(), perc[0])
high = np.percentile(b.compressed(), perc[1])
else:
low = 0
high = 0
return low, high | Calculate values at specified percentiles | ### Input:
Calculate values at specified percentiles
### Response:
def calcperc(b, perc=(0.1,99.9)):
b = checkma(b)
if b.count() > 0:
low = np.percentile(b.compressed(), perc[0])
high = np.percentile(b.compressed(), perc[1])
else:
low = 0
high = 0
return low, high |
def translateDNA_6Frames(sequence) :
trans = (
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
)
return trans | returns 6 translation of sequence. One for each reading frame | ### Input:
returns 6 translation of sequence. One for each reading frame
### Response:
def translateDNA_6Frames(sequence) :
trans = (
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
translateDNA(sequence, ),
)
return trans |
def serialize_me(self, account, bucket_details):
return self.dumps({
"account": account,
"detail": {
"request_parameters": {
"bucket_name": bucket_details["Name"],
"creation_date": bucket_details["CreationDate"].replace(
tzinfo=None, microsecond=0).isoformat() + "Z"
}
}
}).data | Serializes the JSON for the Polling Event Model.
:param account:
:param bucket_details:
:return: | ### Input:
Serializes the JSON for the Polling Event Model.
:param account:
:param bucket_details:
:return:
### Response:
def serialize_me(self, account, bucket_details):
return self.dumps({
"account": account,
"detail": {
"request_parameters": {
"bucket_name": bucket_details["Name"],
"creation_date": bucket_details["CreationDate"].replace(
tzinfo=None, microsecond=0).isoformat() + "Z"
}
}
}).data |
def find(self, locator, find_all=False, search_object=None, force_find=False, exclude_invisible=False):
search_object = self.driver if search_object is None else search_object
attempts = 0
while attempts < self.find_attempts + 1:
if bool(force_find):
js_locator = self.locator_handler.parse_locator(locator)
if js_locator.By != :
raise ValueError(
.format(
js_locator))
elements = self.js_executor.execute_template_and_return_result(
, variables={: js_locator.value})
else:
elements = self.locator_handler.find_by_locator(search_object, locator, True)
all_elements = elements
visible_elements = elements
if exclude_invisible:
visible_elements = [element for element in all_elements if element.is_displayed()]
elements = visible_elements
if len(elements) > 0:
if find_all is True:
for index in range(len(elements)):
elements[index] = WebElementWrapper.WebElementWrapper(self, locator, elements[index],
search_object=search_object)
return elements
elif find_all is False:
return WebElementWrapper.WebElementWrapper(self, locator, elements[0], search_object=search_object)
else:
if attempts >= self.find_attempts:
if find_all is True:
return []
else:
error_message = "Unable to find element after {0} attempts with locator: {1}".format(
attempts,
locator
)
if exclude_invisible and len(visible_elements) == 0 and len(all_elements) > 0:
error_message = "Elements found using locator {}, but none were visible".format(locator)
raise WebDriverWrapperException.WebDriverWrapperException(self, error_message)
else:
attempts += 1 | Attempts to locate an element, trying the number of times specified by the driver wrapper;
Will throw a WebDriverWrapperException if no element is found
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string used to query the element
@type find_all: bool
@param find_all: set to True to locate all located elements as a list
@type search_object: webdriverwrapper.WebElementWrapper
@param force_find: If true will use javascript to find elements
@type force_find: bool
@param search_object: A WebDriver or WebElement object to call find_element(s)_by_xxxxx | ### Input:
Attempts to locate an element, trying the number of times specified by the driver wrapper;
Will throw a WebDriverWrapperException if no element is found
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string used to query the element
@type find_all: bool
@param find_all: set to True to locate all located elements as a list
@type search_object: webdriverwrapper.WebElementWrapper
@param force_find: If true will use javascript to find elements
@type force_find: bool
@param search_object: A WebDriver or WebElement object to call find_element(s)_by_xxxxx
### Response:
def find(self, locator, find_all=False, search_object=None, force_find=False, exclude_invisible=False):
search_object = self.driver if search_object is None else search_object
attempts = 0
while attempts < self.find_attempts + 1:
if bool(force_find):
js_locator = self.locator_handler.parse_locator(locator)
if js_locator.By != :
raise ValueError(
.format(
js_locator))
elements = self.js_executor.execute_template_and_return_result(
, variables={: js_locator.value})
else:
elements = self.locator_handler.find_by_locator(search_object, locator, True)
all_elements = elements
visible_elements = elements
if exclude_invisible:
visible_elements = [element for element in all_elements if element.is_displayed()]
elements = visible_elements
if len(elements) > 0:
if find_all is True:
for index in range(len(elements)):
elements[index] = WebElementWrapper.WebElementWrapper(self, locator, elements[index],
search_object=search_object)
return elements
elif find_all is False:
return WebElementWrapper.WebElementWrapper(self, locator, elements[0], search_object=search_object)
else:
if attempts >= self.find_attempts:
if find_all is True:
return []
else:
error_message = "Unable to find element after {0} attempts with locator: {1}".format(
attempts,
locator
)
if exclude_invisible and len(visible_elements) == 0 and len(all_elements) > 0:
error_message = "Elements found using locator {}, but none were visible".format(locator)
raise WebDriverWrapperException.WebDriverWrapperException(self, error_message)
else:
attempts += 1 |
def reduce_to_2d(arr):
if not isinstance(arr, np.ndarray):
raise ValueError()
ndims = len(arr.shape)
if ndims < 2:
raise ValueError()
slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
return arr[slices] | Given a np.npdarray with nDims > 2, reduce it to 2d.
It does this by selecting the zeroth coordinate for every dimension greater
than two.
Args:
arr: a numpy ndarray of dimension at least 2.
Returns:
A two-dimensional subarray from the input array.
Raises:
ValueError: If the argument is not a numpy ndarray, or the dimensionality
is too low. | ### Input:
Given a np.npdarray with nDims > 2, reduce it to 2d.
It does this by selecting the zeroth coordinate for every dimension greater
than two.
Args:
arr: a numpy ndarray of dimension at least 2.
Returns:
A two-dimensional subarray from the input array.
Raises:
ValueError: If the argument is not a numpy ndarray, or the dimensionality
is too low.
### Response:
def reduce_to_2d(arr):
if not isinstance(arr, np.ndarray):
raise ValueError()
ndims = len(arr.shape)
if ndims < 2:
raise ValueError()
slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
return arr[slices] |
def set_figure(self, figure, handle=None):
self.figure = figure
self.bkimage = None
self._push_handle = handle
wd = figure.plot_width
ht = figure.plot_height
self.configure_window(wd, ht)
doc = curdoc()
doc.add_periodic_callback(self.timer_cb, 50)
self.logger.info("figure set") | Call this with the Bokeh figure object. | ### Input:
Call this with the Bokeh figure object.
### Response:
def set_figure(self, figure, handle=None):
self.figure = figure
self.bkimage = None
self._push_handle = handle
wd = figure.plot_width
ht = figure.plot_height
self.configure_window(wd, ht)
doc = curdoc()
doc.add_periodic_callback(self.timer_cb, 50)
self.logger.info("figure set") |
def validate(self, metadata, path, value):
if isinstance(value, Requirement):
if metadata.testing and self.mock_value is not None:
value = self.mock_value
elif self.default_value is not None:
value = self.default_value
elif not value.required:
return None
else:
raise ValidationError(f"Missing required configuration for: {.join(path)}")
try:
return self.type(value)
except ValueError:
raise ValidationError(f"Missing required configuration for: {.join(path)}: {value}") | Validate this requirement. | ### Input:
Validate this requirement.
### Response:
def validate(self, metadata, path, value):
if isinstance(value, Requirement):
if metadata.testing and self.mock_value is not None:
value = self.mock_value
elif self.default_value is not None:
value = self.default_value
elif not value.required:
return None
else:
raise ValidationError(f"Missing required configuration for: {.join(path)}")
try:
return self.type(value)
except ValueError:
raise ValidationError(f"Missing required configuration for: {.join(path)}: {value}") |
def _process_group(input_group, required_group, groupname, append_subgroups=None):
if append_subgroups is None:
append_subgroups = []
tool_options = {}
for key in input_group:
_ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + + key)
if key in append_subgroups:
continue
else:
tool_options[key] = input_group[key]
for key in input_group:
if key in append_subgroups:
continue
else:
for yek in append_subgroups:
tool_options[key].update(input_group[yek])
return tool_options | Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each, other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict | ### Input:
Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each, other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict
### Response:
def _process_group(input_group, required_group, groupname, append_subgroups=None):
if append_subgroups is None:
append_subgroups = []
tool_options = {}
for key in input_group:
_ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + + key)
if key in append_subgroups:
continue
else:
tool_options[key] = input_group[key]
for key in input_group:
if key in append_subgroups:
continue
else:
for yek in append_subgroups:
tool_options[key].update(input_group[yek])
return tool_options |
def authorized(self, environ):
if self.django and settings.LOGIN_REQUIRED:
try:
from django.conf import settings as django_settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.core.exceptions import ObjectDoesNotExist
cookie = SimpleCookie(environ["HTTP_COOKIE"])
cookie_name = django_settings.SESSION_COOKIE_NAME
session_key = cookie[cookie_name].value
session = Session.objects.get(session_key=session_key)
user_id = session.get_decoded().get(SESSION_KEY)
user = User.objects.get(id=user_id)
except (ImportError, KeyError, ObjectDoesNotExist):
return False
return True | If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set
to ``True``, pull the session cookie from the environment and
validate that the user is authenticated. | ### Input:
If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set
to ``True``, pull the session cookie from the environment and
validate that the user is authenticated.
### Response:
def authorized(self, environ):
if self.django and settings.LOGIN_REQUIRED:
try:
from django.conf import settings as django_settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.core.exceptions import ObjectDoesNotExist
cookie = SimpleCookie(environ["HTTP_COOKIE"])
cookie_name = django_settings.SESSION_COOKIE_NAME
session_key = cookie[cookie_name].value
session = Session.objects.get(session_key=session_key)
user_id = session.get_decoded().get(SESSION_KEY)
user = User.objects.get(id=user_id)
except (ImportError, KeyError, ObjectDoesNotExist):
return False
return True |
def add_z(self, name, prior, q, index=True):
self.z_list.append(LatentVariable(name,len(self.z_list),prior,q))
if index is True:
self.z_indices[name] = {: len(self.z_list)-1, : len(self.z_list)-1} | Adds latent variable
Parameters
----------
name : str
Name of the latent variable
prior : Prior object
Which prior distribution? E.g. Normal(0,1)
q : Distribution object
Which distribution to use for variational approximation
index : boolean
Whether to index the variable in the z_indices dictionary
Returns
----------
None (changes priors in LatentVariables object) | ### Input:
Adds latent variable
Parameters
----------
name : str
Name of the latent variable
prior : Prior object
Which prior distribution? E.g. Normal(0,1)
q : Distribution object
Which distribution to use for variational approximation
index : boolean
Whether to index the variable in the z_indices dictionary
Returns
----------
None (changes priors in LatentVariables object)
### Response:
def add_z(self, name, prior, q, index=True):
self.z_list.append(LatentVariable(name,len(self.z_list),prior,q))
if index is True:
self.z_indices[name] = {: len(self.z_list)-1, : len(self.z_list)-1} |
def set_current_limit(self, value, channel=1):
cmd = "I%d %f" % (channel, value)
self.write(cmd) | channel: 1=OP1, 2=OP2, AUX is not supported | ### Input:
channel: 1=OP1, 2=OP2, AUX is not supported
### Response:
def set_current_limit(self, value, channel=1):
cmd = "I%d %f" % (channel, value)
self.write(cmd) |
def network_security_group_create_or_update(name, resource_group, **kwargs):
if not in kwargs:
rg_props = __salt__[](
resource_group, **kwargs
)
if in rg_props:
log.error(
)
return False
kwargs[] = rg_props[]
netconn = __utils__[](, **kwargs)
try:
secgroupmodel = __utils__[](, , **kwargs)
except TypeError as exc:
result = {: .format(str(exc))}
return result
try:
secgroup = netconn.network_security_groups.create_or_update(
resource_group_name=resource_group,
network_security_group_name=name,
parameters=secgroupmodel
)
secgroup.wait()
secgroup_result = secgroup.result()
result = secgroup_result.as_dict()
except CloudError as exc:
__utils__[](, str(exc), **kwargs)
result = {: str(exc)}
except SerializationError as exc:
result = {: .format(str(exc))}
return result | .. versionadded:: 2019.2.0
Create or update a network security group.
:param name: The name of the network security group to create.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_group_create_or_update testnsg testgroup | ### Input:
.. versionadded:: 2019.2.0
Create or update a network security group.
:param name: The name of the network security group to create.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_group_create_or_update testnsg testgroup
### Response:
def network_security_group_create_or_update(name, resource_group, **kwargs):
if not in kwargs:
rg_props = __salt__[](
resource_group, **kwargs
)
if in rg_props:
log.error(
)
return False
kwargs[] = rg_props[]
netconn = __utils__[](, **kwargs)
try:
secgroupmodel = __utils__[](, , **kwargs)
except TypeError as exc:
result = {: .format(str(exc))}
return result
try:
secgroup = netconn.network_security_groups.create_or_update(
resource_group_name=resource_group,
network_security_group_name=name,
parameters=secgroupmodel
)
secgroup.wait()
secgroup_result = secgroup.result()
result = secgroup_result.as_dict()
except CloudError as exc:
__utils__[](, str(exc), **kwargs)
result = {: str(exc)}
except SerializationError as exc:
result = {: .format(str(exc))}
return result |
async def _write_to_container_stdin(self, write_stream, message):
msg = msgpack.dumps(message, encoding="utf8", use_bin_type=True)
self._logger.debug("Sending %i bytes to container", len(msg))
write_stream.write(struct.pack(, len(msg)))
write_stream.write(msg)
await write_stream.drain() | Send a message to the stdin of a container, with the right data
:param write_stream: asyncio write stream to the stdin of the container
:param message: dict to be msgpacked and sent | ### Input:
Send a message to the stdin of a container, with the right data
:param write_stream: asyncio write stream to the stdin of the container
:param message: dict to be msgpacked and sent
### Response:
async def _write_to_container_stdin(self, write_stream, message):
msg = msgpack.dumps(message, encoding="utf8", use_bin_type=True)
self._logger.debug("Sending %i bytes to container", len(msg))
write_stream.write(struct.pack(, len(msg)))
write_stream.write(msg)
await write_stream.drain() |
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
output_filter_mask=None, out_annotation=None, diff_type="log_odds", rc_handling="maximum"):
seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
assert diff_type in ["log_odds", "diff"]
assert rc_handling in ["average", "maximum"]
assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
assert get_seq_len(ref)[0] == mutation_positions.shape[0]
assert len(mutation_positions.shape) == 1
if output_filter_mask is None:
if out_annotation is None:
output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
else:
output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
out_annotation = out_annotation_all_outputs[output_filter_mask]
preds = {}
for k in seqs:
preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask])
if diff_type == "log_odds":
if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]):
warnings.warn("Using log_odds on model outputs that are not bound [0,1]")
diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"]))
diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"]))
elif diff_type == "diff":
diffs = preds["alt"] - preds["ref"]
diffs_rc = preds["alt_rc"] - preds["ref_rc"]
if rc_handling == "average":
diffs = np.mean([diffs, diffs_rc], axis=0)
elif rc_handling == "maximum":
replace_filt = np.abs(diffs) < np.abs(diffs_rc)
diffs[replace_filt] = diffs_rc[replace_filt]
diffs = pd.DataFrame(diffs, columns=out_annotation)
return {"ism": diffs} | In-silico mutagenesis
Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used
in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two ways to
calculate the difference between the outputs created by reference and alternative sequence and two
different methods to select whether to use the output generated from the forward or from the
reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA the same ISM prediction
has to be performed on a randomised set of 1 million 1000genomes, MAF-matched variants to get a
background of predicted effects of random SNPs.
# Arguments
model: Keras model
ref: Input sequence with the reference genotype in the mutation position
ref_rc: Reverse complement of the 'ref' argument
alt: Input sequence with the alternative genotype in the mutation position
alt_rc: Reverse complement of the 'alt' argument
mutation_positions: Position on which the mutation was placed in the forward sequences
out_annotation_all_outputs: Output labels of the model.
output_filter_mask: Mask of boolean values indicating which model outputs should be used.
Use this or 'out_annotation'
out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the
predictions should be calculated.
diff_type: "log_odds" or "diff". When set to 'log_odds' calculate scores based on log_odds, which assumes
the model output is a probability. When set to 'diff' the model output for 'ref' is subtracted
from 'alt'. Using 'log_odds' with outputs that are not in the range [0,1] nan will be returned.
rc_handling: "average" or "maximum". Either average over the predictions derived from forward and
reverse-complement predictions ('average') or pick the prediction with the bigger absolute
value ('maximum').
# Returns
Dictionary with the key `ism` which contains a pandas DataFrame containing the calculated values
for each (selected) model output and input sequence | ### Input:
In-silico mutagenesis
Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used
in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two ways to
calculate the difference between the outputs created by reference and alternative sequence and two
different methods to select whether to use the output generated from the forward or from the
reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA the same ISM prediction
has to be performed on a randomised set of 1 million 1000genomes, MAF-matched variants to get a
background of predicted effects of random SNPs.
# Arguments
model: Keras model
ref: Input sequence with the reference genotype in the mutation position
ref_rc: Reverse complement of the 'ref' argument
alt: Input sequence with the alternative genotype in the mutation position
alt_rc: Reverse complement of the 'alt' argument
mutation_positions: Position on which the mutation was placed in the forward sequences
out_annotation_all_outputs: Output labels of the model.
output_filter_mask: Mask of boolean values indicating which model outputs should be used.
Use this or 'out_annotation'
out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the
predictions should be calculated.
diff_type: "log_odds" or "diff". When set to 'log_odds' calculate scores based on log_odds, which assumes
the model output is a probability. When set to 'diff' the model output for 'ref' is subtracted
from 'alt'. Using 'log_odds' with outputs that are not in the range [0,1] nan will be returned.
rc_handling: "average" or "maximum". Either average over the predictions derived from forward and
reverse-complement predictions ('average') or pick the prediction with the bigger absolute
value ('maximum').
# Returns
Dictionary with the key `ism` which contains a pandas DataFrame containing the calculated values
for each (selected) model output and input sequence
### Response:
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
output_filter_mask=None, out_annotation=None, diff_type="log_odds", rc_handling="maximum"):
seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
assert diff_type in ["log_odds", "diff"]
assert rc_handling in ["average", "maximum"]
assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
assert get_seq_len(ref)[0] == mutation_positions.shape[0]
assert len(mutation_positions.shape) == 1
if output_filter_mask is None:
if out_annotation is None:
output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
else:
output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
out_annotation = out_annotation_all_outputs[output_filter_mask]
preds = {}
for k in seqs:
preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask])
if diff_type == "log_odds":
if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]):
warnings.warn("Using log_odds on model outputs that are not bound [0,1]")
diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"]))
diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"]))
elif diff_type == "diff":
diffs = preds["alt"] - preds["ref"]
diffs_rc = preds["alt_rc"] - preds["ref_rc"]
if rc_handling == "average":
diffs = np.mean([diffs, diffs_rc], axis=0)
elif rc_handling == "maximum":
replace_filt = np.abs(diffs) < np.abs(diffs_rc)
diffs[replace_filt] = diffs_rc[replace_filt]
diffs = pd.DataFrame(diffs, columns=out_annotation)
return {"ism": diffs} |
def anchored_pairs(self, anchor):
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs) | Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order. | ### Input:
Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order.
### Response:
def anchored_pairs(self, anchor):
pairs = OrderedDict()
for term in self.keys:
score = self.get_pair(anchor, term)
if score: pairs[term] = score
return utils.sort_dict(pairs) |
def oneright(self, window_name, object_name, iterations):
if not self.verifyscrollbarhorizontal(window_name, object_name):
raise LdtpServerException()
object_handle = self._get_object_handle(window_name, object_name)
i = 0
maxValue = 1.0 / 8
flag = False
while i < iterations:
if object_handle.AXValue >= 1:
raise LdtpServerException()
object_handle.AXValue += maxValue
time.sleep(1.0 / 100)
flag = True
i += 1
if flag:
return 1
else:
raise LdtpServerException() | Press scrollbar right with number of iterations
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param iterations: iterations to perform on slider increase
@type iterations: integer
@return: 1 on success.
@rtype: integer | ### Input:
Press scrollbar right with number of iterations
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param iterations: iterations to perform on slider increase
@type iterations: integer
@return: 1 on success.
@rtype: integer
### Response:
def oneright(self, window_name, object_name, iterations):
if not self.verifyscrollbarhorizontal(window_name, object_name):
raise LdtpServerException()
object_handle = self._get_object_handle(window_name, object_name)
i = 0
maxValue = 1.0 / 8
flag = False
while i < iterations:
if object_handle.AXValue >= 1:
raise LdtpServerException()
object_handle.AXValue += maxValue
time.sleep(1.0 / 100)
flag = True
i += 1
if flag:
return 1
else:
raise LdtpServerException() |
def rename_keys_in_dict(d: Dict[str, Any], renames: Dict[str, str]) -> None:
for old_key, new_key in renames.items():
if new_key == old_key:
continue
if old_key in d:
if new_key in d:
raise ValueError(
"rename_keys_in_dict: renaming {} -> {} but new key "
"already exists".format(repr(old_key), repr(new_key)))
d[new_key] = d.pop(old_key) | Renames, IN PLACE, the keys in ``d`` according to the mapping in
``renames``.
Args:
d: a dictionary to modify
renames: a dictionary of the format ``{old_key_name: new_key_name}``
See
https://stackoverflow.com/questions/4406501/change-the-name-of-a-key-in-dictionary. | ### Input:
Renames, IN PLACE, the keys in ``d`` according to the mapping in
``renames``.
Args:
d: a dictionary to modify
renames: a dictionary of the format ``{old_key_name: new_key_name}``
See
https://stackoverflow.com/questions/4406501/change-the-name-of-a-key-in-dictionary.
### Response:
def rename_keys_in_dict(d: Dict[str, Any], renames: Dict[str, str]) -> None:
for old_key, new_key in renames.items():
if new_key == old_key:
continue
if old_key in d:
if new_key in d:
raise ValueError(
"rename_keys_in_dict: renaming {} -> {} but new key "
"already exists".format(repr(old_key), repr(new_key)))
d[new_key] = d.pop(old_key) |
def remover(file_path):
if os.path.isfile(file_path):
os.remove(file_path)
return True
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
return True
else:
return False | Delete a file or directory path only if it exists. | ### Input:
Delete a file or directory path only if it exists.
### Response:
def remover(file_path):
if os.path.isfile(file_path):
os.remove(file_path)
return True
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
return True
else:
return False |
def _save_cache(self, filename, section_number_of_pages, page_references):
cache_path = Path(filename).with_suffix(self.CACHE_EXTENSION)
with cache_path.open() as file:
cache = (section_number_of_pages, page_references)
pickle.dump(cache, file) | Save the current state of the page references to `<filename>.rtc` | ### Input:
Save the current state of the page references to `<filename>.rtc`
### Response:
def _save_cache(self, filename, section_number_of_pages, page_references):
cache_path = Path(filename).with_suffix(self.CACHE_EXTENSION)
with cache_path.open() as file:
cache = (section_number_of_pages, page_references)
pickle.dump(cache, file) |
def _fix_paths(self, data):
data[] = [join(, path) for path in data[]]
if data[]:
data[] = join(, data[])
data[] = {}
for attribute in SOURCE_KEYS:
for k, v in data[attribute].items():
if k not in data[]:
data[][k] = []
data[][k].extend([join(, file) for file in v])
for k,v in data[].items():
if k not in data[]:
data[][k] = []
data[][k].extend([join(, file) for file in v])
data[] = OrderedDict(sorted(data[].items(), key=lambda t: t[0])) | All paths needs to be fixed - add PROJ_DIR prefix + normalize | ### Input:
All paths needs to be fixed - add PROJ_DIR prefix + normalize
### Response:
def _fix_paths(self, data):
data[] = [join(, path) for path in data[]]
if data[]:
data[] = join(, data[])
data[] = {}
for attribute in SOURCE_KEYS:
for k, v in data[attribute].items():
if k not in data[]:
data[][k] = []
data[][k].extend([join(, file) for file in v])
for k,v in data[].items():
if k not in data[]:
data[][k] = []
data[][k].extend([join(, file) for file in v])
data[] = OrderedDict(sorted(data[].items(), key=lambda t: t[0])) |
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
return self._proxy.update(enabled=enabled, webhook_url=webhook_url, webhook_method=webhook_method, ) | Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance | ### Input:
Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
### Response:
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
return self._proxy.update(enabled=enabled, webhook_url=webhook_url, webhook_method=webhook_method, ) |
def wait(self, **kwargs):
if self.request:
self.request.wait(**kwargs)
self.request = None
return self.inflate() | Wait until any pending asynchronous requests are finished for this collection. | ### Input:
Wait until any pending asynchronous requests are finished for this collection.
### Response:
def wait(self, **kwargs):
if self.request:
self.request.wait(**kwargs)
self.request = None
return self.inflate() |
def unpack_messages(self, partitions_msgs):
for pmsg in partitions_msgs:
key = pmsg[]
partition = pmsg[]
offset = pmsg[]
msg = pmsg.pop()
if msg:
try:
record = self.deserialize_fun(msg)
except Exception as e:
log.error(.format(partition, offset, key, repr(e)))
continue
else:
if isinstance(record, dict):
pmsg[] = record
yield pmsg
else:
log.info(.format(key))
else:
yield pmsg | Deserialize a message to python structures | ### Input:
Deserialize a message to python structures
### Response:
def unpack_messages(self, partitions_msgs):
for pmsg in partitions_msgs:
key = pmsg[]
partition = pmsg[]
offset = pmsg[]
msg = pmsg.pop()
if msg:
try:
record = self.deserialize_fun(msg)
except Exception as e:
log.error(.format(partition, offset, key, repr(e)))
continue
else:
if isinstance(record, dict):
pmsg[] = record
yield pmsg
else:
log.info(.format(key))
else:
yield pmsg |
def request(self, *args, **kwargs):
return RequestsHandler(*args, api_token=self.api_token,
verify=self.mist_client.verify,
job_id=self.mist_client.job_id, **kwargs) | The main purpose of this is to be a wrapper-like function to pass the api_token and all the other params to the
requests that are being made
:returns: An instance of RequestsHandler | ### Input:
The main purpose of this is to be a wrapper-like function to pass the api_token and all the other params to the
requests that are being made
:returns: An instance of RequestsHandler
### Response:
def request(self, *args, **kwargs):
return RequestsHandler(*args, api_token=self.api_token,
verify=self.mist_client.verify,
job_id=self.mist_client.job_id, **kwargs) |
def transition(self, duration, is_on=None, **kwargs):
self._cancel_active_transition()
dest_state = self._prepare_transition(is_on, **kwargs)
total_steps = self._transition_steps(**dest_state)
state_stages = [self._transition_stage(step, total_steps, **dest_state)
for step in range(total_steps)]
pwm_stages = [self._get_pwm_values(**stage)
for stage in state_stages]
callback = partial(self._transition_callback, is_on)
self._active_transition = Transition(self._driver, duration,
state_stages, pwm_stages,
callback)
TransitionManager().execute(self._active_transition)
return self._active_transition | Transition to the specified state of the led.
If another transition is already running, it is aborted.
:param duration: The duration of the transition.
:param is_on: The on-off state to transition to.
:param kwargs: The state to transition to. | ### Input:
Transition to the specified state of the led.
If another transition is already running, it is aborted.
:param duration: The duration of the transition.
:param is_on: The on-off state to transition to.
:param kwargs: The state to transition to.
### Response:
def transition(self, duration, is_on=None, **kwargs):
self._cancel_active_transition()
dest_state = self._prepare_transition(is_on, **kwargs)
total_steps = self._transition_steps(**dest_state)
state_stages = [self._transition_stage(step, total_steps, **dest_state)
for step in range(total_steps)]
pwm_stages = [self._get_pwm_values(**stage)
for stage in state_stages]
callback = partial(self._transition_callback, is_on)
self._active_transition = Transition(self._driver, duration,
state_stages, pwm_stages,
callback)
TransitionManager().execute(self._active_transition)
return self._active_transition |
def _fmadm_action_fmri(action, fmri):
ret = {}
fmadm = _check_fmadm()
cmd = .format(
cmd=fmadm,
action=action,
fmri=fmri
)
res = __salt__[](cmd)
retcode = res[]
result = {}
if retcode != 0:
result[] = res[]
else:
result = True
    return result | Internal function for fmadm.repaired, fmadm.replaced, fmadm.flush | ### Input:
Internal function for fmadm.repaired, fmadm.replaced, fmadm.flush
### Response:
def _fmadm_action_fmri(action, fmri):
ret = {}
fmadm = _check_fmadm()
cmd = .format(
cmd=fmadm,
action=action,
fmri=fmri
)
res = __salt__[](cmd)
retcode = res[]
result = {}
if retcode != 0:
result[] = res[]
else:
result = True
return result |
def bronk(r, p, x, graph):
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)]
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)]
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
        x.append(vertex) | This is used to find cliques and remove them from graph
Args:
graph (graph): this is the graph of vertices to search for
cliques
p (list): this is a list of the vertices to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and vertices | ### Input:
This is used to find cliques and remove them from graph
Args:
graph (graph): this is the graph of vertices to search for
cliques
p (list): this is a list of the vertices to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and vertices
### Response:
def bronk(r, p, x, graph):
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)]
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)]
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
x.append(vertex) |
def start_subscribe(self):
if not self.conn:
raise ValueError()
elif not self.pubsub_conn:
raise ValueError()
return Subscription(self) | Create a new Subscription context manager. | ### Input:
Create a new Subscription context manager.
### Response:
def start_subscribe(self):
if not self.conn:
raise ValueError()
elif not self.pubsub_conn:
raise ValueError()
return Subscription(self) |
def write_command(cls, writer, name, buffers=()):
assert len(name) < 256
body_len = len(name) + 1 + sum(len(buffer) for buffer in buffers)
if body_len < 256:
writer.write(struct.pack(, 0x04, body_len, len(name)))
else:
writer.write(struct.pack(, 0x06, body_len, len(name)))
writer.write(name)
for buffer in buffers:
writer.write(buffer) | Write a command to the specified writer.
:param writer: The writer to use.
:param name: The command name.
:param buffers: The buffers to writer. | ### Input:
Write a command to the specified writer.
:param writer: The writer to use.
:param name: The command name.
:param buffers: The buffers to writer.
### Response:
def write_command(cls, writer, name, buffers=()):
assert len(name) < 256
body_len = len(name) + 1 + sum(len(buffer) for buffer in buffers)
if body_len < 256:
writer.write(struct.pack(, 0x04, body_len, len(name)))
else:
writer.write(struct.pack(, 0x06, body_len, len(name)))
writer.write(name)
for buffer in buffers:
writer.write(buffer) |
def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
f1_header=None, f2_header=None, missing_val=None):
mm = f1_header != f2_header
one_none = f1_header is None or f2_header is None
if mm and one_none and missing_val is None:
raise InvalidHeaderError("Cannot generate output header when one " +
"input file is missing a header and no " +
"missing value was provided to replace " +
"unknown entries.")
if f1_header is not None and f2_header is not None:
out_strm.write(delim.join(f1_header) + delim +
delim.join(f2_header) + "\n")
elif f1_header is None and f2_header is not None:
dummy_h = f1_num_fields * [missing_val]
out_strm.write(delim.join(dummy_h) + delim +
delim.join(f2_header) + "\n")
elif f1_header is not None and f2_header is None:
dummy_h = f2_num_fields * [missing_val]
out_strm.write(delim.join(f1_header) + delim +
delim.join(dummy_h) + "\n") | Write the header for a joined file. If headers are provided for one or more
of the input files, then a header is generated for the output file.
Otherwise, this does not output anything.
:param out_strm: write to this stream
:param delim:
:param f1_num_fields: the number of columns in the first file
:param f2_num_fields: the number of columns in the second file
:param f1_header:
:param f2_header:
:param missing_val: | ### Input:
Write the header for a joined file. If headers are provided for one or more
of the input files, then a header is generated for the output file.
Otherwise, this does not output anything.
:param out_strm: write to this stream
:param delim:
:param f1_num_fields: the number of columns in the first file
:param f2_num_fields: the number of columns in the second file
:param f1_header:
:param f2_header:
:param missing_val:
### Response:
def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
f1_header=None, f2_header=None, missing_val=None):
mm = f1_header != f2_header
one_none = f1_header is None or f2_header is None
if mm and one_none and missing_val is None:
raise InvalidHeaderError("Cannot generate output header when one " +
"input file is missing a header and no " +
"missing value was provided to replace " +
"unknown entries.")
if f1_header is not None and f2_header is not None:
out_strm.write(delim.join(f1_header) + delim +
delim.join(f2_header) + "\n")
elif f1_header is None and f2_header is not None:
dummy_h = f1_num_fields * [missing_val]
out_strm.write(delim.join(dummy_h) + delim +
delim.join(f2_header) + "\n")
elif f1_header is not None and f2_header is None:
dummy_h = f2_num_fields * [missing_val]
out_strm.write(delim.join(f1_header) + delim +
delim.join(dummy_h) + "\n") |
def get_tokens(text, encoding=None):
if isinstance(text, file_types):
text = text.read()
if isinstance(text, text_type):
pass
elif isinstance(text, bytes):
if encoding:
text = text.decode(encoding)
else:
try:
text = text.decode()
except UnicodeDecodeError:
text = text.decode()
else:
raise TypeError(u"Expected text or file-like object, got {!r}".
format(type(text)))
iterable = enumerate(text)
for pos, char in iterable:
for rexmatch, action in SQL_REGEX:
m = rexmatch(text, pos)
if not m:
continue
elif isinstance(action, tokens._TokenType):
yield action, m.group()
elif callable(action):
yield action(m.group())
consume(iterable, m.end() - pos - 1)
break
else:
yield tokens.Error, char | Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``) | ### Input:
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
### Response:
def get_tokens(text, encoding=None):
if isinstance(text, file_types):
text = text.read()
if isinstance(text, text_type):
pass
elif isinstance(text, bytes):
if encoding:
text = text.decode(encoding)
else:
try:
text = text.decode()
except UnicodeDecodeError:
text = text.decode()
else:
raise TypeError(u"Expected text or file-like object, got {!r}".
format(type(text)))
iterable = enumerate(text)
for pos, char in iterable:
for rexmatch, action in SQL_REGEX:
m = rexmatch(text, pos)
if not m:
continue
elif isinstance(action, tokens._TokenType):
yield action, m.group()
elif callable(action):
yield action(m.group())
consume(iterable, m.end() - pos - 1)
break
else:
yield tokens.Error, char |
def has_cwd(state, dir, incorrect_msg="Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there."):
expr = "[[ $PWD == ]]".format(dir)
_msg = state.build_message(incorrect_msg, fmt_kwargs={ : dir })
has_expr_exit_code(state, expr, output="0", incorrect_msg=_msg)
return state | Check whether the student is in the expected directory.
This check is typically used before using ``has_expr_output()``
to make sure the student didn't navigate somewhere else.
Args:
state: State instance describing student and solution code. Can be omitted if used with ``Ex()``.
dir: Directory that the student should be in. Always use the absolute path.
incorrect_msg: If specified, this overrides the automatically generated message in
case the student is not in the expected directory.
:Example:
If you want to be sure that the student is in ``/home/repl/my_dir``: ::
Ex().has_cwd('/home/repl/my_dir') | ### Input:
Check whether the student is in the expected directory.
This check is typically used before using ``has_expr_output()``
to make sure the student didn't navigate somewhere else.
Args:
state: State instance describing student and solution code. Can be omitted if used with ``Ex()``.
dir: Directory that the student should be in. Always use the absolute path.
incorrect_msg: If specified, this overrides the automatically generated message in
case the student is not in the expected directory.
:Example:
If you want to be sure that the student is in ``/home/repl/my_dir``: ::
Ex().has_cwd('/home/repl/my_dir')
### Response:
def has_cwd(state, dir, incorrect_msg="Your current working directory should be `{{dir}}`. Use `cd {{dir}}` to navigate there."):
    """Check whether the student is in the expected directory.

    This check is typically used before ``has_expr_output()`` to make sure
    the student didn't navigate somewhere else.

    Args:
        state: State instance describing student and solution code.
        dir: Directory the student should be in. Always use the absolute path.
        incorrect_msg: Overrides the automatically generated feedback message.

    :Example: ``Ex().has_cwd('/home/repl/my_dir')``
    """
    # Literals were lost in the extracted source; restored so the shell
    # expression actually compares $PWD against the quoted directory and the
    # message template receives its `dir` value.
    expr = "[[ $PWD == '{}' ]]".format(dir)
    _msg = state.build_message(incorrect_msg, fmt_kwargs={'dir': dir})
    has_expr_exit_code(state, expr, output="0", incorrect_msg=_msg)
    return state
def process_dynesty_run(results):
samples = np.zeros((results.samples.shape[0],
results.samples.shape[1] + 3))
samples[:, 0] = results.logl
samples[:, 1] = results.samples_id
samples[:, 3:] = results.samples
unique_th, first_inds = np.unique(results.samples_id, return_index=True)
assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
try:
assert unique_th.shape[0] == results.nlive
assert np.array_equal(
np.unique(results.samples_id[-results.nlive:]),
np.asarray(range(results.nlive))), (
)
thread_min_max[:, 0] = -np.inf
except AttributeError:
assert unique_th.shape[0] == sum(results.batch_nlive)
for th_lab, ind in zip(unique_th, first_inds):
thread_min_max[th_lab, 0] = (
results.batch_bounds[results.samples_batch[ind], 0])
for th_lab in unique_th:
final_ind = np.where(results.samples_id == th_lab)[0][-1]
thread_min_max[th_lab, 1] = results.logl[final_ind]
samples[final_ind, 2] = -1
assert np.all(~np.isnan(thread_min_max))
run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
nestcheck.ns_run_utils.check_ns_run(run)
return run | Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is need to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details). | ### Input:
Transforms results from a dynesty run into the nestcheck dictionary
format for analysis. This function has been tested with dynesty v9.2.0.
Note that the nestcheck point weights and evidence will not be exactly
the same as the dynesty ones as nestcheck calculates logX volumes more
precisely (using the trapezium rule).
This function does not require the birth_inds_given_contours and
threads_given_birth_inds functions as dynesty results objects
already include thread labels via their samples_id property. If the
dynesty run is dynamic, the batch_bounds property is need to determine
the threads' starting birth contours.
Parameters
----------
results: dynesty results object
N.B. the remaining live points at termination must be included in the
results (dynesty samplers' run_nested method does this if
add_live_points=True - its default value).
Returns
-------
ns_run: dict
Nested sampling run dict (see the module docstring for more details).
### Response:
def process_dynesty_run(results):
    """Transform results from a dynesty run into the nestcheck dict format.

    Thread labels come directly from ``results.samples_id``; for dynamic runs
    the threads' starting birth contours come from ``results.batch_bounds``.

    Parameters
    ----------
    results: dynesty results object
        Must include the remaining live points at termination.

    Returns
    -------
    ns_run: dict
        Nested sampling run dict.
    """
    # Columns: [logl, thread label, end-of-thread flag, theta components...].
    samples = np.zeros((results.samples.shape[0],
                        results.samples.shape[1] + 3))
    samples[:, 0] = results.logl
    samples[:, 1] = results.samples_id
    samples[:, 3:] = results.samples
    unique_th, first_inds = np.unique(results.samples_id, return_index=True)
    # Thread labels must be exactly 0..n_threads-1.
    assert np.array_equal(unique_th, np.asarray(range(unique_th.shape[0])))
    thread_min_max = np.full((unique_th.shape[0], 2), np.nan)
    try:
        # Static run: `nlive` exists; every thread starts at the prior (-inf)
        # and the final nlive samples are one per thread.
        assert unique_th.shape[0] == results.nlive
        assert np.array_equal(
            np.unique(results.samples_id[-results.nlive:]),
            np.asarray(range(results.nlive))), (
            )  # NOTE(review): assertion-message literal lost in extraction.
        thread_min_max[:, 0] = -np.inf
    except AttributeError:
        # Dynamic run: no `nlive` attribute; use batch information instead.
        assert unique_th.shape[0] == sum(results.batch_nlive)
        for th_lab, ind in zip(unique_th, first_inds):
            # Each thread starts at its batch's lower logl bound.
            thread_min_max[th_lab, 0] = (
                results.batch_bounds[results.samples_batch[ind], 0])
    for th_lab in unique_th:
        # Record each thread's final logl and flag its last sample with -1.
        final_ind = np.where(results.samples_id == th_lab)[0][-1]
        thread_min_max[th_lab, 1] = results.logl[final_ind]
        samples[final_ind, 2] = -1
    assert np.all(~np.isnan(thread_min_max))
    run = nestcheck.ns_run_utils.dict_given_run_array(samples, thread_min_max)
    nestcheck.ns_run_utils.check_ns_run(run)
    return run
def pangram(language=):
try:
pangram = get_pangram(language)
except KeyError:
raise template.TemplateSyntaxError(
"Could not find a pangram for %r abbreviation" % language
)
return get_pangram_html(pangram) | Prints a pangram in the specified language.
A pangram is a phrase that includes every letter of an alphabet.
Default is English. For a full list of available languages,
refer to pangrams.py
Usage format::
{% pangram [language] %}
``language`` is the two-letter abbreviation the desired language.
Examples:
* ``{% pangram %}`` will output the default English pangram.
* ``{% pangram 'fr' %}`` will output a French pangram. | ### Input:
Prints a pangram in the specified language.
A pangram is a phrase that includes every letter of an alphabet.
Default is English. For a full list of available languages,
refer to pangrams.py
Usage format::
{% pangram [language] %}
``language`` is the two-letter abbreviation the desired language.
Examples:
* ``{% pangram %}`` will output the default English pangram.
* ``{% pangram 'fr' %}`` will output a French pangram.
### Response:
def pangram(language='en'):
    """Print a pangram in the specified language.

    A pangram is a phrase that includes every letter of an alphabet.
    *language* is the two-letter abbreviation of the desired language;
    the default is English ('en').

    Usage: ``{% pangram [language] %}``

    Raises:
        template.TemplateSyntaxError: when no pangram exists for *language*.
    """
    # The default-argument literal was lost in the extracted source; 'en'
    # matches the documented default (English).
    try:
        pangram = get_pangram(language)
    except KeyError:
        raise template.TemplateSyntaxError(
            "Could not find a pangram for %r abbreviation" % language
        )
    return get_pangram_html(pangram)
def _get_mapping(self, schema):
properties = {}
for field, field_schema in schema.items():
field_mapping = self._get_field_mapping(field_schema)
if field_mapping:
properties[field] = field_mapping
return {: properties} | Get mapping for given resource or item schema.
:param schema: resource or dict/list type item schema | ### Input:
Get mapping for given resource or item schema.
:param schema: resource or dict/list type item schema
### Response:
def _get_mapping(self, schema):
    """Get the mapping for a given resource or item schema.

    :param schema: resource or dict/list type item schema
    :return: dict of the form ``{'properties': {field: mapping, ...}}``;
        fields for which ``_get_field_mapping`` returns a falsy value are
        omitted.
    """
    properties = {}
    for field, field_schema in schema.items():
        field_mapping = self._get_field_mapping(field_schema)
        if field_mapping:
            properties[field] = field_mapping
    # The dict-key literal was lost in the extracted source; 'properties'
    # is the conventional mapping envelope key.
    return {'properties': properties}
def AppendUnique(self, delete_existing=0, **kw):
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, delete_existing)
if key not in self._dict or self._dict[key] in (, None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if key == :
tmp = []
for i in val:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
val = tmp
if SCons.Util.is_Dict(dk):
tmp = []
for (k, v) in dk.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
dk = tmp
elif SCons.Util.is_String(dk):
dk = [(dk,)]
else:
tmp = []
for i in dk:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
dk = tmp
else:
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = dk + val
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
if key == :
tmp = []
for i in dk:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
dk = tmp
if SCons.Util.is_Dict(val):
tmp = []
for (k, v) in val.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
val = tmp
elif SCons.Util.is_String(val):
val = [(val,)]
if delete_existing:
dk = list(filter(lambda x, val=val: x not in val, dk))
self._dict[key] = dk + val
else:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
else:
if delete_existing:
dk = list(filter(lambda x, val=val: x not in val, dk))
self._dict[key] = dk + [val]
else:
if not val in dk:
self._dict[key] = dk + [val]
else:
if key == :
if SCons.Util.is_String(dk):
dk = [dk]
elif SCons.Util.is_Dict(dk):
tmp = []
for (k, v) in dk.items():
if v is not None:
tmp.append((k, v))
else:
tmp.append((k,))
dk = tmp
if SCons.Util.is_String(val):
if val in dk:
val = []
else:
val = [val]
elif SCons.Util.is_Dict(val):
tmp = []
for i,j in val.items():
if j is not None:
tmp.append((i,j))
else:
tmp.append(i)
val = tmp
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
self.scanner_map_delete(kw) | Append values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to end. | ### Input:
Append values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to end.
### Response:
def AppendUnique(self, delete_existing=0, **kw):
    """Append values to existing construction variables in an Environment,
    if they're not already there.

    If delete_existing is truthy, existing matching values are removed
    first, so the appended values move to the end of the variable.

    NOTE(review): several string literals in this block appear lost in
    extraction -- `in (, None)` was presumably `in ('', None)` and each bare
    `if key == :` was presumably `if key == 'CPPDEFINES':` (matching
    upstream SCons). Confirm against the original source.
    """
    kw = copy_non_reserved_keywords(kw)
    for key, val in kw.items():
        if SCons.Util.is_List(val):
            # Remove duplicates within the incoming value itself first.
            val = _delete_duplicates(val, delete_existing)
        if key not in self._dict or self._dict[key] in (, None):
            # Variable unset (or empty): just take the new value.
            self._dict[key] = val
        elif SCons.Util.is_Dict(self._dict[key]) and \
                SCons.Util.is_Dict(val):
            # Both dicts: merge key-by-key.
            self._dict[key].update(val)
        elif SCons.Util.is_List(val):
            dk = self._dict[key]
            if key == :
                # Normalize every incoming element to a tuple so entries
                # compare consistently regardless of original form.
                tmp = []
                for i in val:
                    if SCons.Util.is_List(i):
                        if len(i) >= 2:
                            tmp.append((i[0], i[1]))
                        else:
                            tmp.append((i[0],))
                    elif SCons.Util.is_Tuple(i):
                        tmp.append(i)
                    else:
                        tmp.append((i,))
                val = tmp
                # Normalize the existing value to a list of tuples as well.
                if SCons.Util.is_Dict(dk):
                    tmp = []
                    for (k, v) in dk.items():
                        if v is not None:
                            tmp.append((k, v))
                        else:
                            tmp.append((k,))
                    dk = tmp
                elif SCons.Util.is_String(dk):
                    dk = [(dk,)]
                else:
                    tmp = []
                    for i in dk:
                        if SCons.Util.is_List(i):
                            if len(i) >= 2:
                                tmp.append((i[0], i[1]))
                            else:
                                tmp.append((i[0],))
                        elif SCons.Util.is_Tuple(i):
                            tmp.append(i)
                        else:
                            tmp.append((i,))
                    dk = tmp
            else:
                if not SCons.Util.is_List(dk):
                    dk = [dk]
            if delete_existing:
                # Drop old occurrences so the new values land at the end.
                dk = [x for x in dk if x not in val]
            else:
                # Keep existing order; only append genuinely new values.
                val = [x for x in val if x not in dk]
            self._dict[key] = dk + val
        else:
            dk = self._dict[key]
            if SCons.Util.is_List(dk):
                if key == :
                    # Normalize existing entries to tuples.
                    tmp = []
                    for i in dk:
                        if SCons.Util.is_List(i):
                            if len(i) >= 2:
                                tmp.append((i[0], i[1]))
                            else:
                                tmp.append((i[0],))
                        elif SCons.Util.is_Tuple(i):
                            tmp.append(i)
                        else:
                            tmp.append((i,))
                    dk = tmp
                    # Normalize the incoming scalar/dict value to tuples too.
                    if SCons.Util.is_Dict(val):
                        tmp = []
                        for (k, v) in val.items():
                            if v is not None:
                                tmp.append((k, v))
                            else:
                                tmp.append((k,))
                        val = tmp
                    elif SCons.Util.is_String(val):
                        val = [(val,)]
                    if delete_existing:
                        dk = list(filter(lambda x, val=val: x not in val, dk))
                        self._dict[key] = dk + val
                    else:
                        dk = [x for x in dk if x not in val]
                        self._dict[key] = dk + val
                else:
                    # dk is a list but val is a scalar: append if absent.
                    if delete_existing:
                        dk = list(filter(lambda x, val=val: x not in val, dk))
                        self._dict[key] = dk + [val]
                    else:
                        if not val in dk:
                            self._dict[key] = dk + [val]
            else:
                if key == :
                    # Neither value is a list: coerce both before comparing.
                    if SCons.Util.is_String(dk):
                        dk = [dk]
                    elif SCons.Util.is_Dict(dk):
                        tmp = []
                        for (k, v) in dk.items():
                            if v is not None:
                                tmp.append((k, v))
                            else:
                                tmp.append((k,))
                        dk = tmp
                    if SCons.Util.is_String(val):
                        if val in dk:
                            val = []
                        else:
                            val = [val]
                    elif SCons.Util.is_Dict(val):
                        tmp = []
                        for i,j in val.items():
                            if j is not None:
                                tmp.append((i,j))
                            else:
                                tmp.append(i)
                        val = tmp
                if delete_existing:
                    dk = [x for x in dk if x not in val]
                self._dict[key] = dk + val
    # Changing these variables may invalidate cached scanner mappings.
    self.scanner_map_delete(kw)
def attr(self, name):
nodes = self._do_query(multiple=False)
val = self.poco.agent.hierarchy.getAttr(nodes, name)
if six.PY2 and isinstance(val, six.text_type):
val = val.encode()
return val | Retrieve the attribute of UI element by given attribute name. Return None if attribute does not exist.
If attribute type is :obj:`str`, it is encoded to utf-8 as :obj:`str` in Python2.7.
Args:
name:
attribute name, it can be one of the following or any other customized type implemented by SDK
- visible: whether or not it is visible to user
- text: string value of the UI element
- type: the type name of UI element from remote runtime
- pos: the position of the UI element
- size: the percentage size [width, height] in range of 0~1 according to the screen
- name: the name of UI element
- ...: other sdk implemented attributes
Returns:
None if no such attribute or its value is None/null/nil/etc. Otherwise the attribute value is returned. The
returned value type is json serializable. In both py2 and py3, if the attribute value in remote is a
text-like object, the return value type will be :obj:`str`.
Raises:
PocoNoSuchNodeException: when the UI element does not exists
.. note:: Exception :py:class:`NodeHasBeenRemovedException` is caught automatically.
See Also:
:py:meth:`UI element attributes in poco sdk definition <poco.sdk.AbstractNode.AbstractNode.getAttr>`. | ### Input:
Retrieve the attribute of UI element by given attribute name. Return None if attribute does not exist.
If attribute type is :obj:`str`, it is encoded to utf-8 as :obj:`str` in Python2.7.
Args:
name:
attribute name, it can be one of the following or any other customized type implemented by SDK
- visible: whether or not it is visible to user
- text: string value of the UI element
- type: the type name of UI element from remote runtime
- pos: the position of the UI element
- size: the percentage size [width, height] in range of 0~1 according to the screen
- name: the name of UI element
- ...: other sdk implemented attributes
Returns:
None if no such attribute or its value is None/null/nil/etc. Otherwise the attribute value is returned. The
returned value type is json serializable. In both py2 and py3, if the attribute value in remote is a
text-like object, the return value type will be :obj:`str`.
Raises:
PocoNoSuchNodeException: when the UI element does not exists
.. note:: Exception :py:class:`NodeHasBeenRemovedException` is caught automatically.
See Also:
:py:meth:`UI element attributes in poco sdk definition <poco.sdk.AbstractNode.AbstractNode.getAttr>`.
### Response:
def attr(self, name):
    """Retrieve attribute *name* of this UI element.

    Returns None when the attribute does not exist or its remote value is
    null. On Python 2, text values are encoded to UTF-8 ``str``.

    Args:
        name: attribute name (e.g. 'visible', 'text', 'type', 'pos',
            'size', 'name', or any SDK-specific attribute).

    Raises:
        PocoNoSuchNodeException: when the UI element does not exist.
    """
    nodes = self._do_query(multiple=False)
    val = self.poco.agent.hierarchy.getAttr(nodes, name)
    if six.PY2 and isinstance(val, six.text_type):
        # The encoding literal was lost in the extracted source; 'utf-8'
        # matches the documented "encoded to utf-8 as str in Python 2.7".
        val = val.encode('utf-8')
    return val
def _add_new_next_method(cls):
def new_next(self):
field_values = [next(g) for g in self.field_gens.values()]
return self.item_cls(*field_values)
cls.__next__ = new_next | TODO | ### Input:
TODO
### Response:
def _add_new_next_method(cls):
    """Install a ``__next__`` on *cls* that draws one value from each field
    generator and builds an ``item_cls`` instance from the drawn values."""
    def _next_item(self):
        drawn = [next(gen) for gen in self.field_gens.values()]
        return self.item_cls(*drawn)
    cls.__next__ = _next_item
def cmd_search(*args):
dsearch = get_docsearch()
verbose("Search: {}".format(" ".join(args)))
r = {: []}
docs = dsearch.find_documents(" ".join(args))
docs.sort(key=lambda doc: doc.docid)
for doc in docs:
r[].append({
: doc.docid,
: doc.nb_pages,
: [l.name for l in doc.labels],
})
reply(r) | Arguments: <keyword1> [<keyword2> [<keyword3> [...]]]
List the documents containing the keywords.
Syntax is the same than with the search field in Paperwork-gui.
Search "" (empty string) to get all the documents.
Example: 'label:contrat AND paperwork'
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"results" [
{"docid": "xxxx", "nb_pages": 22, "labels": ["xxx", "yyy"]}
{"docid": "yyyy", "nb_pages": 22, "labels": ["xxx", "yyy"]}
{"docid": "zzzz", "nb_pages": 22, "labels": ["xxx", "yyy"]}
],
} | ### Input:
Arguments: <keyword1> [<keyword2> [<keyword3> [...]]]
List the documents containing the keywords.
Syntax is the same than with the search field in Paperwork-gui.
Search "" (empty string) to get all the documents.
Example: 'label:contrat AND paperwork'
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"results" [
{"docid": "xxxx", "nb_pages": 22, "labels": ["xxx", "yyy"]}
{"docid": "yyyy", "nb_pages": 22, "labels": ["xxx", "yyy"]}
{"docid": "zzzz", "nb_pages": 22, "labels": ["xxx", "yyy"]}
],
}
### Response:
def cmd_search(*args):
    """Arguments: <keyword1> [<keyword2> [...]]

    List the documents containing the keywords. Syntax is the same as the
    search field in Paperwork-gui; an empty search returns all documents.

    Replies with JSON of the form::

        {"status": "ok",
         "results": [{"docid": ..., "nb_pages": ..., "labels": [...]}, ...]}
    """
    dsearch = get_docsearch()
    verbose("Search: {}".format(" ".join(args)))
    # Dict-key literals were lost in the extracted source; restored from the
    # documented reply schema ('results', 'docid', 'nb_pages', 'labels').
    r = {'results': []}
    docs = dsearch.find_documents(" ".join(args))
    docs.sort(key=lambda doc: doc.docid)
    for doc in docs:
        r['results'].append({
            'docid': doc.docid,
            'nb_pages': doc.nb_pages,
            'labels': [l.name for l in doc.labels],
        })
    reply(r)
def get_resource_area_by_host(self, area_id, host_id):
route_values = {}
if area_id is not None:
route_values[] = self._serialize.url(, area_id, )
query_parameters = {}
if host_id is not None:
query_parameters[] = self._serialize.query(, host_id, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response) | GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>` | ### Input:
GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
### Response:
def get_resource_area_by_host(self, area_id, host_id):
    """GetResourceAreaByHost.

    [Preview API]

    :param str area_id:
    :param str host_id:
    :rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`

    NOTE(review): every string literal in this method (route key, serializer
    names/types, HTTP method, location id GUID, API version, response model
    name) appears to have been lost in extraction -- restore them from the
    generated Azure DevOps client before use.
    """
    route_values = {}
    if area_id is not None:
        route_values[] = self._serialize.url(, area_id, )
    query_parameters = {}
    if host_id is not None:
        query_parameters[] = self._serialize.query(, host_id, )
    response = self._send(http_method=,
                          location_id=,
                          version=,
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize(, response)
def groups_rename(self, room_id, name, **kwargs):
return self.__call_api_post(, roomId=room_id, name=name, kwargs=kwargs) | Changes the name of the private group. | ### Input:
Changes the name of the private group.
### Response:
def groups_rename(self, room_id, name, **kwargs):
    """Change the name of the private group *room_id* to *name*.

    Posts to the Rocket.Chat 'groups.rename' REST endpoint and returns the
    API response.
    """
    # The endpoint literal was lost in the extracted source; restored to the
    # Rocket.Chat REST API method name matching this wrapper.
    return self.__call_api_post('groups.rename', roomId=room_id, name=name, kwargs=kwargs)
def get_download_progress(self):
downloaded = CRef.cint()
total = CRef.cint()
result = self._iface.get_dlc_download_progress(self.app_id, downloaded, total)
if not result:
return 0, 0
return int(downloaded), int(total) | Returns tuple with download progress (for optional DLCs):
(bytes_downloaded, bytes_total)
:rtype: tuple | ### Input:
Returns tuple with download progress (for optional DLCs):
(bytes_downloaded, bytes_total)
:rtype: tuple
### Response:
def get_download_progress(self):
    """Return download progress for an optional DLC as a tuple
    ``(bytes_downloaded, bytes_total)``; ``(0, 0)`` when unavailable."""
    done = CRef.cint()
    size = CRef.cint()
    ok = self._iface.get_dlc_download_progress(self.app_id, done, size)
    if ok:
        return int(done), int(size)
    return 0, 0
def send(hotkey, do_press=True, do_release=True):
_listener.is_replaying = True
parsed = parse_hotkey(hotkey)
for step in parsed:
if do_press:
for scan_codes in step:
_os_keyboard.press(scan_codes[0])
if do_release:
for scan_codes in reversed(step):
_os_keyboard.release(scan_codes[0])
_listener.is_replaying = False | Sends OS events that perform the given *hotkey* hotkey.
- `hotkey` can be either a scan code (e.g. 57 for space), single key
(e.g. 'space') or multi-key, multi-step hotkey (e.g. 'alt+F4, enter').
- `do_press` if true then press events are sent. Defaults to True.
- `do_release` if true then release events are sent. Defaults to True.
send(57)
send('ctrl+alt+del')
send('alt+F4, enter')
send('shift+s')
Note: keys are released in the opposite order they were pressed. | ### Input:
Sends OS events that perform the given *hotkey*.
- `hotkey` can be either a scan code (e.g. 57 for space), single key
(e.g. 'space') or multi-key, multi-step hotkey (e.g. 'alt+F4, enter').
- `do_press` if true then press events are sent. Defaults to True.
- `do_release` if true then release events are sent. Defaults to True.
send(57)
send('ctrl+alt+del')
send('alt+F4, enter')
send('shift+s')
Note: keys are released in the opposite order they were pressed.
### Response:
def send(hotkey, do_press=True, do_release=True):
    """Send OS events performing the given *hotkey*.

    *hotkey* may be a scan code (e.g. 57), a single key name ('space') or a
    multi-key, multi-step hotkey ('alt+F4, enter'). Press events are sent
    when *do_press* is true, release events when *do_release* is true; keys
    are released in the opposite order they were pressed.
    """
    _listener.is_replaying = True
    for step in parse_hotkey(hotkey):
        if do_press:
            for combo in step:
                _os_keyboard.press(combo[0])
        if do_release:
            for combo in reversed(step):
                _os_keyboard.release(combo[0])
    _listener.is_replaying = False
def listenQ2Q(self, fromAddress, protocolsToFactories, serverDescription):
myDomain = fromAddress.domainAddress()
D = self.getSecureConnection(fromAddress, myDomain)
def _secured(proto):
lfm = self.localFactoriesMapping
def startup(listenResult):
for protocol, factory in protocolsToFactories.iteritems():
key = (fromAddress, protocol)
if key not in lfm:
lfm[key] = []
lfm[key].append((factory, serverDescription))
factory.doStart()
def shutdown():
for protocol, factory in protocolsToFactories.iteritems():
lfm[fromAddress, protocol].remove(
(factory, serverDescription))
factory.doStop()
proto.notifyOnConnectionLost(shutdown)
return listenResult
if self.dispatcher is not None:
gp = proto.transport.getPeer()
udpAddress = (gp.host, gp.port)
pubUDPDeferred = self._retrievePublicUDPPortNumber(udpAddress)
else:
pubUDPDeferred = defer.succeed(None)
def _gotPubUDPPort(publicAddress):
self._publicUDPAddress = publicAddress
return proto.listen(fromAddress, protocolsToFactories.keys(),
serverDescription).addCallback(startup)
pubUDPDeferred.addCallback(_gotPubUDPPort)
return pubUDPDeferred
D.addCallback(_secured)
return D | Right now this is really only useful in the client implementation,
since it is transient. protocolFactoryFactory is used for persistent
listeners. | ### Input:
Right now this is really only useful in the client implementation,
since it is transient. protocolFactoryFactory is used for persistent
listeners.
### Response:
def listenQ2Q(self, fromAddress, protocolsToFactories, serverDescription):
    """Listen for the given protocols as *fromAddress* over a secure
    connection to its domain.

    Right now this is really only useful in the client implementation,
    since it is transient; protocolFactoryFactory is used for persistent
    listeners.

    Returns a Deferred that fires once the secure connection is set up and
    the listen request has been issued.
    """
    myDomain = fromAddress.domainAddress()
    D = self.getSecureConnection(fromAddress, myDomain)
    def _secured(proto):
        lfm = self.localFactoriesMapping
        def startup(listenResult):
            # Register each (address, protocol) -> factory mapping and
            # start the factories.
            for protocol, factory in protocolsToFactories.iteritems():
                key = (fromAddress, protocol)
                if key not in lfm:
                    lfm[key] = []
                lfm[key].append((factory, serverDescription))
                factory.doStart()
            def shutdown():
                # Undo the registrations when the connection goes away.
                for protocol, factory in protocolsToFactories.iteritems():
                    lfm[fromAddress, protocol].remove(
                        (factory, serverDescription))
                    factory.doStop()
            proto.notifyOnConnectionLost(shutdown)
            return listenResult
        if self.dispatcher is not None:
            # Discover our public UDP address before listening.
            gp = proto.transport.getPeer()
            udpAddress = (gp.host, gp.port)
            pubUDPDeferred = self._retrievePublicUDPPortNumber(udpAddress)
        else:
            pubUDPDeferred = defer.succeed(None)
        def _gotPubUDPPort(publicAddress):
            self._publicUDPAddress = publicAddress
            return proto.listen(fromAddress, protocolsToFactories.keys(),
                                serverDescription).addCallback(startup)
        pubUDPDeferred.addCallback(_gotPubUDPPort)
        return pubUDPDeferred
    D.addCallback(_secured)
    return D
def recvall(self, timeout=0.5):
response =
self.socket.setblocking(False)
start = time.time()
while True:
if response and time.time() - start > timeout:
break
elif time.time() - start > timeout * 2:
break
try:
data = self.socket.recv(4096)
if data:
response += data.replace(self._rconreplystring, )
start = time.time()
else:
time.sleep(0.1)
except socket.error:
pass
return response.strip() | Receive the RCON command response
:param timeout: The timeout between consequent data receive
:return str: The RCON command response with header stripped out | ### Input:
Receive the RCON command response
:param timeout: The timeout between consequent data receive
:return str: The RCON command response with header stripped out
### Response:
def recvall(self, timeout=0.5):
    """Receive the RCON command response.

    Reads from the non-blocking socket until *timeout* seconds pass with
    data already received, or ``2 * timeout`` seconds pass with no data at
    all. The RCON reply header is stripped from the accumulated response.

    :param timeout: idle timeout between consecutive receives
    :return str: the RCON command response with header stripped out
    """
    # The empty-string literals were lost in the extracted source
    # (`response =` and `.replace(self._rconreplystring, )`); restored.
    response = ''
    self.socket.setblocking(False)
    start = time.time()
    while True:
        if response and time.time() - start > timeout:
            break
        elif time.time() - start > timeout * 2:
            break
        try:
            data = self.socket.recv(4096)
            if data:
                response += data.replace(self._rconreplystring, '')
                start = time.time()  # reset the idle timer on new data
            else:
                time.sleep(0.1)
        except socket.error:
            pass
    return response.strip()
def mkdir(path, owner=, group=, perms=0o555, force=False):
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
os.makedirs(realpath, perms)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chmod(realpath, perms) | Create a directory | ### Input:
Create a directory
### Response:
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory with the given ownership and permissions.

    :param path: directory to create
    :param owner: user name owning the directory (default 'root')
    :param group: group name owning the directory (default 'root')
    :param perms: octal permission bits applied to the directory
    :param force: when True and *path* exists as a non-directory, remove it
        and create the directory in its place
    """
    # The owner/group default literals were lost in the extracted source;
    # 'root'/'root' matches the conventional defaults for this helper.
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    realpath = os.path.abspath(path)
    path_exists = os.path.exists(realpath)
    if path_exists and force:
        if not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
            os.makedirs(realpath, perms)
    elif not path_exists:
        os.makedirs(realpath, perms)
    os.chown(realpath, uid, gid)
    os.chmod(realpath, perms)
def parse(self, rev_string):
elements = rev_string.split(MESSAGE_LINE_SEPARATOR)
heading = elements[0]
heading_elements = heading.split(" ")
self.revision_id = heading_elements[2]
datetime_str = "{} {}".format(
heading_elements[0],
heading_elements[1]
)
self.release_date = datetime.datetime.strptime(
datetime_str,
DATETIME_FORMAT
)
self.description = elements[1]
self.message = elements[2] | :param rev_string:
:type rev_string: str | ### Input:
:param rev_string:
:type rev_string: str
### Response:
def parse(self, rev_string):
    """Populate revision fields from *rev_string*.

    The record's fields are separated by ``MESSAGE_LINE_SEPARATOR``; the
    heading field holds ``"<date> <time> <revision_id>"``.

    :param rev_string: serialized revision record
    :type rev_string: str
    """
    fields = rev_string.split(MESSAGE_LINE_SEPARATOR)
    head_parts = fields[0].split(" ")
    self.revision_id = head_parts[2]
    self.release_date = datetime.datetime.strptime(
        " ".join(head_parts[:2]),
        DATETIME_FORMAT
    )
    self.description = fields[1]
    self.message = fields[2]
def import_txt(filename, **kwargs):
with open(filename, ) as fid:
text = fid.read()
strings_to_replace = {
: ,
: ,
}
for key in strings_to_replace.keys():
text = text.replace(key, strings_to_replace[key])
buffer = StringIO(text)
data_raw = pd.read_csv(
buffer,
delim_whitespace=True,
)
data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
data = _convert_coords_to_abmn_X(
data_raw[[, , , ]],
**kwargs
)
data[] = data_raw[] / data_raw[]
data[] = data_raw[]
data[] = data_raw[]
rec_max = kwargs.get(, None)
if rec_max is not None:
print()
data[[, , , ]] = rec_max + 1 - data[[, , , ]]
return data, None, None | Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself | ### Input:
Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
### Response:
def import_txt(filename, **kwargs):
    """Import Syscal measurements from a text file exported as 'Spreadsheet'.

    Parameters
    ----------
    filename: string
        input filename
    x0: float, optional (kwargs)
        position of the first electrode
    spacing: float, optional (kwargs)
        electrode spacing
    reciprocals: int, optional (kwargs)
        if provided, treat as a reciprocal measurement and renumber
        electrodes as X_n = N - (X_a - 1)

    Returns
    -------
    data: pandas.DataFrame with the measurement data, plus ``None`` for the
    electrode and topography slots.

    NOTE(review): most string literals in this function (the file-open mode,
    the replacement table, column-name lists, DataFrame keys) appear to have
    been lost in extraction -- restore them from the original importer
    before use.
    """
    with open(filename, ) as fid:
        text = fid.read()
    # Replacement source/target literals lost -- see note above.
    strings_to_replace = {
        : ,
        : ,
    }
    for key in strings_to_replace.keys():
        text = text.replace(key, strings_to_replace[key])
    buffer = StringIO(text)
    data_raw = pd.read_csv(
        buffer,
        delim_whitespace=True,
    )
    # Strip whitespace from imported column names.
    data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
    data = _convert_coords_to_abmn_X(
        data_raw[[, , , ]],
        **kwargs
    )
    # Derived columns (literals lost -- see note above).
    data[] = data_raw[] / data_raw[]
    data[] = data_raw[]
    data[] = data_raw[]
    rec_max = kwargs.get(, None)
    if rec_max is not None:
        print()
        # Renumber electrodes for reciprocal measurements: X_n = N + 1 - X_a.
        data[[, , , ]] = rec_max + 1 - data[[, , , ]]
    return data, None, None
def gruneisen_parameter(self, temperature, volume):
    """Compute the Gruneisen parameter (unitless).

    Uses the Mie-Gruneisen formulation when ``self.use_mie_gruneisen`` is
    set; otherwise the Slater-gamma formulation
    gamma = -(1/6 + 0.5 * V * (dB/dV) / B).

    Args:
        temperature (float): temperature in K
        volume (float): volume in Ang^3

    Returns:
        float
    """
    if isinstance(self.eos, PolynomialEOS):
        # Polynomial EOS: differentiate the fitted polynomial analytically.
        p = np.poly1d(self.eos.eos_params)
        dEdV = np.polyder(p, 1)(volume)
        d2EdV2 = np.polyder(p, 2)(volume)
        d3EdV3 = np.polyder(p, 3)(volume)
    else:
        # Otherwise differentiate the fitted EOS function numerically.
        func = self.ev_eos_fit.func
        dEdV = derivative(func, volume, dx=1e-3)
        d2EdV2 = derivative(func, volume, dx=1e-3, n=2, order=5)
        d3EdV3 = derivative(func, volume, dx=1e-3, n=3, order=7)
    if self.use_mie_gruneisen:
        # Mie-Gruneisen: gamma = V * (pressure + p0) / U_vib, with unit
        # conversion between GPa and eV/Ang^3.
        p0 = dEdV
        return (self.gpa_to_ev_ang * volume *
                (self.pressure + p0 / self.gpa_to_ev_ang) /
                self.vibrational_internal_energy(temperature, volume))
    # Slater-gamma: dB/dV = d^2E/dV^2 + V * d^3E/dV^3.
    # NOTE(review): the tail of the final line below is dataset-column
    # residue fused onto the code during extraction.
    dBdV = d2EdV2 + d3EdV3 * volume
    return -(1./6. + 0.5 * volume * dBdV /
             FloatWithUnit(self.ev_eos_fit.b0_GPa, "GPa").to("eV ang^-3")) | Slater-gamma formulation(the default):
gruneisen paramter = - d log(theta)/ d log(V)
= - ( 1/6 + 0.5 d log(B)/ d log(V) )
= - (1/6 + 0.5 V/B dB/dV),
where dB/dV = d^2E/dV^2 + V * d^3E/dV^3
Mie-gruneisen formulation:
Eq(31) in doi.org/10.1016/j.comphy.2003.12.001
Eq(7) in Blanco et. al. Joumal of Molecular Structure (Theochem)
368 (1996) 245-255
Also se J.P. Poirier, Introduction to the Physics of the Earth’s
Interior, 2nd ed. (Cambridge University Press, Cambridge,
2000) Eq(3.53)
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: unitless | ### Input:
Slater-gamma formulation(the default):
gruneisen paramter = - d log(theta)/ d log(V)
= - ( 1/6 + 0.5 d log(B)/ d log(V) )
= - (1/6 + 0.5 V/B dB/dV),
where dB/dV = d^2E/dV^2 + V * d^3E/dV^3
Mie-gruneisen formulation:
Eq(31) in doi.org/10.1016/j.comphy.2003.12.001
Eq(7) in Blanco et. al. Joumal of Molecular Structure (Theochem)
368 (1996) 245-255
Also se J.P. Poirier, Introduction to the Physics of the Earth’s
Interior, 2nd ed. (Cambridge University Press, Cambridge,
2000) Eq(3.53)
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: unitless
### Response:
def gruneisen_parameter(self, temperature, volume):
if isinstance(self.eos, PolynomialEOS):
p = np.poly1d(self.eos.eos_params)
dEdV = np.polyder(p, 1)(volume)
d2EdV2 = np.polyder(p, 2)(volume)
d3EdV3 = np.polyder(p, 3)(volume)
else:
func = self.ev_eos_fit.func
dEdV = derivative(func, volume, dx=1e-3)
d2EdV2 = derivative(func, volume, dx=1e-3, n=2, order=5)
d3EdV3 = derivative(func, volume, dx=1e-3, n=3, order=7)
if self.use_mie_gruneisen:
p0 = dEdV
return (self.gpa_to_ev_ang * volume *
(self.pressure + p0 / self.gpa_to_ev_ang) /
self.vibrational_internal_energy(temperature, volume))
dBdV = d2EdV2 + d3EdV3 * volume
return -(1./6. + 0.5 * volume * dBdV /
FloatWithUnit(self.ev_eos_fit.b0_GPa, "GPa").to("eV ang^-3")) |
def buttons_pressed(self, channel=1):
self._ensure_mode(self.MODE_IR_REMOTE)
channel = self._normalize_channel(channel)
return self._BUTTON_VALUES.get(self.value(channel), []) | Returns list of currently pressed buttons.
Note that the sensor can only identify up to two buttons pressed at once. | ### Input:
Returns list of currently pressed buttons.
Note that the sensor can only identify up to two buttons pressed at once.
### Response:
def buttons_pressed(self, channel=1):
self._ensure_mode(self.MODE_IR_REMOTE)
channel = self._normalize_channel(channel)
return self._BUTTON_VALUES.get(self.value(channel), []) |
def get_command_and_args(self):
r
p_call = self.call.replace(r, )
tab = p_call.split()
return tab[0].strip(), [s.replace(, ) for s in tab[1:]] | r"""We want to get the command and the args with ! splitting.
but don't forget to protect against the \! to avoid splitting on them
Remember: A Nagios-like command is command_name!arg1!arg2!...
:return: None | ### Input:
r"""We want to get the command and the args with ! splitting.
but don't forget to protect against the \! to avoid splitting on them
Remember: A Nagios-like command is command_name!arg1!arg2!...
:return: None
### Response:
def get_command_and_args(self):
r
p_call = self.call.replace(r, )
tab = p_call.split()
return tab[0].strip(), [s.replace(, ) for s in tab[1:]] |
def is_scalar(value):
return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0)) | Test if the given value is a scalar.
This function also works with memory mapped array values, in contrast to the numpy is_scalar method.
Args:
value: the value to test for being a scalar value
Returns:
boolean: if the given value is a scalar or not | ### Input:
Test if the given value is a scalar.
This function also works with memory mapped array values, in contrast to the numpy is_scalar method.
Args:
value: the value to test for being a scalar value
Returns:
boolean: if the given value is a scalar or not
### Response:
def is_scalar(value):
return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0)) |
def fro(self, statement):
if not self.name_format:
return self.fail_safe_fro(statement)
result = {}
for attribute in statement.attribute:
if attribute.name_format and self.name_format and \
attribute.name_format != self.name_format:
continue
try:
(key, val) = self.ava_from(attribute)
except (KeyError, AttributeError):
pass
else:
result[key] = val
return result | Get the attributes and the attribute values.
:param statement: The AttributeStatement.
:return: A dictionary containing attributes and values | ### Input:
Get the attributes and the attribute values.
:param statement: The AttributeStatement.
:return: A dictionary containing attributes and values
### Response:
def fro(self, statement):
if not self.name_format:
return self.fail_safe_fro(statement)
result = {}
for attribute in statement.attribute:
if attribute.name_format and self.name_format and \
attribute.name_format != self.name_format:
continue
try:
(key, val) = self.ava_from(attribute)
except (KeyError, AttributeError):
pass
else:
result[key] = val
return result |
def _setup_rpc(self):
endpoints = RpcCallBacks(self)
self.server = rpc.DfaRpcServer(self.ser_q, self._host,
self.cfg.dfa_rpc.transport_url,
endpoints,
exchange=constants.DFA_EXCHANGE) | Setup RPC server for dfa server. | ### Input:
Setup RPC server for dfa server.
### Response:
def _setup_rpc(self):
endpoints = RpcCallBacks(self)
self.server = rpc.DfaRpcServer(self.ser_q, self._host,
self.cfg.dfa_rpc.transport_url,
endpoints,
exchange=constants.DFA_EXCHANGE) |
def namedb_get_namespace_preorder( db, namespace_preorder_hash, current_block ):
cur = db.cursor()
select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
args = (namespace_preorder_hash, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE)
preorder_rows = namedb_query_execute( cur, select_query, args )
preorder_row = preorder_rows.fetchone()
if preorder_row is None:
return None
preorder_rec = {}
preorder_rec.update( preorder_row )
cur = db.cursor()
select_query = "SELECT preorder_hash FROM namespaces WHERE preorder_hash = ? AND ((op = ?) OR (op = ? AND reveal_block < ?));"
args = (namespace_preorder_hash, NAMESPACE_READY, NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE)
ns_rows = namedb_query_execute( cur, select_query, args )
ns_row = ns_rows.fetchone()
if ns_row is not None:
return None
return preorder_rec | Get a namespace preorder, given its hash.
Return the preorder record on success.
Return None if not found, or if it expired, or if the namespace was revealed or readied. | ### Input:
Get a namespace preorder, given its hash.
Return the preorder record on success.
Return None if not found, or if it expired, or if the namespace was revealed or readied.
### Response:
def namedb_get_namespace_preorder( db, namespace_preorder_hash, current_block ):
cur = db.cursor()
select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
args = (namespace_preorder_hash, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE)
preorder_rows = namedb_query_execute( cur, select_query, args )
preorder_row = preorder_rows.fetchone()
if preorder_row is None:
return None
preorder_rec = {}
preorder_rec.update( preorder_row )
cur = db.cursor()
select_query = "SELECT preorder_hash FROM namespaces WHERE preorder_hash = ? AND ((op = ?) OR (op = ? AND reveal_block < ?));"
args = (namespace_preorder_hash, NAMESPACE_READY, NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE)
ns_rows = namedb_query_execute( cur, select_query, args )
ns_row = ns_rows.fetchone()
if ns_row is not None:
return None
return preorder_rec |
def _format_default(client, value):
if isinstance(value, File):
return os.path.relpath(
str((client.workflow_path / value.path).resolve())
)
return value | Format default values. | ### Input:
Format default values.
### Response:
def _format_default(client, value):
if isinstance(value, File):
return os.path.relpath(
str((client.workflow_path / value.path).resolve())
)
return value |
def vectors(self, direction="all", failed=False):
if direction not in ["all", "incoming", "outgoing"]:
raise ValueError(
"{} is not a valid vector direction. "
"Must be all, incoming or outgoing.".format(direction))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
if failed == "all":
if direction == "all":
return Vector.query\
.filter(or_(Vector.destination_id == self.id,
Vector.origin_id == self.id))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id)\
.all()
else:
if direction == "all":
return Vector.query\
.filter(and_(Vector.failed == failed,
or_(Vector.destination_id == self.id,
Vector.origin_id == self.id)))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id, failed=failed)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id, failed=failed)\
.all() | Get vectors that connect at this node.
Direction can be "incoming", "outgoing" or "all" (default).
Failed can be True, False or all | ### Input:
Get vectors that connect at this node.
Direction can be "incoming", "outgoing" or "all" (default).
Failed can be True, False or all
### Response:
def vectors(self, direction="all", failed=False):
if direction not in ["all", "incoming", "outgoing"]:
raise ValueError(
"{} is not a valid vector direction. "
"Must be all, incoming or outgoing.".format(direction))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
if failed == "all":
if direction == "all":
return Vector.query\
.filter(or_(Vector.destination_id == self.id,
Vector.origin_id == self.id))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id)\
.all()
else:
if direction == "all":
return Vector.query\
.filter(and_(Vector.failed == failed,
or_(Vector.destination_id == self.id,
Vector.origin_id == self.id)))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id, failed=failed)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id, failed=failed)\
.all() |
def correct_penultimate_dactyl_chain(self, scansion: str) -> str:
mark_list = string_utils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
n_vals = vals[:-7] + [self.constants.DACTYL + self.constants.DACTYL] + [vals[-1]]
corrected = "".join(n_vals)
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line) | For pentameter the last two feet of the verse are predictable dactyls,
and do not regularly allow substitutions.
:param scansion: scansion line thus far
:return: corrected line of scansion
>>> print(PentameterScanner().correct_penultimate_dactyl_chain(
... "U U U U U U U U U U U U U U"))
U U U U U U U - U U - U U U | ### Input:
For pentameter the last two feet of the verse are predictable dactyls,
and do not regularly allow substitutions.
:param scansion: scansion line thus far
:return: corrected line of scansion
>>> print(PentameterScanner().correct_penultimate_dactyl_chain(
... "U U U U U U U U U U U U U U"))
U U U U U U U - U U - U U U
### Response:
def correct_penultimate_dactyl_chain(self, scansion: str) -> str:
mark_list = string_utils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
n_vals = vals[:-7] + [self.constants.DACTYL + self.constants.DACTYL] + [vals[-1]]
corrected = "".join(n_vals)
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line) |
def init_map(self):
d = self.declaration
if d.show_location:
self.set_show_location(d.show_location)
if d.show_traffic:
self.set_show_traffic(d.show_traffic)
if d.show_indoors:
self.set_show_indoors(d.show_indoors)
if d.show_buildings:
self.set_show_buildings(d.show_buildings)
mapview = self.map
mid = mapview.getId()
mapview.onCameraChange.connect(self.on_camera_changed)
mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
mapview.onCameraIdle.connect(self.on_camera_move_stopped)
mapview.setOnCameraChangeListener(mid)
mapview.setOnCameraMoveStartedListener(mid)
mapview.setOnCameraMoveCanceledListener(mid)
mapview.setOnCameraIdleListener(mid)
mapview.onMapClick.connect(self.on_map_clicked)
mapview.setOnMapClickListener(mid)
mapview.onMapLongClick.connect(self.on_map_long_clicked)
mapview.setOnMapLongClickListener(mid)
mapview.onMarkerClick.connect(self.on_marker_clicked)
mapview.setOnMarkerClickListener(self.map.getId())
mapview.onMarkerDragStart.connect(self.on_marker_drag_start)
mapview.onMarkerDrag.connect(self.on_marker_drag)
mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)
mapview.setOnMarkerDragListener(mid)
mapview.onInfoWindowClick.connect(self.on_info_window_clicked)
mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)
mapview.onInfoWindowClose.connect(self.on_info_window_closed)
mapview.setOnInfoWindowClickListener(mid)
mapview.setOnInfoWindowCloseListener(mid)
mapview.setOnInfoWindowLongClickListener(mid)
mapview.onPolygonClick.connect(self.on_poly_clicked)
mapview.onPolylineClick.connect(self.on_poly_clicked)
mapview.setOnPolygonClickListener(mid)
mapview.setOnPolylineClickListener(mid)
mapview.onCircleClick.connect(self.on_circle_clicked)
mapview.setOnCircleClickListener(mid) | Add markers, polys, callouts, etc.. | ### Input:
Add markers, polys, callouts, etc..
### Response:
def init_map(self):
d = self.declaration
if d.show_location:
self.set_show_location(d.show_location)
if d.show_traffic:
self.set_show_traffic(d.show_traffic)
if d.show_indoors:
self.set_show_indoors(d.show_indoors)
if d.show_buildings:
self.set_show_buildings(d.show_buildings)
mapview = self.map
mid = mapview.getId()
mapview.onCameraChange.connect(self.on_camera_changed)
mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
mapview.onCameraIdle.connect(self.on_camera_move_stopped)
mapview.setOnCameraChangeListener(mid)
mapview.setOnCameraMoveStartedListener(mid)
mapview.setOnCameraMoveCanceledListener(mid)
mapview.setOnCameraIdleListener(mid)
mapview.onMapClick.connect(self.on_map_clicked)
mapview.setOnMapClickListener(mid)
mapview.onMapLongClick.connect(self.on_map_long_clicked)
mapview.setOnMapLongClickListener(mid)
mapview.onMarkerClick.connect(self.on_marker_clicked)
mapview.setOnMarkerClickListener(self.map.getId())
mapview.onMarkerDragStart.connect(self.on_marker_drag_start)
mapview.onMarkerDrag.connect(self.on_marker_drag)
mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)
mapview.setOnMarkerDragListener(mid)
mapview.onInfoWindowClick.connect(self.on_info_window_clicked)
mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)
mapview.onInfoWindowClose.connect(self.on_info_window_closed)
mapview.setOnInfoWindowClickListener(mid)
mapview.setOnInfoWindowCloseListener(mid)
mapview.setOnInfoWindowLongClickListener(mid)
mapview.onPolygonClick.connect(self.on_poly_clicked)
mapview.onPolylineClick.connect(self.on_poly_clicked)
mapview.setOnPolygonClickListener(mid)
mapview.setOnPolylineClickListener(mid)
mapview.onCircleClick.connect(self.on_circle_clicked)
mapview.setOnCircleClickListener(mid) |
def get_grade_system_lookup_session(self, proxy):
if not self.supports_grade_system_lookup():
raise errors.Unimplemented()
return sessions.GradeSystemLookupSession(proxy=proxy, runtime=self._runtime) | Gets the ``OsidSession`` associated with the grade system lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemLookupSession) - a
``GradeSystemLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` is ``true``.* | ### Input:
Gets the ``OsidSession`` associated with the grade system lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemLookupSession) - a
``GradeSystemLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` is ``true``.*
### Response:
def get_grade_system_lookup_session(self, proxy):
if not self.supports_grade_system_lookup():
raise errors.Unimplemented()
return sessions.GradeSystemLookupSession(proxy=proxy, runtime=self._runtime) |
def get_and_cache_account(self, addr):
if addr in self.cache:
return self.cache[addr]
rlpdata = self.secure_trie.get(addr)
if (
rlpdata == trie.BLANK_NODE and len(addr) == 32
):
rlpdata = self.trie.get(addr)
if rlpdata != trie.BLANK_NODE:
o = rlp.decode(rlpdata, Account, db=self.db, addr=addr)
else:
o = Account.blank_account(self.db, addr, 0)
self.cache[addr] = o
o._mutable = True
o._cached_rlp = None
return o | Gets and caches an account for an addres, creates blank if not
found.
:param addr:
:return: | ### Input:
Gets and caches an account for an addres, creates blank if not
found.
:param addr:
:return:
### Response:
def get_and_cache_account(self, addr):
if addr in self.cache:
return self.cache[addr]
rlpdata = self.secure_trie.get(addr)
if (
rlpdata == trie.BLANK_NODE and len(addr) == 32
):
rlpdata = self.trie.get(addr)
if rlpdata != trie.BLANK_NODE:
o = rlp.decode(rlpdata, Account, db=self.db, addr=addr)
else:
o = Account.blank_account(self.db, addr, 0)
self.cache[addr] = o
o._mutable = True
o._cached_rlp = None
return o |
def setText(self, sequence):
self.setToolTip(sequence)
super(ShortcutLineEdit, self).setText(sequence) | Qt method extension. | ### Input:
Qt method extension.
### Response:
def setText(self, sequence):
self.setToolTip(sequence)
super(ShortcutLineEdit, self).setText(sequence) |
def fetch(url, fullpath):
logger.debug("Fetching %s from %s" % (fullpath, url))
try:
tmpfile, headers = urlretrieve(url)
if os.path.exists(fullpath):
os.remove(fullpath)
shutil.move(tmpfile, fullpath)
except Exception as e:
logger.error("Error during fetching: " + str(e))
raise | Fetch data from an URL and save it under the given target name. | ### Input:
Fetch data from an URL and save it under the given target name.
### Response:
def fetch(url, fullpath):
logger.debug("Fetching %s from %s" % (fullpath, url))
try:
tmpfile, headers = urlretrieve(url)
if os.path.exists(fullpath):
os.remove(fullpath)
shutil.move(tmpfile, fullpath)
except Exception as e:
logger.error("Error during fetching: " + str(e))
raise |
def render_crispy_form(form, helper=None, context=None):
from crispy_forms.templatetags.crispy_forms_tags import CrispyFormNode
if helper is not None:
node = CrispyFormNode(, )
else:
node = CrispyFormNode(, None)
node_context = Context(context)
node_context.update({
: form,
: helper
})
return node.render(node_context) | Renders a form and returns its HTML output.
This function wraps the template logic in a function easy to use in a Django view. | ### Input:
Renders a form and returns its HTML output.
This function wraps the template logic in a function easy to use in a Django view.
### Response:
def render_crispy_form(form, helper=None, context=None):
from crispy_forms.templatetags.crispy_forms_tags import CrispyFormNode
if helper is not None:
node = CrispyFormNode(, )
else:
node = CrispyFormNode(, None)
node_context = Context(context)
node_context.update({
: form,
: helper
})
return node.render(node_context) |
def input_relative_images(input_path, image_destination, rootname, config):
log.debug()
input_dirname = os.path.dirname(input_path)
for path in config.input_relative_images:
if in path:
path = path.replace(, rootname)
log.debug(.format(path))
images = os.path.normpath(os.path.join(input_dirname, path))
if os.path.isdir(images):
log.info(.format(images))
shutil.copytree(images, image_destination)
return True
return False | The method used to handle Input-Relative image inclusion. | ### Input:
The method used to handle Input-Relative image inclusion.
### Response:
def input_relative_images(input_path, image_destination, rootname, config):
log.debug()
input_dirname = os.path.dirname(input_path)
for path in config.input_relative_images:
if in path:
path = path.replace(, rootname)
log.debug(.format(path))
images = os.path.normpath(os.path.join(input_dirname, path))
if os.path.isdir(images):
log.info(.format(images))
shutil.copytree(images, image_destination)
return True
return False |
def is_running(self):
self.__update_status()
return self.status == Status.UP or self.status == Status.DECOMMISSIONED | Return true if the node is running | ### Input:
Return true if the node is running
### Response:
def is_running(self):
self.__update_status()
return self.status == Status.UP or self.status == Status.DECOMMISSIONED |
def serialize(self):
import datetime
items = []
time_begin = datetime.datetime.now()
for href in self.list():
items.append(self.get(href).item)
time_end = datetime.datetime.now()
self.logger.info(
"Collection read %d items in %s sec from %s", len(items),
(time_end - time_begin).total_seconds(), self.path)
if self.get_meta("tag") == "VCALENDAR":
collection = vobject.iCalendar()
for item in items:
for content in ("vevent", "vtodo", "vjournal"):
if content in item.contents:
for item_part in getattr(item, "%s_list" % content):
collection.add(item_part)
break
return collection.serialize()
elif self.get_meta("tag") == "VADDRESSBOOK":
return "".join([item.serialize() for item in items])
return "" | Get the unicode string representing the whole collection. | ### Input:
Get the unicode string representing the whole collection.
### Response:
def serialize(self):
import datetime
items = []
time_begin = datetime.datetime.now()
for href in self.list():
items.append(self.get(href).item)
time_end = datetime.datetime.now()
self.logger.info(
"Collection read %d items in %s sec from %s", len(items),
(time_end - time_begin).total_seconds(), self.path)
if self.get_meta("tag") == "VCALENDAR":
collection = vobject.iCalendar()
for item in items:
for content in ("vevent", "vtodo", "vjournal"):
if content in item.contents:
for item_part in getattr(item, "%s_list" % content):
collection.add(item_part)
break
return collection.serialize()
elif self.get_meta("tag") == "VADDRESSBOOK":
return "".join([item.serialize() for item in items])
return "" |
def get_asset_ddos():
args = []
query = dict()
args.append(query)
assets_with_id = dao.get_all_listed_assets()
assets_metadata = {a[]: a for a in assets_with_id}
for i in assets_metadata:
_sanitize_record(i)
return Response(json.dumps(assets_metadata, default=_my_converter), 200,
content_type=) | Get DDO of all assets.
---
tags:
- ddo
responses:
200:
description: successful action | ### Input:
Get DDO of all assets.
---
tags:
- ddo
responses:
200:
description: successful action
### Response:
def get_asset_ddos():
args = []
query = dict()
args.append(query)
assets_with_id = dao.get_all_listed_assets()
assets_metadata = {a[]: a for a in assets_with_id}
for i in assets_metadata:
_sanitize_record(i)
return Response(json.dumps(assets_metadata, default=_my_converter), 200,
content_type=) |
def do_allowaccess(self, line):
subject, permission = self._split_args(line, 1, 1)
self._command_processor.get_session().get_access_control().add_allowed_subject(
subject, permission
)
self._print_info_if_verbose(
.format(permission, subject)
) | allowaccess <subject> [access-level] Set the access level for subject Access
level is "read", "write" or "changePermission".
Access level defaults to "read" if not specified. Special subjects: public:
Any subject, authenticated and not authenticated authenticatedUser: Any
subject that has authenticated with CILogon verifiedUser: Any subject that has
authenticated with CILogon and has been verified by DataONE | ### Input:
allowaccess <subject> [access-level] Set the access level for subject Access
level is "read", "write" or "changePermission".
Access level defaults to "read" if not specified. Special subjects: public:
Any subject, authenticated and not authenticated authenticatedUser: Any
subject that has authenticated with CILogon verifiedUser: Any subject that has
authenticated with CILogon and has been verified by DataONE
### Response:
def do_allowaccess(self, line):
subject, permission = self._split_args(line, 1, 1)
self._command_processor.get_session().get_access_control().add_allowed_subject(
subject, permission
)
self._print_info_if_verbose(
.format(permission, subject)
) |
def get_backend_tfvars_file(path, environment, region):
backend_filenames = gen_backend_tfvars_files(environment, region)
for name in backend_filenames:
if os.path.isfile(os.path.join(path, name)):
return name
return backend_filenames[-1] | Determine Terraform backend file. | ### Input:
Determine Terraform backend file.
### Response:
def get_backend_tfvars_file(path, environment, region):
backend_filenames = gen_backend_tfvars_files(environment, region)
for name in backend_filenames:
if os.path.isfile(os.path.join(path, name)):
return name
return backend_filenames[-1] |
def get_groups(self, env, token):
groups = None
memcache_client = cache_from_env(env)
if memcache_client:
memcache_key = % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
s3_auth_details = env.get()
if s3_auth_details:
if not self.s3_support:
self.logger.warning()
return None
if self.swauth_remote:
self.logger.warning(
)
return None
try:
account, user = s3_auth_details[].split(, 1)
signature_from_user = s3_auth_details[]
msg = s3_auth_details[]
except Exception:
self.logger.debug(
%
(s3_auth_details, ))
return None
path = quote( % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
if in resp.headers:
account_id = resp.headers[]
else:
path = quote( % (self.auth_account, account))
resp2 = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp2.status_int // 100 != 2:
return None
account_id = resp2.headers[]
path = env[]
env[] = path.replace("%s:%s" % (account, user),
account_id, 1)
detail = json.loads(resp.body)
if detail:
creds = detail.get()
try:
auth_encoder, creds_dict = \
swauth.authtypes.validate_creds(creds)
except ValueError as e:
self.logger.error( % e.args[0])
return None
password = creds_dict[]
if isinstance(password, six.text_type):
password = password.encode()
if isinstance(msg, six.text_type):
msg = msg.encode()
valid_signature = base64.encodestring(hmac.new(
password, msg, sha1).digest()).strip()
if signature_from_user != valid_signature:
return None
groups = [g[] for g in detail[]]
if in groups:
groups.remove()
groups.append(account_id)
groups = .join(groups)
return groups
if not groups:
if self.swauth_remote:
with Timeout(self.swauth_remote_timeout):
conn = http_connect(self.swauth_remote_parsed.hostname,
self.swauth_remote_parsed.port, ,
% (self.swauth_remote_parsed.path,
quote(token)),
ssl=(self.swauth_remote_parsed.scheme == ))
resp = conn.getresponse()
resp.read()
conn.close()
if resp.status // 100 != 2:
return None
expires_from_now = float(resp.getheader())
groups = resp.getheader()
if memcache_client:
memcache_client.set(
memcache_key, (time() + expires_from_now, groups),
time=expires_from_now)
else:
object_name = self._get_concealed_token(token)
path = quote( %
(self.auth_account, object_name[-1], object_name))
resp = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
detail = json.loads(resp.body)
if detail[] < time():
self.make_pre_authed_request(
env, , path).get_response(self.app)
return None
groups = [g[] for g in detail[]]
if in groups:
groups.remove()
groups.append(detail[])
groups = .join(groups)
if memcache_client:
memcache_client.set(
memcache_key,
(detail[], groups),
time=float(detail[] - time()))
return groups | Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user. | ### Input:
Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
### Response:
def get_groups(self, env, token):
groups = None
memcache_client = cache_from_env(env)
if memcache_client:
memcache_key = % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
s3_auth_details = env.get()
if s3_auth_details:
if not self.s3_support:
self.logger.warning()
return None
if self.swauth_remote:
self.logger.warning(
)
return None
try:
account, user = s3_auth_details[].split(, 1)
signature_from_user = s3_auth_details[]
msg = s3_auth_details[]
except Exception:
self.logger.debug(
%
(s3_auth_details, ))
return None
path = quote( % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
if in resp.headers:
account_id = resp.headers[]
else:
path = quote( % (self.auth_account, account))
resp2 = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp2.status_int // 100 != 2:
return None
account_id = resp2.headers[]
path = env[]
env[] = path.replace("%s:%s" % (account, user),
account_id, 1)
detail = json.loads(resp.body)
if detail:
creds = detail.get()
try:
auth_encoder, creds_dict = \
swauth.authtypes.validate_creds(creds)
except ValueError as e:
self.logger.error( % e.args[0])
return None
password = creds_dict[]
if isinstance(password, six.text_type):
password = password.encode()
if isinstance(msg, six.text_type):
msg = msg.encode()
valid_signature = base64.encodestring(hmac.new(
password, msg, sha1).digest()).strip()
if signature_from_user != valid_signature:
return None
groups = [g[] for g in detail[]]
if in groups:
groups.remove()
groups.append(account_id)
groups = .join(groups)
return groups
if not groups:
if self.swauth_remote:
with Timeout(self.swauth_remote_timeout):
conn = http_connect(self.swauth_remote_parsed.hostname,
self.swauth_remote_parsed.port, ,
% (self.swauth_remote_parsed.path,
quote(token)),
ssl=(self.swauth_remote_parsed.scheme == ))
resp = conn.getresponse()
resp.read()
conn.close()
if resp.status // 100 != 2:
return None
expires_from_now = float(resp.getheader())
groups = resp.getheader()
if memcache_client:
memcache_client.set(
memcache_key, (time() + expires_from_now, groups),
time=expires_from_now)
else:
object_name = self._get_concealed_token(token)
path = quote( %
(self.auth_account, object_name[-1], object_name))
resp = self.make_pre_authed_request(
env, , path).get_response(self.app)
if resp.status_int // 100 != 2:
return None
detail = json.loads(resp.body)
if detail[] < time():
self.make_pre_authed_request(
env, , path).get_response(self.app)
return None
groups = [g[] for g in detail[]]
if in groups:
groups.remove()
groups.append(detail[])
groups = .join(groups)
if memcache_client:
memcache_client.set(
memcache_key,
(detail[], groups),
time=float(detail[] - time()))
return groups |
def _delete_node_storage(self, node, is_root=False):
if node == BLANK_NODE:
return
encoded = rlp_encode(node)
if len(encoded) < 32 and not is_root:
return
hashkey = utils.sha3(encoded)
self.db.dec_refcount(hashkey) | delete storage
:param node: node in form of list, or BLANK_NODE | ### Input:
delete storage
:param node: node in form of list, or BLANK_NODE
### Response:
def _delete_node_storage(self, node, is_root=False):
if node == BLANK_NODE:
return
encoded = rlp_encode(node)
if len(encoded) < 32 and not is_root:
return
hashkey = utils.sha3(encoded)
self.db.dec_refcount(hashkey) |
def cublasZtpmv(handle, uplo, trans, diag, n, AP, x, incx):
status = _libcublas.cublasZtpmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(AP), int(x), incx)
cublasCheckStatus(status) | Matrix-vector product for complex triangular-packed matrix. | ### Input:
Matrix-vector product for complex triangular-packed matrix.
### Response:
def cublasZtpmv(handle, uplo, trans, diag, n, AP, x, incx):
status = _libcublas.cublasZtpmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(AP), int(x), incx)
cublasCheckStatus(status) |
def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.xpaths is not None:
_dict[] = self.xpaths
return _dict | Return a json dictionary representing this model. | ### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.xpaths is not None:
_dict[] = self.xpaths
return _dict |
def fix_addresses(start=None, end=None):
if start in (None, idaapi.BADADDR):
start = idaapi.cvar.inf.minEA
if end in (None, idaapi.BADADDR):
end = idaapi.cvar.inf.maxEA
return start, end | Set missing addresses to start and end of IDB.
Take a start and end addresses. If an address is None or `BADADDR`,
return start or end addresses of the IDB instead.
Args
start: Start EA. Use `None` to get IDB start.
end: End EA. Use `None` to get IDB end.
Returns:
(start, end) | ### Input:
Set missing addresses to start and end of IDB.
Take a start and end addresses. If an address is None or `BADADDR`,
return start or end addresses of the IDB instead.
Args
start: Start EA. Use `None` to get IDB start.
end: End EA. Use `None` to get IDB end.
Returns:
(start, end)
### Response:
def fix_addresses(start=None, end=None):
if start in (None, idaapi.BADADDR):
start = idaapi.cvar.inf.minEA
if end in (None, idaapi.BADADDR):
end = idaapi.cvar.inf.maxEA
return start, end |
def absent(name, version=-1, recursive=False, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: )
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True
nameresultcommentFailed to delete znode {0}changesprofilehostsschemeusernamepassworddefault_aclzookeeper.existsresultcommentZnode {0} does not existvaluezookeeper.getaclszookeeper.get_aclschildrenzookeeper.get_childrentestresultcommentZnode {0} will be removedchangesoldzookeeper.deletezookeeper.existsresultcommentZnode {0} has been removedchangesold'] = changes
return ret | Make sure znode is absent
name
path to znode
version
Specify the version which should be deleted
Default: -1 (always match)
recursive
Boolean to indicate if children should be recursively deleted
Default: False
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True | ### Input:
Make sure znode is absent
name
path to znode
version
Specify the version which should be deleted
Default: -1 (always match)
recursive
Boolean to indicate if children should be recursively deleted
Default: False
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True
### Response:
def absent(name, version=-1, recursive=False, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: )
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True
nameresultcommentFailed to delete znode {0}changesprofilehostsschemeusernamepassworddefault_aclzookeeper.existsresultcommentZnode {0} does not existvaluezookeeper.getaclszookeeper.get_aclschildrenzookeeper.get_childrentestresultcommentZnode {0} will be removedchangesoldzookeeper.deletezookeeper.existsresultcommentZnode {0} has been removedchangesold'] = changes
return ret |
def update_security_marks(
self,
security_marks,
update_mask=None,
start_time=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "update_security_marks" not in self._inner_api_calls:
self._inner_api_calls[
"update_security_marks"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_security_marks,
default_retry=self._method_configs["UpdateSecurityMarks"].retry,
default_timeout=self._method_configs["UpdateSecurityMarks"].timeout,
client_info=self._client_info,
)
request = securitycenter_service_pb2.UpdateSecurityMarksRequest(
security_marks=security_marks,
update_mask=update_mask,
start_time=start_time,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("security_marks.name", security_marks.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_security_marks"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Updates security marks.
Example:
>>> from google.cloud import securitycenter_v1
>>>
>>> client = securitycenter_v1.SecurityCenterClient()
>>>
>>> # TODO: Initialize `security_marks`:
>>> security_marks = {}
>>>
>>> response = client.update_security_marks(security_marks)
Args:
security_marks (Union[dict, ~google.cloud.securitycenter_v1.types.SecurityMarks]): The security marks resource to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.SecurityMarks`
update_mask (Union[dict, ~google.cloud.securitycenter_v1.types.FieldMask]): The FieldMask to use when updating the security marks resource.
The field mask must not contain duplicate fields. If empty or set to
"marks", all marks will be replaced. Individual marks can be updated
using "marks.<mark\_key>".
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.FieldMask`
start_time (Union[dict, ~google.cloud.securitycenter_v1.types.Timestamp]): The time at which the updated SecurityMarks take effect.
If not set uses current server time. Updates will be applied to the
SecurityMarks that are active immediately preceding this time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.Timestamp`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.securitycenter_v1.types.SecurityMarks` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | ### Input:
Updates security marks.
Example:
>>> from google.cloud import securitycenter_v1
>>>
>>> client = securitycenter_v1.SecurityCenterClient()
>>>
>>> # TODO: Initialize `security_marks`:
>>> security_marks = {}
>>>
>>> response = client.update_security_marks(security_marks)
Args:
security_marks (Union[dict, ~google.cloud.securitycenter_v1.types.SecurityMarks]): The security marks resource to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.SecurityMarks`
update_mask (Union[dict, ~google.cloud.securitycenter_v1.types.FieldMask]): The FieldMask to use when updating the security marks resource.
The field mask must not contain duplicate fields. If empty or set to
"marks", all marks will be replaced. Individual marks can be updated
using "marks.<mark\_key>".
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.FieldMask`
start_time (Union[dict, ~google.cloud.securitycenter_v1.types.Timestamp]): The time at which the updated SecurityMarks take effect.
If not set uses current server time. Updates will be applied to the
SecurityMarks that are active immediately preceding this time.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.securitycenter_v1.types.Timestamp`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.securitycenter_v1.types.SecurityMarks` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def update_security_marks(
self,
security_marks,
update_mask=None,
start_time=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "update_security_marks" not in self._inner_api_calls:
self._inner_api_calls[
"update_security_marks"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_security_marks,
default_retry=self._method_configs["UpdateSecurityMarks"].retry,
default_timeout=self._method_configs["UpdateSecurityMarks"].timeout,
client_info=self._client_info,
)
request = securitycenter_service_pb2.UpdateSecurityMarksRequest(
security_marks=security_marks,
update_mask=update_mask,
start_time=start_time,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("security_marks.name", security_marks.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_security_marks"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def get_modis_tile_list(ds):
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list | Helper function to identify MODIS tiles that intersect input geometry
modis_gird.py contains dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html | ### Input:
Helper function to identify MODIS tiles that intersect input geometry
modis_gird.py contains dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
### Response:
def get_modis_tile_list(ds):
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list |
def extract_relation_instance(cls, field_name, field, resource_instance, serializer):
relation_instance = None
try:
relation_instance = getattr(resource_instance, field_name)
except AttributeError:
try:
relation_instance = getattr(resource_instance, field.child_relation.source)
except AttributeError:
if hasattr(serializer, field.source):
serializer_method = getattr(serializer, field.source)
relation_instance = serializer_method(resource_instance)
else:
try:
relation_instance = getattr(resource_instance, field.source)
except AttributeError:
pass
return relation_instance | Determines what instance represents given relation and extracts it.
Relation instance is determined by given field_name or source configured on
field. As fallback is a serializer method called with name of field's source. | ### Input:
Determines what instance represents given relation and extracts it.
Relation instance is determined by given field_name or source configured on
field. As fallback is a serializer method called with name of field's source.
### Response:
def extract_relation_instance(cls, field_name, field, resource_instance, serializer):
relation_instance = None
try:
relation_instance = getattr(resource_instance, field_name)
except AttributeError:
try:
relation_instance = getattr(resource_instance, field.child_relation.source)
except AttributeError:
if hasattr(serializer, field.source):
serializer_method = getattr(serializer, field.source)
relation_instance = serializer_method(resource_instance)
else:
try:
relation_instance = getattr(resource_instance, field.source)
except AttributeError:
pass
return relation_instance |
def delta_e_cie1994(lab_color_vector, lab_color_matrix,
K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
S_L = 1
S_C = 1 + K_1 * C_1
S_H = 1 + K_2 * C_1
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[K_L * S_L], [K_C * S_C], [K_H * S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0)) | Calculates the Delta E (CIE1994) of two colors.
K_l:
0.045 graphic arts
0.048 textiles
K_2:
0.015 graphic arts
0.014 textiles
K_L:
1 default
2 textiles | ### Input:
Calculates the Delta E (CIE1994) of two colors.
K_l:
0.045 graphic arts
0.048 textiles
K_2:
0.015 graphic arts
0.014 textiles
K_L:
1 default
2 textiles
### Response:
def delta_e_cie1994(lab_color_vector, lab_color_matrix,
K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
S_L = 1
S_C = 1 + K_1 * C_1
S_H = 1 + K_2 * C_1
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[K_L * S_L], [K_C * S_C], [K_H * S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0)) |
def register(cls, plugin):
plugs = getattr(cls, % cls.__name__, None)
if plugs is None:
cls.loadPlugins()
plugs = getattr(cls, % cls.__name__, {})
if plugin.name() in plugs:
inst = plugs[plugin.name()]
if isinstance(inst, PluginProxy) and \
not isinstance(plugin, PluginProxy) and \
not inst._instance:
inst._instance = plugin
return True
return False
plugs[plugin.name()] = plugin
setattr(cls, % cls.__name__, plugs)
return True | Registers the given plugin instance to this system. If a plugin with
the same name is already registered, then this plugin will not take
effect. The first registered plugin is the one that is used.
:param plugin | <Plugin>
:return <bool> | ### Input:
Registers the given plugin instance to this system. If a plugin with
the same name is already registered, then this plugin will not take
effect. The first registered plugin is the one that is used.
:param plugin | <Plugin>
:return <bool>
### Response:
def register(cls, plugin):
plugs = getattr(cls, % cls.__name__, None)
if plugs is None:
cls.loadPlugins()
plugs = getattr(cls, % cls.__name__, {})
if plugin.name() in plugs:
inst = plugs[plugin.name()]
if isinstance(inst, PluginProxy) and \
not isinstance(plugin, PluginProxy) and \
not inst._instance:
inst._instance = plugin
return True
return False
plugs[plugin.name()] = plugin
setattr(cls, % cls.__name__, plugs)
return True |
def log_url (self, url_data):
self.writeln()
if self.has_part():
self.write_url(url_data)
if url_data.name and self.has_part():
self.write_name(url_data)
if url_data.parent_url and self.has_part():
self.write_parent(url_data)
if url_data.base_ref and self.has_part():
self.write_base(url_data)
if url_data.url and self.has_part():
self.write_real(url_data)
if url_data.checktime and self.has_part():
self.write_checktime(url_data)
if url_data.dltime >= 0 and self.has_part():
self.write_dltime(url_data)
if url_data.size >= 0 and self.has_part():
self.write_size(url_data)
if url_data.info and self.has_part():
self.write_info(url_data)
if url_data.modified and self.has_part():
self.write_modified(url_data)
if url_data.warnings and self.has_part():
self.write_warning(url_data)
if self.has_part():
self.write_result(url_data)
self.flush() | Write url checking info. | ### Input:
Write url checking info.
### Response:
def log_url (self, url_data):
self.writeln()
if self.has_part():
self.write_url(url_data)
if url_data.name and self.has_part():
self.write_name(url_data)
if url_data.parent_url and self.has_part():
self.write_parent(url_data)
if url_data.base_ref and self.has_part():
self.write_base(url_data)
if url_data.url and self.has_part():
self.write_real(url_data)
if url_data.checktime and self.has_part():
self.write_checktime(url_data)
if url_data.dltime >= 0 and self.has_part():
self.write_dltime(url_data)
if url_data.size >= 0 and self.has_part():
self.write_size(url_data)
if url_data.info and self.has_part():
self.write_info(url_data)
if url_data.modified and self.has_part():
self.write_modified(url_data)
if url_data.warnings and self.has_part():
self.write_warning(url_data)
if self.has_part():
self.write_result(url_data)
self.flush() |
def prepare(self):
agent_configs = []
if self.config:
agent_configs = self.config_manager.getconfig(
self.config, self.default_target)
for config in agent_configs:
if config[] in [, , ]:
client = self.clients[](
config, self.old_style_configs, kill_old=self.kill_old)
else:
client = self.clients[](
config, self.old_style_configs, timeout=5, kill_old=self.kill_old)
logger.debug(, client.host)
agent_config, startup_config, customs_script = client.install()
if agent_config:
self.agents.append(client)
self.artifact_files.append(agent_config)
if startup_config:
self.artifact_files.append(startup_config)
if customs_script:
self.artifact_files.append(customs_script) | Prepare for monitoring - install agents etc | ### Input:
Prepare for monitoring - install agents etc
### Response:
def prepare(self):
agent_configs = []
if self.config:
agent_configs = self.config_manager.getconfig(
self.config, self.default_target)
for config in agent_configs:
if config[] in [, , ]:
client = self.clients[](
config, self.old_style_configs, kill_old=self.kill_old)
else:
client = self.clients[](
config, self.old_style_configs, timeout=5, kill_old=self.kill_old)
logger.debug(, client.host)
agent_config, startup_config, customs_script = client.install()
if agent_config:
self.agents.append(client)
self.artifact_files.append(agent_config)
if startup_config:
self.artifact_files.append(startup_config)
if customs_script:
self.artifact_files.append(customs_script) |
def sanity_checks(self):
if self._config.state.sanity_checked:
return
log.info("Sanity checks: ".format(self._name))
HAS_DOCKER_PY = None
try:
from ansible.module_utils.docker_common import HAS_DOCKER_PY
except ImportError:
try:
from ansible.module_utils.docker.common import HAS_DOCKER_PY
except ImportError:
pass
if not HAS_DOCKER_PY:
msg = (
"install via or refer to "
)
sysexit_with_message(msg)
try:
import docker
import requests
docker_client = docker.from_env()
docker_client.ping()
except requests.exceptions.ConnectionError:
msg = (
)
sysexit_with_message(msg)
self._config.state.change_state(, True) | Implement Docker driver sanity checks. | ### Input:
Implement Docker driver sanity checks.
### Response:
def sanity_checks(self):
if self._config.state.sanity_checked:
return
log.info("Sanity checks: ".format(self._name))
HAS_DOCKER_PY = None
try:
from ansible.module_utils.docker_common import HAS_DOCKER_PY
except ImportError:
try:
from ansible.module_utils.docker.common import HAS_DOCKER_PY
except ImportError:
pass
if not HAS_DOCKER_PY:
msg = (
"install via or refer to "
)
sysexit_with_message(msg)
try:
import docker
import requests
docker_client = docker.from_env()
docker_client.ping()
except requests.exceptions.ConnectionError:
msg = (
)
sysexit_with_message(msg)
self._config.state.change_state(, True) |
def param_from_rasters(script, useDistanceWeight=True, useImgBorderWeight=True,
useAlphaWeight=False, cleanIsolatedTriangles=True,
stretchingAllowed=False, textureGutter=4):
filter_xml = .join([
,
,
% str(useDistanceWeight).lower(),
,
,
,
,
,
% str(useImgBorderWeight).lower(),
,
,
,
,
,
% str(useAlphaWeight).lower(),
,
,
,
,
,
% str(cleanIsolatedTriangles).lower(),
,
,
,
,
,
% str(stretchingAllowed).lower(),
,
,
,
,
,
% textureGutter,
,
,
,
,
])
util.write_filter(script, filter_xml)
return None | Set texture | ### Input:
Set texture
### Response:
def param_from_rasters(script, useDistanceWeight=True, useImgBorderWeight=True,
useAlphaWeight=False, cleanIsolatedTriangles=True,
stretchingAllowed=False, textureGutter=4):
filter_xml = .join([
,
,
% str(useDistanceWeight).lower(),
,
,
,
,
,
% str(useImgBorderWeight).lower(),
,
,
,
,
,
% str(useAlphaWeight).lower(),
,
,
,
,
,
% str(cleanIsolatedTriangles).lower(),
,
,
,
,
,
% str(stretchingAllowed).lower(),
,
,
,
,
,
% textureGutter,
,
,
,
,
])
util.write_filter(script, filter_xml)
return None |
def _auto_help_text(self, help_text):
api_doc_delimiter =
begin_api_doc = help_text.find(api_doc_delimiter)
if begin_api_doc >= 0:
end_api_doc = help_text.rfind(api_doc_delimiter) + len(api_doc_delimiter)
help_text = help_text[:begin_api_doc] + help_text[end_api_doc:]
an_prefix = (, , , )
if not self.resource_name.lower().startswith(an_prefix):
help_text = help_text.replace(,
% self.resource_name)
if self.resource_name.lower().endswith():
help_text = help_text.replace(
,
% self.resource_name[:-1],
)
help_text = help_text.replace(, self.resource_name)
help_text = help_text.replace(, )
help_text = help_text.replace(,
)
for match in re.findall(r, help_text):
option = % match.replace(, )
help_text = help_text.replace( % match, option)
return help_text | Given a method with a docstring, convert the docstring
to more CLI appropriate wording, and also disambiguate the
word "object" on the base class docstrings. | ### Input:
Given a method with a docstring, convert the docstring
to more CLI appropriate wording, and also disambiguate the
word "object" on the base class docstrings.
### Response:
def _auto_help_text(self, help_text):
api_doc_delimiter =
begin_api_doc = help_text.find(api_doc_delimiter)
if begin_api_doc >= 0:
end_api_doc = help_text.rfind(api_doc_delimiter) + len(api_doc_delimiter)
help_text = help_text[:begin_api_doc] + help_text[end_api_doc:]
an_prefix = (, , , )
if not self.resource_name.lower().startswith(an_prefix):
help_text = help_text.replace(,
% self.resource_name)
if self.resource_name.lower().endswith():
help_text = help_text.replace(
,
% self.resource_name[:-1],
)
help_text = help_text.replace(, self.resource_name)
help_text = help_text.replace(, )
help_text = help_text.replace(,
)
for match in re.findall(r, help_text):
option = % match.replace(, )
help_text = help_text.replace( % match, option)
return help_text |
def gut_message(message: Message) -> Message:
wrapper = Message()
wrapper.add_header(, ,
access_type=,
expiration=time.strftime("%a, %d %b %Y %H:%M:%S %z"),
size=str(len(message.get_payload())))
message.set_payload()
wrapper.set_payload([message])
return wrapper | Remove body from a message, and wrap in a message/external-body. | ### Input:
Remove body from a message, and wrap in a message/external-body.
### Response:
def gut_message(message: Message) -> Message:
wrapper = Message()
wrapper.add_header(, ,
access_type=,
expiration=time.strftime("%a, %d %b %Y %H:%M:%S %z"),
size=str(len(message.get_payload())))
message.set_payload()
wrapper.set_payload([message])
return wrapper |
def _compute_intra_event_std(self, C, vs30, pga1100, sigma_pga):
sig_lnyb = np.sqrt(C[] ** 2. - C[] ** 2.)
sig_lnab = np.sqrt(sigma_pga ** 2. - C[] ** 2.)
alpha = self._compute_intra_event_alpha(C, vs30, pga1100)
return np.sqrt(
(sig_lnyb ** 2.) +
(C[] ** 2.) +
((alpha ** 2.) * (sig_lnab ** 2.)) +
(2.0 * alpha * C[] * sig_lnyb * sig_lnab)) | Returns the intra-event standard deviation at the site, as defined in
equation 15, page 147 | ### Input:
Returns the intra-event standard deviation at the site, as defined in
equation 15, page 147
### Response:
def _compute_intra_event_std(self, C, vs30, pga1100, sigma_pga):
sig_lnyb = np.sqrt(C[] ** 2. - C[] ** 2.)
sig_lnab = np.sqrt(sigma_pga ** 2. - C[] ** 2.)
alpha = self._compute_intra_event_alpha(C, vs30, pga1100)
return np.sqrt(
(sig_lnyb ** 2.) +
(C[] ** 2.) +
((alpha ** 2.) * (sig_lnab ** 2.)) +
(2.0 * alpha * C[] * sig_lnyb * sig_lnab)) |
def _get_rule_cmds(self, sg_id, sg_rule, delete=False):
rule_prefix = ""
if delete:
rule_prefix = "no "
in_rules, eg_rules = self._format_rules_for_eos([sg_rule])
cmds = []
if in_rules:
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
for in_rule in in_rules:
cmds.append(rule_prefix + in_rule)
cmds.append("exit")
if eg_rules:
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
for eg_rule in eg_rules:
cmds.append(rule_prefix + eg_rule)
cmds.append("exit")
return cmds | Helper for getting add/delete ACL rule commands | ### Input:
Helper for getting add/delete ACL rule commands
### Response:
def _get_rule_cmds(self, sg_id, sg_rule, delete=False):
rule_prefix = ""
if delete:
rule_prefix = "no "
in_rules, eg_rules = self._format_rules_for_eos([sg_rule])
cmds = []
if in_rules:
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
for in_rule in in_rules:
cmds.append(rule_prefix + in_rule)
cmds.append("exit")
if eg_rules:
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
for eg_rule in eg_rules:
cmds.append(rule_prefix + eg_rule)
cmds.append("exit")
return cmds |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.