def patch_env(env, path, value):
""" Set specified value to yaml path.
Example:
patch('application/components/child/configuration/__locator.application-id','777')
Will change child app ID to 777
"""
def pathGet(dictionary, path):
for item in path.split("/"):
dictionary = dictionary[item]
return dictionary
def pathSet(dictionary, path, value):
path = path.split("/")
key = path[-1]
dictionary = pathGet(dictionary, "/".join(path[:-1]))
dictionary[key] = value
pathSet(env, path, value)
return True |
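A minimal usage sketch for patch_env, assuming the YAML document has already been loaded into a plain dict (e.g. via yaml.safe_load); the nested keys mirror the docstring example:

env = {'application': {'components': {'child': {'configuration':
      {'__locator.application-id': '123'}}}}}
patch_env(env, 'application/components/child/configuration/__locator.application-id', '777')
# the nested value is updated in place
print(env['application']['components']['child']['configuration']['__locator.application-id'])  # 777 |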
def get_starter_kit_meta(name):
"""
    Extract the metadata link for a starter kit from the platform configs. Starter kits are available in the "Add Component" -> "Starter Kit" menu.
    Beware: the config could be changed by deploy scripts during deployment.
:param name: Name of starter kit
:return: Link to metadata
"""
kits = yaml.safe_load(requests.get(url=starter_kits_url).content)['kits']
kits_meta_url = [x['metaUrl'] for x in kits if x['name'] == name]
    assert len(kits_meta_url) == 1, "No component %s found in meta:\n %s" % (name, kits)
meta = yaml.safe_load(requests.get(url=kits_meta_url[0]).content)['download_url']
return meta |
def get_manifest_from_meta(metaurl, name):
"""
    Extract the manifest url from a metadata url
    :param metaurl: Url to metadata
    :param name: Name of application to extract
    :return: Url to the application manifest
"""
if 'http' in metaurl:
kit = yaml.safe_load(requests.get(url=metaurl).content)['kit']['applications']
else:
kit = yaml.safe_load(open(metaurl).read())['kit']['applications']
app_urls = [x['manifest'] for x in kit if x['name'] == name]
assert len(app_urls) == 1
return app_urls[0] |
def getPayloadStruct(self, attributes, objType=None):
""" Function getPayloadStruct
Get the payload structure to do a creation or a modification
    @param attributes: The payload data
    @param objType: NOT USED in this class
    @return RETURN: The payload structure
"""
if self.setInParentPayload:
return {self.parentPayloadObject:
{self.payloadObj: attributes}}
else:
return {self.payloadObj: attributes} |
def log(function):
""" Function log
    Decorator to log the last requests before sending a new one
@return RETURN: None
"""
def _log(self, *args, **kwargs):
ret = function(self, *args, **kwargs)
if len(self.history) > self.maxHistory:
self.history = self.history[1:self.maxHistory]
self.history.append({'errorMsg': self.errorMsg,
'payload': self.payload,
'url': self.url,
'resp': self.resp,
'res': self.res,
'printErrors': self.printErrors,
'method': self.method})
self.clearReqVars()
return ret
return _log |
def clearReqVars(self):
""" Function clearHistVars
Clear the variables used to get history of all vars
@return RETURN: None
"""
self.errorMsg = None
self.payload = None
self.url = None
self.resp = None
self.res = None
self.method = None
self.printErrors = None |
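A hedged sketch of how ``log`` and ``clearReqVars`` cooperate, treating the unbound functions above as module-level helpers; ``DummyApi`` is a hypothetical stand-in for the real API class, which is not shown here:

class DummyApi:
    clearReqVars = clearReqVars  # reuse the function defined above as a method

    def __init__(self):
        self.history = []
        self.maxHistory = 5
        self.clearReqVars()

    @log
    def fetch(self, url):
        self.url = url
        self.method = 'GET'
        self.res = {'ok': True}
        return self.res

api = DummyApi()
api.fetch('https://example.org')
print(api.history[-1]['url'])  # https://example.org
print(api.url)                 # None -- cleared after the request was logged |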
def list(self, obj, filter=False, only_id=False, limit=20):
""" Function list
Get the list of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param filter: filter for objects
    @param only_id: boolean to only return dict with name/id
    @param limit: maximum number of objects to return per page
    @return RETURN: the list of the object
"""
self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit)
self.method = 'GET'
if filter:
self.url += '&search={}'.format(filter)
self.resp = requests.get(url=self.url, auth=self.auth,
headers=self.headers, cert=self.ca_cert)
if only_id:
if self.__process_resp__(obj) is False:
return False
if type(self.res['results']) is list:
return dict((x['name'], x['id']) for x in self.res['results'])
elif type(self.res['results']) is dict:
r = {}
for v in self.res['results'].values():
for vv in v:
r[vv['name']] = vv['id']
return r
else:
return False
else:
return self.__process_resp__(obj) |
def get(self, obj, id, sub_object=None):
""" Function get
Get an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
    @param id: the id of the object (name or id)
    @param sub_object: optional sub-object path appended to the url
    @return RETURN: the targeted object
"""
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'GET'
if sub_object:
self.url += '/' + sub_object
self.resp = requests.get(url=self.url, auth=self.auth,
headers=self.headers, cert=self.ca_cert)
if self.__process_resp__(obj):
return self.res
return False |
def get_id_by_name(self, obj, name):
""" Function get_id_by_name
Get the id of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the targeted object
"""
list = self.list(obj, filter='name = "{}"'.format(name),
only_id=True, limit=1)
return list[name] if name in list.keys() else False |
def set(self, obj, id, payload, action='', async_=False):  # 'async' is a reserved word in Python 3.7+
""" Function set
Set an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@param action: specific action of an object ('power'...)
@param payload: the dict of the payload
    @param async_: should this request be async; if true, use
    return.result() to get the response
@return RETURN: the server response
"""
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'PUT'
if action:
self.url += '/{}'.format(action)
self.payload = json.dumps(payload)
    if async_:
session = FuturesSession()
return session.put(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
else:
self.resp = requests.put(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
if self.__process_resp__(obj):
return self.res
return False |
def create(self, obj, payload, async_=False):  # 'async' is a reserved word in Python 3.7+
""" Function create
Create an new object
@param obj: object name ('hosts', 'puppetclasses'...)
@param payload: the dict of the payload
    @param async_: should this request be async; if true, use
    return.result() to get the response
@return RETURN: the server response
"""
self.url = self.base_url + obj
self.method = 'POST'
self.payload = json.dumps(payload)
    if async_:
self.method = 'POST(Async)'
session = FuturesSession()
self.resp = session.post(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
return self.resp
else:
self.resp = requests.post(url=self.url, auth=self.auth,
headers=self.headers,
data=self.payload, cert=self.ca_cert)
return self.__process_resp__(obj) |
def delete(self, obj, id):
""" Function delete
Delete an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the server response
"""
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'DELETE'
self.resp = requests.delete(url=self.url,
auth=self.auth,
headers=self.headers, cert=self.ca_cert)
return self.__process_resp__(obj) |
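The five helpers above form a small CRUD client. A hypothetical session against a Foreman-like API might look like the following; the ``ForemanApi`` constructor, URL and credentials are placeholders, not part of the snippets shown:

api = ForemanApi(base_url='https://foreman.example.org/api/',  # hypothetical class
                 auth=('admin', 'secret'))
hosts = api.list('hosts', filter='os = "Debian"', only_id=True)  # {'name': id, ...}
host_id = api.get_id_by_name('hosts', 'web01.example.org')
if host_id:
    api.set('hosts', host_id, {'host': {'comment': 'managed'}})
api.create('hostgroups', {'hostgroup': {'name': 'web'}})
api.delete('hostgroups', api.get_id_by_name('hostgroups', 'web')) |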
def run(self):
"""Modified ``run`` that captures return value and exceptions from ``target``"""
try:
if self._target:
return_value = self._target(*self._args, **self._kwargs)
if return_value is not None:
self._exception = OrphanedReturn(self, return_value)
except BaseException as err:
self._exception = err
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs |
def _start_payloads(self):
"""Start all queued payloads"""
with self._lock:
payloads = self._payloads.copy()
self._payloads.clear()
for subroutine in payloads:
thread = CapturingThread(target=subroutine)
thread.start()
self._threads.add(thread)
self._logger.debug('booted thread %s', thread)
time.sleep(0) |
def _reap_payloads(self):
"""Clean up all finished payloads"""
for thread in self._threads.copy():
    # CapturingThread.join re-raises any exception captured from its payload
if thread.join(timeout=0):
self._threads.remove(thread)
self._logger.debug('reaped thread %s', thread) |
def update_cache(func):
"""Decorate functions that modify the internally stored usernotes JSON.
Ensures that updates are mirrored onto reddit.
Arguments:
func: the function being decorated
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
"""The wrapper function."""
        lazy = kwargs.pop('lazy', False)
if not lazy:
self.get_json()
ret = func(self, *args, **kwargs)
# If returning a string assume it is an update message
if isinstance(ret, str) and not lazy:
self.set_json(ret)
else:
return ret
return wrapper |
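A minimal sketch of the decorator in use; ``UserNotes`` and its get_json/set_json methods are hypothetical stand-ins for the real cached-JSON class:

class UserNotes:
    def __init__(self):
        self.json = {'notes': {}}

    def get_json(self):
        print('pulling notes from reddit')

    def set_json(self, reason):
        print('pushing notes to reddit:', reason)

    @update_cache
    def add_note(self, user, note):
        self.json['notes'].setdefault(user, []).append(note)
        return 'added note for {}'.format(user)

notes = UserNotes()
notes.add_note('spez', 'test')              # pulls, then pushes the update message
notes.add_note('spez', 'again', lazy=True)  # skips both round-trips |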
def s(cls: Type[C], *args, **kwargs) -> Partial[C]:
"""
Create an unbound prototype of this class, partially applying arguments
.. code:: python
controller = Controller.s(interval=20)
pipeline = controller(rate=10) >> pool
"""
return Partial(cls, *args, **kwargs) |
def _build_mappings(
self, classes: Sequence[type]
) -> Tuple[Mapping[type, Sequence[type]], Mapping[type, Sequence[type]]]:
"""
Collect all bases and organize into parent/child mappings.
"""
parents_to_children: MutableMapping[type, Set[type]] = {}
children_to_parents: MutableMapping[type, Set[type]] = {}
visited_classes: Set[type] = set()
class_stack = list(classes)
while class_stack:
class_ = class_stack.pop()
if class_ in visited_classes:
continue
visited_classes.add(class_)
for base in class_.__bases__:
if base not in visited_classes:
class_stack.append(base)
parents_to_children.setdefault(base, set()).add(class_)
children_to_parents.setdefault(class_, set()).add(base)
sorted_parents_to_children: MutableMapping[
type, List[type]
] = collections.OrderedDict()
for parent, children in sorted(
parents_to_children.items(), key=lambda x: (x[0].__module__, x[0].__name__)
):
sorted_parents_to_children[parent] = sorted(
children, key=lambda x: (x.__module__, x.__name__)
)
sorted_children_to_parents: MutableMapping[
type, List[type]
] = collections.OrderedDict()
for child, parents in sorted(
children_to_parents.items(), key=lambda x: (x[0].__module__, x[0].__name__)
):
sorted_children_to_parents[child] = sorted(
parents, key=lambda x: (x.__module__, x.__name__)
)
return sorted_parents_to_children, sorted_children_to_parents |
def _collect_classes(
self, package_paths: Sequence[str], recurse_subpackages: bool = True
) -> Sequence[type]:
"""
Collect all classes defined in/under ``package_paths``.
"""
import uqbar.apis
classes = []
initial_source_paths: Set[str] = set()
# Graph source paths and classes
for path in package_paths:
try:
module = importlib.import_module(path)
if hasattr(module, "__path__"):
initial_source_paths.update(getattr(module, "__path__"))
else:
initial_source_paths.add(module.__file__)
except ModuleNotFoundError:
path, _, class_name = path.rpartition(".")
module = importlib.import_module(path)
classes.append(getattr(module, class_name))
# Iterate source paths
for source_path in uqbar.apis.collect_source_paths(
initial_source_paths, recurse_subpackages=recurse_subpackages
):
package_path = uqbar.apis.source_path_to_package_path(source_path)
module = importlib.import_module(package_path)
# Grab any defined classes
for name in dir(module):
if name.startswith("_"):
continue
object_ = getattr(module, name)
if isinstance(object_, type) and object_.__module__ == module.__name__:
classes.append(object_)
return sorted(classes, key=lambda x: (x.__module__, x.__name__)) |
def get_auth():
"""Return a tuple for authenticating a user
    If not successful raise ``GithubException``.
"""
auth = get_auth_from_env()
if auth[0] and auth[1]:
return auth
home = os.path.expanduser("~")
config = os.path.join(home, '.gitconfig')
if not os.path.isfile(config):
raise GithubException('No .gitconfig available')
parser = configparser.ConfigParser()
parser.read(config)
if 'user' in parser:
user = parser['user']
if 'username' not in user:
raise GithubException('Specify username in %s user '
'section' % config)
if 'token' not in user:
raise GithubException('Specify token in %s user section'
% config)
return user['username'], user['token']
else:
raise GithubException('No user section in %s' % config) |
def checkAndCreate(self, key, payload, osIds):
""" Function checkAndCreate
    Check if an architecture exists and create it if not
    @param key: The targeted architecture
    @param payload: The targeted architecture description
    @param osIds: The list of os ids linked with this architecture
@return RETURN: The id of the object
"""
if key not in self:
self[key] = payload
oid = self[key]['id']
if not oid:
return False
#~ To be sure the OS list is good, we ensure our os are in the list
for os in self[key]['operatingsystems']:
osIds.add(os['id'])
self[key]["operatingsystem_ids"] = list(osIds)
    if len(self[key]['operatingsystems']) != len(osIds):
return False
return oid |
def pip_command_output(pip_args):
"""
Get output (as a string) from pip command
    :param pip_args: list of pip switches to pass
:return: string with results
"""
import sys
import pip
from io import StringIO
# as pip will write to stdout we use some nasty hacks
# to substitute system stdout with our own
    old_stdout = sys.stdout
    sys.stdout = mystdout = StringIO()
    try:
        pip.main(pip_args)
        output = mystdout.getvalue()
    finally:
        # restore stdout even if the pip call raises
        sys.stdout = old_stdout
    return output |
def setup_versioneer():
"""
Generate (temporarily) versioneer.py file in project root directory
:return:
"""
try:
# assume versioneer.py was generated using "versioneer install" command
import versioneer
versioneer.get_version()
except ImportError:
        # it looks like versioneer.py is missing
        # let's assume that the versioneer package is installed
# and versioneer binary is present in $PATH
import subprocess
try:
# call versioneer install to generate versioneer.py
subprocess.check_output(["versioneer", "install"])
except OSError:
# it looks versioneer is missing from $PATH
# probably versioneer is installed in some user directory
# query pip for list of files in versioneer package
            # line below is equivalent to putting the result of
# "pip show -f versioneer" command to string output
output = pip_command_output(["show", "-f", "versioneer"])
# now we parse the results
import os
# find absolute path where *versioneer package* was installed
# and store it in main_path
main_path = [x[len("Location: "):] for x in output.splitlines()
if x.startswith("Location")][0]
# find path relative to main_path where
# *versioneer binary* was installed
bin_path = [x[len(" "):] for x in output.splitlines()
if x.endswith(os.path.sep + "versioneer")][0]
# exe_path is absolute path to *versioneer binary*
exe_path = os.path.join(main_path, bin_path)
# call versioneer install to generate versioneer.py
# line below is equivalent to running in terminal
# "python versioneer install"
subprocess.check_output(["python", exe_path, "install"]) |
def clean_cache():
"""
    Python won't realise that a new module has appeared at runtime,
    so we need to clean the cache of the module finders. Hacking again.
:return:
"""
import importlib
try: # Python ver < 3.3
vermod = importlib.import_module("versioneer")
globals()["versioneer"] = vermod
except ImportError:
importlib.invalidate_caches() |
def get_version():
"""
Get project version (using versioneer)
:return: string containing version
"""
setup_versioneer()
clean_cache()
import versioneer
version = versioneer.get_version()
parsed_version = parse_version(version)
if '*@' in str(parsed_version):
import time
version += str(int(time.time()))
return version |
def find_common_prefix(
paths: Sequence[Union[str, pathlib.Path]]
) -> Optional[pathlib.Path]:
"""
Find the common prefix of two or more paths.
::
>>> import pathlib
>>> one = pathlib.Path('foo/bar/baz')
>>> two = pathlib.Path('foo/quux/biz')
>>> three = pathlib.Path('foo/quux/wuux')
::
>>> import uqbar.io
>>> str(uqbar.io.find_common_prefix([one, two, three]))
'foo'
:param paths: paths to inspect
"""
counter: collections.Counter = collections.Counter()
for path in paths:
path = pathlib.Path(path)
counter.update([path])
counter.update(path.parents)
valid_paths = sorted(
[path for path, count in counter.items() if count >= len(paths)],
key=lambda x: len(x.parts),
)
if valid_paths:
return valid_paths[-1]
return None |
def find_executable(name: str, flags=os.X_OK) -> List[str]:
r"""Finds executable `name`.
Similar to Unix ``which`` command.
Returns list of zero or more full paths to `name`.
"""
result = []
extensions = [x for x in os.environ.get("PATHEXT", "").split(os.pathsep) if x]
    path_string = os.environ.get("PATH")
    if path_string is None:
        return []
    for directory in path_string.split(os.pathsep):
        path = os.path.join(directory, name)
if os.access(path, flags):
result.append(path)
for extension in extensions:
path_extension = path + extension
if os.access(path_extension, flags):
result.append(path_extension)
return result |
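Usage sketch (the result is system-dependent):

matches = find_executable('python3')
print(matches[0] if matches else 'python3 not found on PATH') |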
def relative_to(
source_path: Union[str, pathlib.Path], target_path: Union[str, pathlib.Path]
) -> pathlib.Path:
"""
Generates relative path from ``source_path`` to ``target_path``.
Handles the case of paths without a common prefix.
::
>>> import pathlib
>>> source = pathlib.Path('foo/bar/baz')
>>> target = pathlib.Path('foo/quux/biz')
::
>>> target.relative_to(source)
Traceback (most recent call last):
...
ValueError: 'foo/quux/biz' does not start with 'foo/bar/baz'
::
>>> import uqbar.io
>>> str(uqbar.io.relative_to(source, target))
'../../quux/biz'
:param source_path: the source path
:param target_path: the target path
"""
source_path = pathlib.Path(source_path).absolute()
if source_path.is_file():
source_path = source_path.parent
target_path = pathlib.Path(target_path).absolute()
common_prefix = find_common_prefix([source_path, target_path])
if not common_prefix:
raise ValueError("No common prefix")
source_path = source_path.relative_to(common_prefix)
target_path = target_path.relative_to(common_prefix)
result = pathlib.Path(*[".."] * len(source_path.parts))
return result / target_path |
def walk(
root_path: Union[str, pathlib.Path], top_down: bool = True
) -> Generator[
Tuple[pathlib.Path, Sequence[pathlib.Path], Sequence[pathlib.Path]], None, None
]:
"""
Walks a directory tree.
Like :py:func:`os.walk` but yielding instances of :py:class:`pathlib.Path`
instead of strings.
:param root_path: foo
:param top_down: bar
"""
root_path = pathlib.Path(root_path)
directory_paths, file_paths = [], []
for path in sorted(root_path.iterdir()):
if path.is_dir():
directory_paths.append(path)
else:
file_paths.append(path)
if top_down:
yield root_path, directory_paths, file_paths
for directory_path in directory_paths:
yield from walk(directory_path, top_down=top_down)
if not top_down:
yield root_path, directory_paths, file_paths |
def write(
contents: str,
path: Union[str, pathlib.Path],
verbose: bool = False,
logger_func=None,
) -> bool:
"""
Writes ``contents`` to ``path``.
Checks if ``path`` already exists and only write out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output
"""
print_func = logger_func or print
path = pathlib.Path(path)
if path.exists():
with path.open("r") as file_pointer:
old_contents = file_pointer.read()
if old_contents == contents:
if verbose:
print_func("preserved {}".format(path))
return False
else:
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("rewrote {}".format(path))
return True
    else:
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("wrote {}".format(path))
return True |
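Usage sketch for ``write``: the second call is a no-op because the contents are unchanged, which keeps file mtimes stable for incremental build tools (the path below is illustrative):

print(write('hello\n', 'build/output.txt', verbose=True))  # wrote build/output.txt -> True
print(write('hello\n', 'build/output.txt', verbose=True))  # preserved build/output.txt -> False |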
def pretty_ref(obj: Any) -> str:
"""Pretty object reference using ``module.path:qual.name`` format"""
try:
return obj.__module__ + ':' + obj.__qualname__
except AttributeError:
return pretty_ref(type(obj)) + '(...)' |
def remote(ctx):
"""Display repo github path
"""
with command():
m = RepoManager(ctx.obj['agile'])
click.echo(m.github_repo().repo_path) |
def graph_order(self):
"""
Get graph-order tuple for node.
::
>>> from uqbar.containers import UniqueTreeContainer, UniqueTreeNode
>>> root_container = UniqueTreeContainer(name="root")
>>> outer_container = UniqueTreeContainer(name="outer")
>>> inner_container = UniqueTreeContainer(name="inner")
>>> node_a = UniqueTreeNode(name="a")
>>> node_b = UniqueTreeNode(name="b")
>>> node_c = UniqueTreeNode(name="c")
>>> node_d = UniqueTreeNode(name="d")
>>> root_container.extend([node_a, outer_container])
>>> outer_container.extend([inner_container, node_d])
>>> inner_container.extend([node_b, node_c])
::
>>> for node in root_container.depth_first():
... print(node.name, node.graph_order)
...
a (0,)
outer (1,)
inner (1, 0)
b (1, 0, 0)
c (1, 0, 1)
d (1, 1)
"""
parentage = tuple(reversed(self.parentage))
graph_order = []
for i in range(len(parentage) - 1):
parent, child = parentage[i : i + 2]
graph_order.append(parent.index(child))
return tuple(graph_order) |
def sendQuery(self, cmd, multilines=False):
""" Send command, wait for response (single or multi lines), test for errors and return the returned code.
:param cmd: command to send
:param multilines: True - multiline response, False - single line response.
:return: command return value.
"""
self.logger.debug("sendQuery(%s)", cmd)
if not self.is_connected():
raise socket.error("sendQuery on a disconnected socket")
if multilines:
replies = self.__sendQueryReplies(cmd)
for reply in replies:
if reply.startswith(XenaSocket.reply_errors):
raise XenaCommandException('sendQuery({}) reply({})'.format(cmd, replies))
self.logger.debug("sendQuery(%s) -- Begin", cmd)
for l in replies:
self.logger.debug("%s", l.strip())
self.logger.debug("sendQuery(%s) -- End", cmd)
return replies
else:
reply = self.__sendQueryReply(cmd)
if reply.startswith(XenaSocket.reply_errors):
raise XenaCommandException('sendQuery({}) reply({})'.format(cmd, reply))
self.logger.debug('sendQuery(%s) reply(%s)', cmd, reply)
return reply |
def sendQueryVerify(self, cmd):
""" Send command without return value, wait for completion, verify success.
:param cmd: command to send
"""
cmd = cmd.strip()
self.logger.debug("sendQueryVerify(%s)", cmd)
if not self.is_connected():
raise socket.error("sendQueryVerify on a disconnected socket")
resp = self.__sendQueryReply(cmd)
if resp != self.reply_ok:
raise XenaCommandException('Command {} Fail Expected {} Actual {}'.format(cmd, self.reply_ok, resp))
self.logger.debug("SendQueryVerify(%s) Succeed", cmd) |
def find_external_files(self, run_input_dir):
"""
Scan all SHIELDHIT12A config files to find external files used and return them.
Also change paths in config files to match convention that all resources are
symlinked in job_xxxx/symlink
"""
beam_file, geo_file, mat_file, _ = self.input_files
# check for external files in BEAM input file
external_beam_files = self._parse_beam_file(beam_file, run_input_dir)
if external_beam_files:
logger.info("External files from BEAM file: {0}".format(external_beam_files))
else:
logger.debug("No external files from BEAM file")
# check for external files in MAT input file
icru_numbers = self._parse_mat_file(mat_file)
if icru_numbers:
logger.info("External files from MAT file: {0}".format(icru_numbers))
else:
logger.debug("No external files from MAT file")
    # if ICRU+LOADDEDX pairs were found - get file names for external material files
icru_files = []
if icru_numbers:
icru_files = self._decrypt_icru_files(icru_numbers)
# check for external files in GEO input file
geo_files = self._parse_geo_file(geo_file, run_input_dir)
if geo_files:
logger.info("External files from GEO file: {0}".format(geo_files))
else:
logger.debug("No external files from GEO file")
external_files = external_beam_files + icru_files + geo_files
return [os.path.join(self.input_path, e) for e in external_files] |
def _parse_beam_file(self, file_path, run_input_dir):
"""Scan SH12A BEAM file for references to external files and return them"""
external_files = []
paths_to_replace = []
with open(file_path, 'r') as beam_f:
for line in beam_f.readlines():
split_line = line.split()
# line length checking to prevent IndexError
if len(split_line) > 2 and split_line[0] == "USEBMOD":
logger.debug("Found reference to external file in BEAM file: {0} {1}".format(
split_line[0], split_line[2]))
external_files.append(split_line[2])
paths_to_replace.append(split_line[2])
elif len(split_line) > 1 and split_line[0] == "USECBEAM":
logger.debug("Found reference to external file in BEAM file: {0} {1}".format(
split_line[0], split_line[1]))
external_files.append(split_line[1])
paths_to_replace.append(split_line[1])
if paths_to_replace:
run_dir_config_file = os.path.join(run_input_dir, os.path.split(file_path)[-1])
logger.debug("Calling rewrite_paths method on file: {0}".format(run_dir_config_file))
self._rewrite_paths_in_file(run_dir_config_file, paths_to_replace)
return external_files |
def _parse_geo_file(self, file_path, run_input_dir):
"""Scan SH12A GEO file for references to external files (like voxelised geometry) and return them"""
external_files = []
paths_to_replace = []
with open(file_path, 'r') as geo_f:
for line in geo_f.readlines():
split_line = line.split()
if len(split_line) > 0 and not line.startswith("*"):
base_path = os.path.join(self.input_path, split_line[0])
if os.path.isfile(base_path + '.hed'):
logger.debug("Found ctx + hed files: {0}".format(base_path))
external_files.append(base_path + '.hed')
# try to find ctx file
if os.path.isfile(base_path + '.ctx'):
external_files.append(base_path + '.ctx')
elif os.path.isfile(base_path + '.ctx.gz'):
external_files.append(base_path + '.ctx.gz')
# replace path to match symlink location
paths_to_replace.append(split_line[0])
if paths_to_replace:
run_dir_config_file = os.path.join(run_input_dir, os.path.split(file_path)[-1])
logger.debug("Calling rewrite_paths method on file: {0}".format(run_dir_config_file))
self._rewrite_paths_in_file(run_dir_config_file, paths_to_replace)
return external_files |
def _parse_mat_file(self, file_path):
"""Scan SH12A MAT file for ICRU+LOADEX pairs and return found ICRU numbers"""
mat_file_sections = self._extract_mat_sections(file_path)
return self._analyse_mat_sections(mat_file_sections) |
def _analyse_mat_sections(sections):
"""
Cases:
- ICRU flag present, LOADDEDX flag missing -> data loaded from some data hardcoded in SH12A binary,
no need to load external files
- ICRU flag present, LOADDEDX flag present -> data loaded from external files. ICRU number read from ICRU flag,
any number following LOADDEDX flag is ignored.
- ICRU flag missing, LOADDEDX flag present -> data loaded from external files. ICRU number read from LOADDEDX
- ICRU flag missing, LOADDEDX flag missing -> nothing happens
"""
icru_numbers = []
for section in sections:
load_present = False
load_value = False
icru_value = False
for e in section:
split_line = e.split()
if "LOADDEDX" in e:
load_present = True
if len(split_line) > 1:
load_value = split_line[1] if "!" not in split_line[1] else False # ignore ! comments
elif "ICRU" in e and len(split_line) > 1:
icru_value = split_line[1] if "!" not in split_line[1] else False # ignore ! comments
if load_present: # LOADDEDX is present, so external file is required
if icru_value: # if ICRU value was given
icru_numbers.append(icru_value)
elif load_value: # if only LOADDEDX with values was present in section
icru_numbers.append(load_value)
return icru_numbers |
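A small check of the four documented cases, calling _analyse_mat_sections as a plain function (matching its missing self parameter); each section is a list of raw MAT-file lines as produced by the _extract_mat_sections helper referenced above, and the sample values are made up:

sections = [
    ['MEDIUM 1', 'ICRU 276', 'END'],              # ICRU only -> built-in data, no file
    ['MEDIUM 2', 'ICRU 277', 'LOADDEDX', 'END'],  # both -> external file for 277
    ['MEDIUM 3', 'LOADDEDX 170', 'END'],          # LOADDEDX only -> external file for 170
    ['MEDIUM 4', 'END'],                          # neither -> nothing happens
]
print(_analyse_mat_sections(sections))  # ['277', '170'] |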
def _decrypt_icru_files(numbers):
"""Find matching file names for given ICRU numbers"""
import json
icru_file = resource_string(__name__, os.path.join('data', 'SH12A_ICRU_table.json'))
ref_dict = json.loads(icru_file.decode('ascii'))
try:
return [ref_dict[e] for e in numbers]
except KeyError as er:
logger.error("There is no ICRU file for id: {0}".format(er))
raise |
def _rewrite_paths_in_file(config_file, paths_to_replace):
"""
Rewrite paths in config files to match convention job_xxxx/symlink
Requires path to run_xxxx/input/config_file and a list of paths_to_replace
"""
lines = []
# make a copy of config
import shutil
shutil.copyfile(config_file, str(config_file + '_original'))
with open(config_file) as infile:
for line in infile:
for old_path in paths_to_replace:
if old_path in line:
new_path = os.path.split(old_path)[-1]
line = line.replace(old_path, new_path)
logger.debug("Changed path {0} ---> {1} in file {2}".format(old_path, new_path, config_file))
lines.append(line)
with open(config_file, 'w') as outfile:
for line in lines:
outfile.write(line) |
def _check_exists(database: Database, table: LdapObjectClass, key: str, value: str):
""" Check if a given LDAP object exists. """
try:
get_one(table, Q(**{key: value}), database=database)
return True
except ObjectDoesNotExist:
return False |
def save_account(changes: Changeset, table: LdapObjectClass, database: Database) -> Changeset:
""" Modify a changes to add an automatically generated uidNumber. """
d = {}
settings = database.settings
uid_number = changes.get_value_as_single('uidNumber')
if uid_number is None:
scheme = settings['NUMBER_SCHEME']
first = settings.get('UID_FIRST', 10000)
d['uidNumber'] = Counters.get_and_increment(
scheme, "uidNumber", first,
lambda n: not _check_exists(database, table, 'uidNumber', n)
)
changes = changes.merge(d)
return changes |
def transform_source(text):
'''Replaces instances of
switch expression:
by
    for __case in _Switch(expression):
and replaces
case expression:
by
if __case(expression):
and
default:
by
if __case():
'''
toks = tokenize.generate_tokens(StringIO(text).readline)
result = []
replacing_keyword = False
for toktype, tokvalue, _, _, _ in toks:
if toktype == tokenize.NAME and tokvalue == 'switch':
result.extend([
(tokenize.NAME, 'for'),
(tokenize.NAME, '__case'),
(tokenize.NAME, 'in'),
(tokenize.NAME, '_Switch'),
(tokenize.OP, '(')
])
replacing_keyword = True
elif toktype == tokenize.NAME and (tokvalue == 'case' or tokvalue == 'default'):
result.extend([
(tokenize.NAME, 'if'),
(tokenize.NAME, '__case'),
(tokenize.OP, '(')
])
replacing_keyword = True
elif replacing_keyword and tokvalue == ':':
result.extend([
(tokenize.OP, ')'),
(tokenize.OP, ':')
])
replacing_keyword = False
else:
result.append((toktype, tokvalue))
return tokenize.untokenize(result) |
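Demonstration of the token rewriting, assuming the imports used above (tokenize, io.StringIO) are present at module level; exact whitespace in the output may vary, since untokenize only guarantees round-trip token equivalence:

src = (
    "switch n:\n"
    "    case 1:\n"
    "        print('one')\n"
    "    default:\n"
    "        print('other')\n"
)
print(transform_source(src))
# for __case in _Switch (n ):
#     if __case (1 ):
#         print('one')
#     if __case ():
#         print('other') |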
def search(self, base, scope, filterstr='(objectClass=*)',
attrlist=None, limit=None) -> Generator[Tuple[str, dict], None, None]:
"""
Search for entries in LDAP database.
"""
_debug("search", base, scope, filterstr, attrlist, limit)
# first results
if attrlist is None:
attrlist = ldap3.ALL_ATTRIBUTES
elif isinstance(attrlist, set):
attrlist = list(attrlist)
def first_results(obj):
_debug("---> searching ldap", limit)
obj.search(
base, filterstr, scope, attributes=attrlist, paged_size=limit)
return obj.response
# get the 1st result
result_list = self._do_with_retry(first_results)
# Loop over list of search results
for result_item in result_list:
# skip searchResRef for now
if result_item['type'] != "searchResEntry":
continue
dn = result_item['dn']
attributes = result_item['raw_attributes']
# did we already retrieve this from cache?
_debug("---> got ldap result", dn)
_debug("---> yielding", result_item)
yield (dn, attributes)
# we are finished - return results, eat cake
_debug("---> done")
return |
def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
"""
    Rename a dn in the LDAP database; see the ldap module. Doesn't return a
    result if transactions are enabled.
"""
raise NotImplementedError() |
def prepare_env(org):
""" Example shows how to configure environment from scratch """
# Add services
key_service = org.service(type='builtin:cobalt_secure_store', name='Keystore')
wf_service = org.service(type='builtin:workflow_service', name='Workflow', parameters='{}')
# Add services to environment
env = org.environment(name='default')
env.clean()
env.add_service(key_service)
env.add_service(wf_service)
env.add_policy(
{"action": "provisionVms",
"parameter": "publicKeyId",
"value": key_service.regenerate()['id']})
# Add cloud provider account
access = {
"provider": "aws-ec2",
"usedEnvironments": [],
"ec2SecurityGroup": "default",
"providerCopy": "aws-ec2",
"name": "test-provider",
"jcloudsIdentity": KEY,
"jcloudsCredential": SECRET_KEY,
"jcloudsRegions": "us-east-1"
}
prov = org.provider(access)
env.add_provider(prov)
return org.organizationId |
def start(ctx, debug, version, config):
"""Commands for devops operations"""
ctx.obj = {}
ctx.DEBUG = debug
if os.path.isfile(config):
with open(config) as fp:
agile = json.load(fp)
else:
agile = {}
ctx.obj['agile'] = agile
if version:
click.echo(__version__)
ctx.exit(0)
if not ctx.invoked_subcommand:
click.echo(ctx.get_help()) |
def duplicate(obj, value=None, field=None, duplicate_order=None):
"""
Duplicate all related objects of obj setting
field to value. If one of the duplicate
objects has an FK to another duplicate object
update that as well. Return the duplicate copy
of obj.
    duplicate_order is a list of models which specifies how
    the duplicate objects are saved. For complex objects
    this can matter. Check to see whether objects are being
    saved correctly and, if not, pass in related objects
    in the order in which they should be saved.
"""
using = router.db_for_write(obj._meta.model)
collector = CloneCollector(using=using)
collector.collect([obj])
collector.sort()
related_models = list(collector.data.keys())
data_snapshot = {}
for key in collector.data.keys():
data_snapshot.update({
key: dict(zip(
[item.pk for item in collector.data[key]], [item for item in collector.data[key]]))
})
root_obj = None
# Sometimes it's good enough just to save in reverse deletion order.
if duplicate_order is None:
duplicate_order = reversed(related_models)
for model in duplicate_order:
# Find all FKs on model that point to a related_model.
fks = []
for f in model._meta.fields:
if isinstance(f, ForeignKey) and f.rel.to in related_models:
fks.append(f)
# Replace each `sub_obj` with a duplicate.
if model not in collector.data:
continue
sub_objects = collector.data[model]
for obj in sub_objects:
for fk in fks:
fk_value = getattr(obj, "%s_id" % fk.name)
# If this FK has been duplicated then point to the duplicate.
fk_rel_to = data_snapshot[fk.rel.to]
if fk_value in fk_rel_to:
dupe_obj = fk_rel_to[fk_value]
setattr(obj, fk.name, dupe_obj)
# Duplicate the object and save it.
obj.id = None
if field is not None:
setattr(obj, field, value)
obj.save()
if root_obj is None:
root_obj = obj
return root_obj |
def getPayloadStruct(self, attributes, objType):
""" Function getPayloadStruct
Get the payload structure to do a creation or a modification
    @param attributes: The payload data
@param objType: SubItem type (e.g: hostgroup for hostgroup_class)
@return RETURN: the payload
"""
payload = {self.payloadObj: attributes,
objType + "_class":
{self.payloadObj: attributes}}
return payload |
def validate_url(value):
""" Validate url. """
if not re.match(VIMEO_URL_RE, value) and not re.match(YOUTUBE_URL_RE, value):
        raise ValidationError('Invalid URL - only YouTube and Vimeo can be used.') |
def enter_transaction_management(using=None):
"""
Enters transaction management for a running thread. It must be balanced
with the appropriate leave_transaction_management call, since the actual
state is managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.enter_transaction_management()
return
connection = tldap.backend.connections[using]
connection.enter_transaction_management() |
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.leave_transaction_management()
return
connection = tldap.backend.connections[using]
connection.leave_transaction_management() |
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
if using is None:
dirty = False
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
if connection.is_dirty():
dirty = True
return dirty
connection = tldap.backend.connections[using]
return connection.is_dirty() |
def is_managed(using=None):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if using is None:
managed = False
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
if connection.is_managed():
managed = True
return managed
connection = tldap.backend.connections[using]
return connection.is_managed() |
def commit(using=None):
"""
Does the commit itself and resets the dirty flag.
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.commit()
return
connection = tldap.backend.connections[using]
connection.commit() |
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
"""
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.rollback()
return
connection = tldap.backend.connections[using]
connection.rollback() |
def _transaction_func(entering, exiting, using):
"""
    Takes 3 things: an entering function (what to do to start this block of
    transaction management), an exiting function (what to do to end it, on both
    success and failure), and ``using``, which can be None, indicating the
    transaction should occur on all defined servers, or a callable, indicating
    that using is None and that the function should be returned already wrapped.
    Returns either a Transaction object, which is both a decorator and a
    context manager, or a wrapped function, if using is a callable.
"""
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if callable(using):
return Transaction(entering, exiting, None)(using)
return Transaction(entering, exiting, using) |
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
    runs successfully, a commit is made; if the view function produces an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
"""
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_value, using):
try:
if exc_value is not None:
if is_dirty(using=using):
rollback(using=using)
else:
commit(using=using)
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using) |
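A hedged usage sketch: the result of commit_on_success works both as a decorator and as a context manager; a configured tldap backend named 'default' and an ``account`` object with a save() method are assumptions here, not part of the snippets shown:

@commit_on_success(using='default')
def rename_account(account, new_name):
    account.cn = new_name  # rolled back if an exception escapes
    account.save()

with commit_on_success(using='default'):
    pass  # same commit/rollback semantics in context-manager form |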
def commit_manually(using=None):
"""
Decorator that activates manual transaction control. It just disables
automatic transaction control and doesn't do any commit/rollback of its
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using) |
def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
"""
Yields:
tuple (line_number: int, offset: int, text: str, check: type)
"""
if is_test_file(self.filename):
self.load()
for func in self.all_funcs():
try:
for error in func.check_all():
yield (error.line_number, error.offset, error.text, Checker)
except ValidationError as error:
yield error.to_flake8(Checker) |
def process_request(self, request):
"""
Reloads glitter URL patterns if page URLs change.
Avoids having to restart the server to recreate the glitter URLs being used by Django.
"""
global _urlconf_pages
page_list = list(
Page.objects.exclude(glitter_app_name='').values_list('id', 'url').order_by('id')
)
with _urlconf_lock:
if page_list != _urlconf_pages:
glitter_urls = 'glitter.urls'
if glitter_urls in sys.modules:
importlib.reload(sys.modules[glitter_urls])
_urlconf_pages = page_list |
def run(self):
"""
Execute all current and future payloads
Blocks and executes payloads until :py:meth:`stop` is called.
It is an error for any orphaned payload to return or raise.
"""
self._logger.info('runner started: %s', self)
try:
with self._lock:
assert not self.running.is_set() and self._stopped.is_set(), 'cannot re-run: %s' % self
self.running.set()
self._stopped.clear()
self._run()
except Exception:
self._logger.exception('runner aborted: %s', self)
raise
else:
self._logger.info('runner stopped: %s', self)
finally:
with self._lock:
self.running.clear()
self._stopped.set() |
def stop(self):
"""Stop execution of all current and future payloads"""
if not self.running.wait(0.2):
return
self._logger.debug('runner disabled: %s', self)
with self._lock:
self.running.clear()
self._stopped.wait() |
def delimit_words(string: str) -> Generator[str, None, None]:
"""
Delimit a string at word boundaries.
::
>>> import uqbar.strings
>>> list(uqbar.strings.delimit_words("i want to believe"))
['i', 'want', 'to', 'believe']
::
>>> list(uqbar.strings.delimit_words("S3Bucket"))
['S3', 'Bucket']
::
>>> list(uqbar.strings.delimit_words("Route53"))
['Route', '53']
"""
# TODO: Reimplement this
wordlike_characters = ("<", ">", "!")
current_word = ""
for i, character in enumerate(string):
if (
not character.isalpha()
and not character.isdigit()
and character not in wordlike_characters
):
if current_word:
yield current_word
current_word = ""
elif not current_word:
current_word += character
elif character.isupper():
if current_word[-1].isupper():
current_word += character
else:
yield current_word
current_word = character
elif character.islower():
if current_word[-1].isalpha():
current_word += character
else:
yield current_word
current_word = character
elif character.isdigit():
if current_word[-1].isdigit() or current_word[-1].isupper():
current_word += character
else:
yield current_word
current_word = character
elif character in wordlike_characters:
if current_word[-1] in wordlike_characters:
current_word += character
else:
yield current_word
current_word = character
if current_word:
yield current_word |
def normalize(string: str) -> str:
"""
Normalizes whitespace.
Strips leading and trailing blank lines, dedents, and removes trailing
whitespace from the result.
"""
string = string.replace("\t", " ")
lines = string.split("\n")
while lines and (not lines[0] or lines[0].isspace()):
lines.pop(0)
while lines and (not lines[-1] or lines[-1].isspace()):
lines.pop()
for i, line in enumerate(lines):
lines[i] = line.rstrip()
string = "\n".join(lines)
string = textwrap.dedent(string)
return string |
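Usage sketch, e.g. for comparing triple-quoted strings in tests (textwrap is assumed imported at module level):

sample = """
    first line
        indented line
"""
print(repr(normalize(sample)))  # 'first line\n    indented line' |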
def to_dash_case(string: str) -> str:
"""
Convert a string to dash-delimited words.
::
>>> import uqbar.strings
>>> string = 'Tô Đặc Biệt Xe Lửa'
>>> print(uqbar.strings.to_dash_case(string))
to-dac-biet-xe-lua
::
>>> string = 'alpha.beta.gamma'
>>> print(uqbar.strings.to_dash_case(string))
alpha-beta-gamma
"""
string = unidecode.unidecode(string)
words = (_.lower() for _ in delimit_words(string))
string = "-".join(words)
return string |
def get_lib2to3_fixers():
'''returns a list of all fixers found in the lib2to3 library'''
fixers = []
fixer_dirname = fixer_dir.__path__[0]
for name in sorted(os.listdir(fixer_dirname)):
if name.startswith("fix_") and name.endswith(".py"):
fixers.append("lib2to3.fixes." + name[:-3])
return fixers |
def get_single_fixer(fixname):
'''return a single fixer found in the lib2to3 library'''
fixer_dirname = fixer_dir.__path__[0]
for name in sorted(os.listdir(fixer_dirname)):
if (name.startswith("fix_") and name.endswith(".py")
and fixname == name[4:-3]):
return "lib2to3.fixes." + name[:-3] |
def to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
# ensure value is valid
self.validate(value)
assert isinstance(value, list)
value = list(value)
for i, v in enumerate(value):
value[i] = self.value_to_db(v)
# return result
assert isinstance(value, list)
return value |
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
assert isinstance(value, list)
# convert every value in list
value = list(value)
for i, v in enumerate(value):
value[i] = self.value_to_python(v)
# return result
return value |
def validate(self, value):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
# check object type
if not isinstance(value, list):
raise tldap.exceptions.ValidationError(
"is not a list and max_instances is %s" %
self._max_instances)
# check maximum instances
if (self._max_instances is not None and
len(value) > self._max_instances):
raise tldap.exceptions.ValidationError(
"exceeds max_instances of %d" %
self._max_instances)
# check this required value is given
if self._required:
if len(value) == 0:
raise tldap.exceptions.ValidationError(
"is required")
# validate the value
for i, v in enumerate(value):
self.value_validate(v) |
def clean(self, value):
"""
Convert the value's type and run validation. Validation errors from
to_python and validate are propagated. The correct value is returned if
no error is raised.
"""
value = self.to_python(value)
self.validate(value)
return value |
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
if isinstance(value, six.string_types):
value = value.encode("utf_8")
return value |
def value_to_python(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, bytes):
raise tldap.exceptions.ValidationError("should be a bytes")
value = value.decode("utf_8")
return value |
def value_validate(self, value):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not isinstance(value, six.string_types):
raise tldap.exceptions.ValidationError("should be a string") |
def value_to_python(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
    if value is None:
        return value
    if not isinstance(value, bytes):
        raise tldap.exceptions.ValidationError("should be bytes")
try:
return int(value)
except (TypeError, ValueError):
raise tldap.exceptions.ValidationError("is invalid integer") |
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
assert isinstance(value, six.integer_types)
return str(value).encode("utf_8") |
def value_validate(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, six.integer_types):
raise tldap.exceptions.ValidationError("should be a integer")
try:
return str(value)
except (TypeError, ValueError):
raise tldap.exceptions.ValidationError("is invalid integer") |
def value_to_python(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, bytes):
raise tldap.exceptions.ValidationError("should be a bytes")
try:
value = int(value)
except (TypeError, ValueError):
raise tldap.exceptions.ValidationError("is invalid integer")
try:
value = datetime.date.fromtimestamp(value * 24 * 60 * 60)
except OverflowError:
raise tldap.exceptions.ValidationError("is too big a date")
return value |
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
assert isinstance(value, datetime.date)
assert not isinstance(value, datetime.datetime)
try:
value = value - datetime.date(year=1970, month=1, day=1)
except OverflowError:
raise tldap.exceptions.ValidationError("is too big a date")
return str(value.days).encode("utf_8") |
def value_validate(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, datetime.date):
raise tldap.exceptions.ValidationError("is invalid date")
    # a datetime is also a date but they are not compatible
if isinstance(value, datetime.datetime):
raise tldap.exceptions.ValidationError("should be a date, not a datetime") |
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
assert isinstance(value, datetime.datetime)
try:
value = value - datetime.datetime(1970, 1, 1)
except OverflowError:
raise tldap.exceptions.ValidationError("is too big a date")
value = value.seconds + value.days * 24 * 3600
value = str(value).encode("utf_8")
return value |
def value_validate(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, datetime.datetime):
raise tldap.exceptions.ValidationError("is invalid date time") |
def value_to_python(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, bytes):
raise tldap.exceptions.ValidationError("should be a bytes")
length = len(value) - 8
if length % 4 != 0:
raise tldap.exceptions.ValidationError("Invalid sid")
length = length // 4
array = struct.unpack('<bbbbbbbb' + 'I' * length, value)
if array[1] != length:
raise tldap.exceptions.ValidationError("Invalid sid")
if array[2:7] != (0, 0, 0, 0, 0):
raise tldap.exceptions.ValidationError("Invalid sid")
array = ("S", ) + array[0:1] + array[7:]
return "-".join([str(i) for i in array]) |
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
assert isinstance(value, str)
array = value.split("-")
length = len(array) - 3
assert length >= 0
assert array[0] == 'S'
array = array[1:2] + [length, 0, 0, 0, 0, 0] + array[2:]
array = [int(i) for i in array]
return struct.pack('<bbbbbbbb' + 'I' * length, *array) |
def value_validate(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinstance(value, str):
raise tldap.exceptions.ValidationError("Invalid sid")
array = value.split("-")
length = len(array) - 3
if length < 1:
raise tldap.exceptions.ValidationError("Invalid sid")
if array.pop(0) != "S":
raise tldap.exceptions.ValidationError("Invalid sid")
try:
[int(i) for i in array]
    except (TypeError, ValueError):
raise tldap.exceptions.ValidationError("Invalid sid") |
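A standalone sketch of the SID wire format these three methods agree on: one revision byte, a sub-authority count byte, six identifier-authority bytes, then little-endian 32-bit sub-authorities (the SID value below is illustrative):

import struct

sid = 'S-1-5-21-1111-2222'
parts = sid.split('-')              # ['S', '1', '5', '21', '1111', '2222']
subs = [int(p) for p in parts[3:]]  # sub-authorities: 21, 1111, 2222
raw = struct.pack('<bbbbbbbb' + 'I' * len(subs),
                  int(parts[1]), len(subs), 0, 0, 0, 0, 0, int(parts[2]), *subs)
print(struct.unpack('<bbbbbbbb' + 'I' * len(subs), raw))
# (1, 3, 0, 0, 0, 0, 0, 5, 21, 1111, 2222) |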
def get(self, id):
"""Get data for this component
"""
id = self.as_id(id)
url = '%s/%s' % (self, id)
response = self.http.get(url, auth=self.auth)
response.raise_for_status()
return response.json() |
def create(self, data):
"""Create a new component
"""
response = self.http.post(str(self), json=data, auth=self.auth)
response.raise_for_status()
return response.json() |
def update(self, id, data):
"""Update a component
"""
id = self.as_id(id)
response = self.http.patch(
'%s/%s' % (self, id), json=data, auth=self.auth
)
response.raise_for_status()
return response.json() |
def delete(self, id):
"""Delete a component by id
"""
id = self.as_id(id)
response = self.http.delete(
'%s/%s' % (self.api_url, id),
auth=self.auth)
response.raise_for_status() |
def get_list(self, url=None, callback=None, limit=100, **data):
"""Get a list of this github component
:param url: full url
:param callback: Optional callback
:param limit: Optional number of items to retrieve
:param data: additional query data
    :return: a list of item data (dicts) for this component
"""
url = url or str(self)
data = dict(((k, v) for k, v in data.items() if v))
all_data = []
if limit:
data['per_page'] = min(limit, 100)
while url:
response = self.http.get(url, params=data, auth=self.auth)
response.raise_for_status()
result = response.json()
n = m = len(result)
if callback:
result = callback(result)
m = len(result)
all_data.extend(result)
if limit and len(all_data) > limit:
all_data = all_data[:limit]
break
elif m == n:
data = None
next = response.links.get('next', {})
url = next.get('url')
else:
break
return all_data |
def comments(self, issue):
"""Return all comments for this issue/pull request
"""
    issue_id = self.as_id(issue)
    return self.get_list(url='%s/%s/comments' % (self, issue_id)) |
def has_edit_permission(self, request, obj=None, version=None):
"""
Returns a boolean if the user in the request has edit permission for the object.
Can also be passed a version object to check if the user has permission to edit a version
of the object (if they own it).
"""
# Has the edit permission for this object type
permission_name = '{}.edit_{}'.format(self.opts.app_label, self.opts.model_name)
has_permission = request.user.has_perm(permission_name)
if obj is not None and has_permission is False:
has_permission = request.user.has_perm(permission_name, obj=obj)
if has_permission and version is not None:
# Version must not be saved, and must belong to this user
if version.version_number or version.owner != request.user:
has_permission = False
return has_permission |
def has_publish_permission(self, request, obj=None):
"""
Returns a boolean if the user in the request has publish permission for the object.
"""
permission_name = '{}.publish_{}'.format(self.opts.app_label, self.opts.model_name)
has_permission = request.user.has_perm(permission_name)
if obj is not None and has_permission is False:
has_permission = request.user.has_perm(permission_name, obj=obj)
return has_permission |
def semantic_version(tag):
"""Get a valid semantic version for tag
"""
try:
version = list(map(int, tag.split('.')))
assert len(version) == 3
return tuple(version)
except Exception as exc:
raise CommandError(
'Could not parse "%s", please use '
'MAJOR.MINOR.PATCH' % tag
) from exc |
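Usage sketch:

print(semantic_version('1.2.3'))  # (1, 2, 3)
try:
    semantic_version('1.2')
except CommandError as exc:
    print(exc)  # Could not parse "1.2", please use MAJOR.MINOR.PATCH |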
def load(self, data):
""" Function load
Store the object data
"""
self.clear()
self.update(data)
self.enhance() |
def enhance(self):
""" Function enhance
Enhance the object with new item or enhanced items
"""
if self.objName in ['hosts', 'hostgroups',
'puppet_classes']:
from foreman.itemSmartClassParameter\
import ItemSmartClassParameter
self.update({'smart_class_parameters':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
ItemSmartClassParameter)}) |
def reload(self):
""" Function reload
Sync the full object
"""
self.load(self.api.get(self.objName, self.key)) |