Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261)
---|---|---|---|
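Each row below pairs a Python 2 function (the `function` column) in which one exception class has been masked with the literal token `__HOLE__`, the masked class name (the `label` column), and the row's origin in the ETHPy150Open corpus (the `info` column). A minimal sketch of restoring the unmasked source from a row (the dict layout mirrors the header above; the sample values are abridged from row 6,400):

```python
def unmask(function_src, label):
    """Substitute the masked exception class back into the source."""
    return function_src.replace("__HOLE__", label)

# Abridged from row 6,400 below; real rows carry the full function body.
row = {
    "function": "try:\n    return self.get_json(url)\nexcept __HOLE__:\n    return None",
    "label": "ValueError",
}
print(unmask(row["function"], row["label"]))
```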
6,400 | def user_data(self, token, *args, **kwargs):
"""Loads user data from service"""
url = 'https://api.digitalocean.com/v2/account'
auth_header = {"Authorization": "Bearer %s" % token}
try:
return self.get_json(url, headers=auth_header)
except __HOLE__:
return None | ValueError | dataset/ETHPy150Open omab/python-social-auth/social/backends/digitalocean.py/DigitalOceanOAuth.user_data |
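The `info` values appear to follow the pattern `dataset/ETHPy150Open <owner>/<repo>/<file path>/<qualified name>`. A minimal parsing sketch under that assumption; the field boundaries are inferred from the visible rows, not from a published schema:

```python
def parse_info(info):
    """Split an info value into corpus tag, repo, file path and qualname.

    Assumes the layout seen in the rows here: the corpus tag is separated
    by a space, and the file path ends at the component ending in '.py'.
    """
    corpus, rest = info.split(" ", 1)
    parts = rest.split("/")
    py_idx = next(i for i, p in enumerate(parts) if p.endswith(".py"))
    return {
        "corpus": corpus,                       # 'dataset/ETHPy150Open'
        "repo": "/".join(parts[:2]),            # 'omab/python-social-auth'
        "path": "/".join(parts[2:py_idx + 1]),  # 'social/backends/digitalocean.py'
        "qualname": parts[py_idx + 1],          # 'DigitalOceanOAuth.user_data'
    }

print(parse_info(
    "dataset/ETHPy150Open omab/python-social-auth/"
    "social/backends/digitalocean.py/DigitalOceanOAuth.user_data"
))
```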
6,401 | def delegator(self, images):
"""
Receive all images, check them and send them to the worker thread.
"""
delegatorlist = []
for fullpath in images:
try: # recompress images already in the list
image = (i.image for i in self.imagelist
if i.image.fullpath == fullpath).next()
if image.compressed:
image.reset()
image.recompression = True
delegatorlist.append(image)
except __HOLE__:
if not path.isdir(fullpath):
self.add_image(fullpath, delegatorlist)
else:
self.walk(fullpath, delegatorlist)
self.update_table()
self.thread.compress_file(delegatorlist, self.showapp, self.verbose,
self.imagelist) | StopIteration | dataset/ETHPy150Open Kilian/Trimage/src/trimage/trimage.py/StartQT4.delegator |
6,402 | def safe_call(self, command):
""" cross-platform command-line check """
while True:
try:
return call(command, shell=True, stdout=PIPE)
except __HOLE__, e:
if e.errno == errno.EINTR:
continue
else:
raise | OSError | dataset/ETHPy150Open Kilian/Trimage/src/trimage/trimage.py/StartQT4.safe_call |
6,403 | def __init__(self, opts, remote, per_remote_defaults,
override_params, cache_root, role='gitfs'):
self.opts = opts
self.role = role
self.env_blacklist = self.opts.get(
'{0}_env_blacklist'.format(self.role), [])
self.env_whitelist = self.opts.get(
'{0}_env_whitelist'.format(self.role), [])
repo_conf = copy.deepcopy(per_remote_defaults)
per_remote_collisions = [x for x in override_params
if x in PER_REMOTE_ONLY]
if per_remote_collisions:
log.critical(
'The following parameter names are restricted to per-remote '
'use only: {0}. This is a bug, please report it.'.format(
', '.join(per_remote_collisions)
)
)
try:
valid_per_remote_params = override_params + PER_REMOTE_ONLY
except __HOLE__:
valid_per_remote_params = \
list(override_params) + list(PER_REMOTE_ONLY)
if isinstance(remote, dict):
self.id = next(iter(remote))
self.get_url()
per_remote_conf = dict(
[(key, six.text_type(val)) for key, val in
six.iteritems(salt.utils.repack_dictlist(remote[self.id]))]
)
if not per_remote_conf:
log.critical(
'Invalid per-remote configuration for {0} remote \'{1}\'. '
'If no per-remote parameters are being specified, there '
'may be a trailing colon after the URL, which should be '
'removed. Check the master configuration file.'
.format(self.role, self.id)
)
failhard(self.role)
# Separate the per-remote-only (non-global) parameters
per_remote_only = {}
for param in PER_REMOTE_ONLY:
if param in per_remote_conf:
per_remote_only[param] = per_remote_conf.pop(param)
per_remote_errors = False
for param in (x for x in per_remote_conf
if x not in valid_per_remote_params):
if param in AUTH_PARAMS \
and self.provider not in AUTH_PROVIDERS:
msg = (
'{0} authentication parameter \'{1}\' (from remote '
'\'{2}\') is only supported by the following '
'provider(s): {3}. Current {0}_provider is \'{4}\'.'
.format(
self.role,
param,
self.id,
', '.join(AUTH_PROVIDERS),
self.provider
)
)
if self.role == 'gitfs':
msg += (
' See the GitFS Walkthrough in the Salt '
'documentation for further information.'
)
log.critical(msg)
else:
msg = (
'Invalid {0} configuration parameter \'{1}\' in '
'remote {2}. Valid parameters are: {3}.'.format(
self.role,
param,
self.url,
', '.join(valid_per_remote_params)
)
)
if self.role == 'gitfs':
msg += (
' See the GitFS Walkthrough in the Salt '
'documentation for further information.'
)
log.critical(msg)
per_remote_errors = True
if per_remote_errors:
failhard(self.role)
repo_conf.update(per_remote_conf)
repo_conf.update(per_remote_only)
else:
self.id = remote
self.get_url()
# Winrepo doesn't support the 'root' option, but it still must be part
# of the GitProvider object because other code depends on it. Add it as
# an empty string.
if 'root' not in repo_conf:
repo_conf['root'] = ''
if self.role == 'winrepo' and 'name' not in repo_conf:
# Ensure that winrepo has the 'name' parameter set if it wasn't
# provided. Default to the last part of the URL, minus the .git if
# it is present.
repo_conf['name'] = self.url.rsplit('/', 1)[-1]
# Remove trailing .git from name
if repo_conf['name'].lower().endswith('.git'):
repo_conf['name'] = repo_conf['name'][:-4]
# Set all repo config params as attributes
for key, val in six.iteritems(repo_conf):
setattr(self, key, val)
if hasattr(self, 'mountpoint'):
self.mountpoint = salt.utils.url.strip_proto(self.mountpoint)
else:
# For providers which do not use a mountpoint, assume the
# filesystem is mounted at the root of the fileserver.
self.mountpoint = ''
if not isinstance(self.url, six.string_types):
log.critical(
'Invalid {0} remote \'{1}\'. Remotes must be strings, you '
'may need to enclose the URL in quotes'.format(
self.role,
self.id
)
)
failhard(self.role)
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
self.hash = hash_type(self.id).hexdigest()
self.cachedir_basename = getattr(self, 'name', self.hash)
self.cachedir = os.path.join(cache_root, self.cachedir_basename)
if not os.path.isdir(self.cachedir):
os.makedirs(self.cachedir)
try:
self.new = self.init_remote()
except Exception as exc:
msg = ('Exception caught while initializing {0} remote \'{1}\': '
'{2}'.format(self.role, self.id, exc))
if isinstance(self, GitPython):
msg += ' Perhaps git is not available.'
log.critical(msg, exc_info_on_loglevel=logging.DEBUG)
failhard(self.role) | TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitProvider.__init__ |
6,404 | def clear_lock(self, lock_type='update'):
'''
Clear update.lk
'''
lock_file = self._get_lock_file(lock_type=lock_type)
def _add_error(errlist, exc):
msg = ('Unable to remove update lock for {0} ({1}): {2} '
.format(self.url, lock_file, exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
try:
os.remove(lock_file)
except OSError as exc:
if exc.errno == errno.ENOENT:
# No lock file present
pass
elif exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(lock_file)
except __HOLE__ as exc:
_add_error(failed, exc)
else:
_add_error(failed, exc)
else:
msg = 'Removed {0} lock for {1} remote \'{2}\''.format(
lock_type,
self.role,
self.id
)
log.debug(msg)
success.append(msg)
return success, failed | OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitProvider.clear_lock |
6,405 | def _lock(self, lock_type='update', failhard=False):
'''
Place a lock file if (and only if) it does not already exist.
'''
try:
fh_ = os.open(self._get_lock_file(lock_type),
os.O_CREAT | os.O_EXCL | os.O_WRONLY)
with os.fdopen(fh_, 'w'):
# Write the lock file and close the filehandle
os.write(fh_, str(os.getpid()))
except (__HOLE__, IOError) as exc:
if exc.errno == errno.EEXIST:
with salt.utils.fopen(self._get_lock_file(lock_type), 'r') as fd_:
try:
pid = int(fd_.readline().rstrip())
except ValueError:
# Lock file is empty, set pid to 0 so it evaluates as
# False.
pid = 0
#if self.opts.get("gitfs_global_lock") or pid and pid_exists(int(pid)):
global_lock_key = self.role + '_global_lock'
lock_file = self._get_lock_file(lock_type=lock_type)
if self.opts[global_lock_key]:
msg = (
'{0} is enabled and {1} lockfile {2} is present for '
'{3} remote \'{4}\'.'.format(
global_lock_key,
lock_type,
lock_file,
self.role,
self.id,
)
)
if pid:
msg += ' Process {0} obtained the lock'.format(pid)
if not pid_exists(pid):
msg += (' but this process is not running. The '
'update may have been interrupted. If '
'using multi-master with shared gitfs '
'cache, the lock may have been obtained '
'by another master.')
log.warning(msg)
if failhard:
raise
return
elif pid and pid_exists(pid):
log.warning('Process %d has a %s %s lock (%s)',
pid, self.role, lock_type, lock_file)
if failhard:
raise
return
else:
if pid:
log.warning(
'Process %d has a %s %s lock (%s), but this '
'process is not running. Cleaning up lock file.',
pid, self.role, lock_type, lock_file
)
success, fail = self.clear_lock()
if success:
return self._lock(lock_type='update',
failhard=failhard)
elif failhard:
raise
return
else:
msg = 'Unable to set {0} lock for {1} ({2}): {3} '.format(
lock_type,
self.id,
self._get_lock_file(lock_type),
exc
)
log.error(msg)
raise GitLockError(exc.errno, msg)
msg = 'Set {0} lock for {1} remote \'{2}\''.format(
lock_type,
self.role,
self.id
)
log.debug(msg)
return msg | OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitProvider._lock |
6,406 | @contextlib.contextmanager
def gen_lock(self, lock_type='update'):
'''
Set and automatically clear a lock
'''
lock_set = False
try:
self._lock(lock_type=lock_type, failhard=True)
lock_set = True
yield
except (OSError, __HOLE__, GitLockError) as exc:
raise GitLockError(exc.errno, exc.strerror)
finally:
if lock_set:
self.clear_lock(lock_type=lock_type) | IOError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitProvider.gen_lock |
6,407 | def get_url(self):
'''
Examine self.id and assign self.url (and self.branch, for git_pillar)
'''
if self.role in ('git_pillar', 'winrepo'):
# With winrepo and git_pillar, the remote is specified in the
# format '<branch> <url>', so that we can get a unique identifier
# to hash for each remote.
try:
self.branch, self.url = self.id.split(None, 1)
except __HOLE__:
self.branch = self.opts['{0}_branch'.format(self.role)]
self.url = self.id
else:
self.url = self.id | ValueError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitProvider.get_url |
6,408 | def dir_list(self, tgt_env):
'''
Get list of directories for the target environment using GitPython
'''
ret = set()
tree = self.get_tree(tgt_env)
if not tree:
return ret
if self.root:
try:
tree = tree / self.root
except __HOLE__:
return ret
relpath = lambda path: os.path.relpath(path, self.root)
else:
relpath = lambda path: path
add_mountpoint = lambda path: os.path.join(self.mountpoint, path)
for blob in tree.traverse():
if isinstance(blob, git.Tree):
ret.add(add_mountpoint(relpath(blob.path)))
if self.mountpoint:
ret.add(self.mountpoint)
return ret | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitPython.dir_list |
6,409 | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
origin = self.repo.remotes[0]
try:
fetch_results = origin.fetch()
except __HOLE__:
fetch_results = origin.fetch()
new_objs = False
for fetchinfo in fetch_results:
if fetchinfo.old_commit is not None:
log.debug(
'{0} has updated \'{1}\' for remote \'{2}\' '
'from {3} to {4}'.format(
self.role,
fetchinfo.name,
self.id,
fetchinfo.old_commit.hexsha[:7],
fetchinfo.commit.hexsha[:7]
)
)
new_objs = True
elif fetchinfo.flags in (fetchinfo.NEW_TAG,
fetchinfo.NEW_HEAD):
log.debug(
'{0} has fetched new {1} \'{2}\' for remote \'{3}\' '
.format(
self.role,
'tag' if fetchinfo.flags == fetchinfo.NEW_TAG
else 'head',
fetchinfo.name,
self.id
)
)
new_objs = True
cleaned = self.clean_stale_refs()
return bool(new_objs or cleaned) | AssertionError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitPython._fetch |
6,410 | def file_list(self, tgt_env):
'''
Get file list for the target environment using GitPython
'''
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root:
try:
tree = tree / self.root
except __HOLE__:
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root)
else:
relpath = lambda path: path
add_mountpoint = lambda path: os.path.join(self.mountpoint, path)
for file_blob in tree.traverse():
if not isinstance(file_blob, git.Blob):
continue
file_path = add_mountpoint(relpath(file_blob.path))
files.add(file_path)
if stat.S_ISLNK(file_blob.mode):
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
symlinks[file_path] = link_tgt
return files, symlinks | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitPython.file_list |
6,411 | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found
return None, None
blob = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
break
try:
file_blob = tree / path
if stat.S_ISLNK(file_blob.mode):
# Path is a symlink. The blob data corresponding to
# this path's object ID will be the target of the
# symlink. Follow the symlink and set path to the
# location indicated in the blob data.
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
path = os.path.normpath(
os.path.join(os.path.dirname(path), link_tgt)
)
else:
blob = file_blob
break
except __HOLE__:
# File not found or repo_path points to a directory
break
return blob, blob.hexsha if blob is not None else blob | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitPython.find_file |
6,412 | def checkout(self):
'''
Checkout the configured branch/tag
'''
local_ref = 'refs/heads/' + self.branch
remote_ref = 'refs/remotes/origin/' + self.branch
tag_ref = 'refs/tags/' + self.branch
try:
local_head = self.repo.lookup_reference('HEAD')
except KeyError:
log.warning(
'HEAD not present in %s remote \'%s\'', self.role, self.id
)
return None
try:
head_sha = local_head.get_object().hex
except AttributeError:
# Shouldn't happen, but just in case a future pygit2 API change
# breaks things, avoid a traceback and log an error.
log.error(
'Unable to get SHA of HEAD for %s remote \'%s\'',
self.role, self.id
)
return None
except KeyError:
head_sha = None
refs = self.repo.listall_references()
def _perform_checkout(checkout_ref, branch=True):
'''
DRY function for checking out either a branch or a tag
'''
try:
with self.gen_lock(lock_type='checkout'):
# Checkout the local branch corresponding to the
# remote ref.
self.repo.checkout(checkout_ref)
if branch:
self.repo.reset(oid, pygit2.GIT_RESET_HARD)
return True
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return False
try:
if remote_ref in refs:
# Get commit id for the remote ref
oid = self.repo.lookup_reference(remote_ref).get_object().id
if local_ref not in refs:
# No local branch for this remote, so create one and point
# it at the commit id of the remote ref
self.repo.create_reference(local_ref, oid)
try:
target_sha = \
self.repo.lookup_reference(remote_ref).get_object().hex
except KeyError:
log.error(
'pygit2 was unable to get SHA for %s in %s remote '
'\'%s\'', local_ref, self.role, self.id
)
return None
# Only perform a checkout if HEAD and target are not pointing
# at the same SHA1.
if head_sha != target_sha:
# Check existence of the ref in refs/heads/ which
# corresponds to the local HEAD. Checking out local_ref
# below when no local ref for HEAD is missing will raise an
# exception in pygit2 >= 0.21. If this ref is not present,
# create it. The "head_ref != local_ref" check ensures we
# don't try to add this ref if it is not necessary, as it
# would have been added above already. head_ref would be
# the same as local_ref if the branch name was changed but
# the cachedir was not (for example if a "name" parameter
# was used in a git_pillar remote, or if we are using
# winrepo which takes the basename of the repo as the
# cachedir).
head_ref = local_head.target
# If head_ref is not a string, it will point to a
# pygit2.Oid object and we are in detached HEAD mode.
# Therefore, there is no need to add a local reference. If
# head_ref == local_ref, then the local reference for HEAD
# in refs/heads/ already exists and again, no need to add.
if isinstance(head_ref, six.string_types) \
and head_ref not in refs and head_ref != local_ref:
branch_name = head_ref.partition('refs/heads/')[-1]
if not branch_name:
# Shouldn't happen, but log an error if it does
log.error(
'pygit2 was unable to resolve branch name from '
'HEAD ref \'{0}\' in {1} remote \'{2}\''.format(
head_ref, self.role, self.id
)
)
return None
remote_head = 'refs/remotes/origin/' + branch_name
if remote_head not in refs:
log.error(
'Unable to find remote ref \'{0}\' in {1} remote '
'\'{2}\''.format(head_ref, self.role, self.id)
)
return None
self.repo.create_reference(
head_ref,
self.repo.lookup_reference(remote_head).target
)
if not _perform_checkout(local_ref, branch=True):
return None
# Return the relative root, if present
return self.check_root()
elif tag_ref in refs:
tag_obj = self.repo.revparse_single(tag_ref)
if not isinstance(tag_obj, pygit2.Tag):
log.error(
'%s does not correspond to pygit2.Tag object',
tag_ref
)
else:
try:
# If no AttributeError raised, this is an annotated tag
tag_sha = tag_obj.target.hex
except AttributeError:
try:
tag_sha = tag_obj.hex
except __HOLE__:
# Shouldn't happen, but could if a future pygit2
# API change breaks things.
log.error(
'Unable to resolve %s from %s remote \'%s\' '
'to either an annotated or non-annotated tag',
tag_ref, self.role, self.id
)
return None
if head_sha != tag_sha:
if not _perform_checkout(tag_ref, branch=False):
return None
# Return the relative root, if present
return self.check_root()
except GitLockError:
raise
except Exception as exc:
log.error(
'Failed to checkout {0} from {1} remote \'{2}\': {3}'.format(
self.branch,
self.role,
self.id,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
return None
log.error(
'Failed to checkout {0} from {1} remote \'{2}\': remote ref '
'does not exist'.format(self.branch, self.role, self.id)
)
return None | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.checkout |
6,413 | def clean_stale_refs(self, local_refs=None): # pylint: disable=arguments-differ
'''
Clean stale local refs so they don't appear as fileserver environments
'''
if self.credentials is not None:
log.debug(
'pygit2 does not support detecting stale refs for '
'authenticated remotes, saltenvs will not reflect '
'branches/tags removed from remote \'{0}\''
.format(self.id)
)
return []
if local_refs is None:
local_refs = self.repo.listall_references()
remote_refs = []
cmd_str = 'git ls-remote origin'
cmd = subprocess.Popen(
shlex.split(cmd_str),
close_fds=not salt.utils.is_windows(),
cwd=self.repo.workdir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = cmd.communicate()[0]
if cmd.returncode != 0:
log.warning(
'Failed to list remote references for {0} remote \'{1}\'. '
'Output from \'{2}\' follows:\n{3}'.format(
self.role,
self.id,
cmd_str,
output
)
)
return []
for line in salt.utils.itertools.split(output, '\n'):
try:
# Rename heads to match the remote ref names from
# pygit2.Repository.listall_references()
remote_refs.append(
line.split()[-1].replace(b'refs/heads/',
b'refs/remotes/origin/')
)
except __HOLE__:
continue
cleaned = []
if remote_refs:
for ref in local_refs:
if ref.startswith('refs/heads/'):
# Local head, ignore it
continue
elif ref not in remote_refs:
self.repo.lookup_reference(ref).delete()
cleaned.append(ref)
if cleaned:
log.debug('{0} cleaned the following stale refs: {1}'
.format(self.role, cleaned))
return cleaned | IndexError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.clean_stale_refs |
6,414 | def init_remote(self):
'''
Initialize/attach to a remote using pygit2. Return a boolean which
will let the calling function know whether or not a new repo was
initialized by this function.
'''
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
self.repo = pygit2.init_repository(self.cachedir)
new = True
else:
# Repo cachedir exists, try to attach
try:
try:
self.repo = pygit2.Repository(self.cachedir)
except pygit2.GitError as exc:
import pwd
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
if "Error stat'ing config file" not in str(exc):
raise
home = pwd.getpwnam(salt.utils.get_user()).pw_dir
pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
self.repo = pygit2.Repository(self.cachedir)
except __HOLE__:
log.error(_INVALID_REPO.format(self.cachedir, self.url))
return new
self.gitdir = os.path.join(self.repo.workdir, '.git')
if not self.repo.remotes:
try:
self.repo.create_remote('origin', self.url)
# Ensure tags are also fetched
self.repo.config.set_multivar(
'remote.origin.fetch',
'FOO',
'+refs/tags/*:refs/tags/*'
)
self.repo.config.set_multivar(
'http.sslVerify',
'',
self.ssl_verify
)
except os.error:
# This exception occurs when two processes are trying to write
# to the git config at once, go ahead and pass over it since
# this is the only write. This should place a lock down.
pass
else:
new = True
return new | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.init_remote |
6,415 | def dir_list(self, tgt_env):
'''
Get a list of directories for the target environment using pygit2
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating the
paths of all directories within it in the "blobs" list
'''
for entry in iter(tree):
if entry.oid not in self.repo:
# Entry is a submodule, skip it
continue
blob = self.repo[entry.oid]
if not isinstance(blob, pygit2.Tree):
continue
blobs.append(os.path.join(prefix, entry.name))
if len(blob):
_traverse(blob, blobs, os.path.join(prefix, entry.name))
ret = set()
tree = self.get_tree(tgt_env)
if not tree:
return ret
if self.root:
try:
oid = tree[self.root].oid
tree = self.repo[oid]
except __HOLE__:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
relpath = lambda path: os.path.relpath(path, self.root)
else:
relpath = lambda path: path
blobs = []
if len(tree):
_traverse(tree, blobs, self.root)
add_mountpoint = lambda path: os.path.join(self.mountpoint, path)
for blob in blobs:
ret.add(add_mountpoint(relpath(blob)))
if self.mountpoint:
ret.add(self.mountpoint)
return ret | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.dir_list |
6,416 | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
origin = self.repo.remotes[0]
refs_pre = self.repo.listall_references()
fetch_kwargs = {}
if self.credentials is not None:
if self.use_callback:
fetch_kwargs['callbacks'] = \
pygit2.RemoteCallbacks(credentials=self.credentials)
else:
origin.credentials = self.credentials
try:
fetch_results = origin.fetch(**fetch_kwargs)
except GitError as exc:
exc_str = get_error_message(exc).lower()
if 'unsupported url protocol' in exc_str \
and isinstance(self.credentials, pygit2.Keypair):
log.error(
'Unable to fetch SSH-based {0} remote \'{1}\'. '
'You may need to add ssh:// to the repo string or '
'libgit2 must be compiled with libssh2 to support '
'SSH authentication.'.format(self.role, self.id)
)
elif 'authentication required but no callback set' in exc_str:
log.error(
'{0} remote \'{1}\' requires authentication, but no '
'authentication configured'.format(self.role, self.id)
)
else:
log.error(
'Error occurred fetching {0} remote \'{1}\': {2}'.format(
self.role, self.id, exc
)
)
return False
try:
# pygit2.Remote.fetch() returns a dict in pygit2 < 0.21.0
received_objects = fetch_results['received_objects']
except (AttributeError, __HOLE__):
# pygit2.Remote.fetch() returns a class instance in
# pygit2 >= 0.21.0
received_objects = fetch_results.received_objects
if received_objects != 0:
log.debug(
'{0} received {1} objects for remote \'{2}\''
.format(self.role, received_objects, self.id)
)
else:
log.debug(
'{0} remote \'{1}\' is up-to-date'.format(self.role, self.id)
)
refs_post = self.repo.listall_references()
cleaned = self.clean_stale_refs(local_refs=refs_post)
return bool(received_objects or refs_pre != refs_post or cleaned) | TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2._fetch |
6,417 | def file_list(self, tgt_env):
'''
Get file list for the target environment using pygit2
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all
the file paths and symlink info in the "blobs" dict
'''
for entry in iter(tree):
if entry.oid not in self.repo:
# Entry is a submodule, skip it
continue
obj = self.repo[entry.oid]
if isinstance(obj, pygit2.Blob):
repo_path = os.path.join(prefix, entry.name)
blobs.setdefault('files', []).append(repo_path)
if stat.S_ISLNK(tree[entry.name].filemode):
link_tgt = self.repo[tree[entry.name].oid].data
blobs.setdefault('symlinks', {})[repo_path] = link_tgt
elif isinstance(obj, pygit2.Tree):
_traverse(obj, blobs, os.path.join(prefix, entry.name))
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root:
try:
# This might need to be changed to account for a root that
# spans more than one directory
oid = tree[self.root].oid
tree = self.repo[oid]
except __HOLE__:
return files, symlinks
if not isinstance(tree, pygit2.Tree):
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root)
else:
relpath = lambda path: path
blobs = {}
if len(tree):
_traverse(tree, blobs, self.root)
add_mountpoint = lambda path: os.path.join(self.mountpoint, path)
for repo_path in blobs.get('files', []):
files.add(add_mountpoint(relpath(repo_path)))
for repo_path, link_tgt in six.iteritems(blobs.get('symlinks', {})):
symlinks[add_mountpoint(relpath(repo_path))] = link_tgt
return files, symlinks | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.file_list |
6,418 | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found in repo
return None, None
blob = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
break
try:
if stat.S_ISLNK(tree[path].filemode):
# Path is a symlink. The blob data corresponding to this
# path's object ID will be the target of the symlink. Follow
# the symlink and set path to the location indicated
# in the blob data.
link_tgt = self.repo[tree[path].oid].data
path = os.path.normpath(
os.path.join(os.path.dirname(path), link_tgt)
)
else:
oid = tree[path].oid
blob = self.repo[oid]
except __HOLE__:
break
return blob, blob.hex if blob is not None else blob | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.find_file |
6,419 | def get_tree(self, tgt_env):
'''
Return a pygit2.Tree object if the branch/tag/SHA is found, otherwise
None
'''
if tgt_env == 'base':
tgt_ref = self.base
else:
tgt_ref = tgt_env
for ref in self.repo.listall_references():
_, rtype, rspec = ref.split('/', 2)
if rtype in ('remotes', 'tags'):
parted = rspec.partition('/')
rspec = parted[2] if parted[2] else parted[0]
if rspec == tgt_ref and self.env_is_exposed(tgt_env):
return self.repo.lookup_reference(ref).get_object().tree
# Branch or tag not matched, check if 'tgt_env' is a commit
if not self.env_is_exposed(tgt_env):
return None
try:
commit = self.repo.revparse_single(tgt_ref)
except (__HOLE__, TypeError):
# Not a valid commit, likely not a commit SHA
pass
else:
return commit.tree
return None | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Pygit2.get_tree |
6,420 | def dir_list(self, tgt_env):
'''
Get a list of directories for the target environment using dulwich
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a dulwich Tree object recursively, accumulating
the paths of all directories within it in the "blobs" list
'''
for item in six.iteritems(tree):
try:
obj = self.repo.get_object(item.sha)
except __HOLE__:
# Entry is a submodule, skip it
continue
if not isinstance(obj, dulwich.objects.Tree):
continue
blobs.append(os.path.join(prefix, item.path))
if len(self.repo.get_object(item.sha)):
_traverse(obj, blobs, os.path.join(prefix, item.path))
ret = set()
tree = self.get_tree(tgt_env)
tree = self.walk_tree(tree, self.root)
if not isinstance(tree, dulwich.objects.Tree):
return ret
blobs = []
if len(tree):
_traverse(tree, blobs, self.root)
if self.root:
relpath = lambda path: os.path.relpath(path, self.root)
else:
relpath = lambda path: path
add_mountpoint = lambda path: os.path.join(self.mountpoint, path)
for blob in blobs:
ret.add(add_mountpoint(relpath(blob)))
if self.mountpoint:
ret.add(self.mountpoint)
return ret | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich.dir_list |
6,421 | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
# origin is just a url here, there is no origin object
origin = self.url
client, path = \
dulwich.client.get_transport_and_path_from_url(
origin, thin_packs=True
)
refs_pre = self.repo.get_refs()
try:
refs_post = client.fetch(path, self.repo)
except dulwich.errors.NotGitRepository:
log.error(
'Dulwich does not recognize {0} as a valid remote '
'URL. Perhaps it is missing \'.git\' at the '
'end.'.format(self.id)
)
return False
except __HOLE__:
log.error(
'Local repository cachedir \'{0}\' (corresponding '
'remote: \'{1}\') has been corrupted. Salt will now '
'attempt to remove the local checkout to allow it to '
'be re-initialized in the next fileserver cache '
'update.'
.format(self.cachedir, self.id)
)
try:
salt.utils.rm_rf(self.cachedir)
except OSError as exc:
log.error(
'Unable to remove {0}: {1}'.format(self.cachedir, exc)
)
return False
else:
# Dulwich does not write fetched references to the gitdir, that is
# done manually below (see the "Update local refs" comment). Since
# A) gitfs doesn't check out any local branches, B) both Pygit2 and
# GitPython set remote refs when fetching instead of head refs, and
# C) Dulwich is not supported for git_pillar or winrepo, there is
# no harm in simply renaming the head refs from the fetch results
# to remote refs. This allows the same logic (see the
# "_get_envs_from_ref_paths()" function) to be used for all three
# GitProvider subclasses to derive available envs.
for ref in [x for x in refs_post if x.startswith('refs/heads/')]:
val = refs_post.pop(ref)
key = ref.replace('refs/heads/', 'refs/remotes/origin/', 1)
refs_post[key] = val
if refs_post is None:
# Empty repository
log.warning(
'{0} remote \'{1}\' is an empty repository and will '
'be skipped.'.format(self.role, self.id)
)
return False
if refs_pre != refs_post:
# Update local refs
for ref in self.get_env_refs(refs_post):
self.repo[ref] = refs_post[ref]
# Prune stale refs
for ref in refs_pre:
if ref not in refs_post:
del self.repo[ref]
return True
return False | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich._fetch |
6,422 | def file_list(self, tgt_env):
'''
Get file list for the target environment using dulwich
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a dulwich Tree object recursively, accumulating
all the file paths and symlinks info in the "blobs" dict
'''
for item in six.iteritems(tree):
try:
obj = self.repo.get_object(item.sha)
except __HOLE__:
# Entry is a submodule, skip it
continue
if isinstance(obj, dulwich.objects.Blob):
repo_path = os.path.join(prefix, item.path)
blobs.setdefault('files', []).append(repo_path)
mode, oid = tree[item.path]
if stat.S_ISLNK(mode):
link_tgt = self.repo.get_object(oid).as_raw_string()
blobs.setdefault('symlinks', {})[repo_path] = link_tgt
elif isinstance(obj, dulwich.objects.Tree):
_traverse(obj, blobs, os.path.join(prefix, item.path))
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
tree = self.walk_tree(tree, self.root)
if not isinstance(tree, dulwich.objects.Tree):
return files, symlinks
blobs = {}
if len(tree):
_traverse(tree, blobs, self.root)
if self.root:
relpath = lambda path: os.path.relpath(path, self.root)
else:
relpath = lambda path: path
add_mountpoint = lambda path: os.path.join(self.mountpoint, path)
for repo_path in blobs.get('files', []):
files.add(add_mountpoint(relpath(repo_path)))
for repo_path, link_tgt in six.iteritems(blobs.get('symlinks', {})):
symlinks[add_mountpoint(relpath(repo_path))] = link_tgt
return files, symlinks | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich.file_list |
6,423 | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found
return None, None
blob = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
break
prefix_dirs, _, filename = path.rpartition(os.path.sep)
tree = self.walk_tree(tree, prefix_dirs)
if not isinstance(tree, dulwich.objects.Tree):
# Branch/tag/SHA not found in repo
break
try:
mode, oid = tree[filename]
if stat.S_ISLNK(mode):
# Path is a symlink. The blob data corresponding to
# this path's object ID will be the target of the
# symlink. Follow the symlink and set path to the
# location indicated in the blob data.
link_tgt = self.repo.get_object(oid).as_raw_string()
path = os.path.normpath(
os.path.join(os.path.dirname(path), link_tgt)
)
else:
blob = self.repo.get_object(oid)
break
except __HOLE__:
break
return blob, blob.sha().hexdigest() if blob is not None else blob | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich.find_file |
6,424 | def get_tree(self, tgt_env):
'''
Return a dulwich.objects.Tree object if the branch/tag/SHA is found,
otherwise None
'''
if tgt_env == 'base':
tgt_ref = self.base
else:
tgt_ref = tgt_env
refs = self.repo.get_refs()
# Sorting ensures we check heads (branches) before tags
for ref in sorted(self.get_env_refs(refs)):
# ref will be something like 'refs/remotes/origin/master'
try:
rtype, rspec = re.split('^refs/(remotes/origin|tags)/',
ref,
1)[-2:]
except __HOLE__:
# No match was found for the split regex, we don't care about
# this ref. We shouldn't see any of these as the refs are being
# filtered through self.get_env_refs(), but just in case, this
# will avoid a traceback.
continue
if rspec == tgt_ref and self.env_is_exposed(tgt_env):
if rtype == 'remotes/origin':
commit = self.repo.get_object(refs[ref])
elif rtype == 'tags':
tag = self.repo.get_object(refs[ref])
if isinstance(tag, dulwich.objects.Tag):
# Tag.get_object() returns a 2-tuple, the 2nd element
# of which is the commit SHA to which the tag refers
commit = self.repo.get_object(tag.object[1])
elif isinstance(tag, dulwich.objects.Commit):
commit = tag
else:
log.error(
'Unhandled object type \'{0}\' in '
'Dulwich get_tree. This is a bug, please '
'report it.'.format(tag.type_name)
)
return self.repo.get_object(commit.tree)
# Branch or tag not matched, check if 'tgt_env' is a commit. This is more
# difficult with Dulwich because of its inability to deal with shortened
# SHA-1 hashes.
if not self.env_is_exposed(tgt_env):
return None
try:
int(tgt_ref, 16)
except ValueError:
# Not hexadecimal, likely just a non-matching environment
return None
try:
if len(tgt_ref) == 40:
sha_obj = self.repo.get_object(tgt_ref)
if isinstance(sha_obj, dulwich.objects.Commit):
sha_commit = sha_obj
else:
matches = set([
x for x in (
self.repo.get_object(y)
for y in self.repo.object_store
if y.startswith(tgt_ref)
)
if isinstance(x, dulwich.objects.Commit)
])
if len(matches) > 1:
log.warning('Ambiguous commit ID \'{0}\''.format(tgt_ref))
return None
try:
sha_commit = matches.pop()
except KeyError:
# set.pop() raises KeyError when no matching commit was found
pass
except TypeError as exc:
log.warning('Invalid environment {0}: {1}'.format(tgt_env, exc))
except KeyError:
# No matching SHA
return None
try:
return self.repo.get_object(sha_commit.tree)
except NameError:
# No matching sha_commit object was created. Unable to find SHA.
pass
return None | ValueError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich.get_tree |
6,425 | def init_remote(self):
'''
Initialize/attach to a remote using dulwich. Return a boolean which
will let the calling function know whether or not a new repo was
initialized by this function.
'''
if self.url.startswith('ssh://'):
# Dulwich will throw an error if 'ssh://' is used, so make the URL
# use git+ssh:// as dulwich expects
self.url = 'git+' + self.url
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
self.repo = dulwich.repo.Repo.init(self.cachedir)
new = True
else:
# Repo cachedir exists, try to attach
try:
self.repo = dulwich.repo.Repo(self.cachedir)
except dulwich.repo.NotGitRepository:
log.error(_INVALID_REPO.format(self.cachedir, self.url))
return new
self.gitdir = os.path.join(self.repo.path, '.git')
# Read in config file and look for the remote
try:
conf = self.get_conf()
conf.get(('remote', 'origin'), 'url')
except __HOLE__:
try:
conf.set('http', 'sslVerify', self.ssl_verify)
# Add remote manually, there is no function/object to do this
conf.set(
'remote "origin"',
'fetch',
'+refs/heads/*:refs/remotes/origin/*'
)
conf.set('remote "origin"', 'url', self.url)
conf.set('remote "origin"', 'pushurl', self.url)
conf.write_to_path()
except os.error:
pass
else:
new = True
except os.error:
pass
return new | KeyError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich.init_remote |
6,426 | def walk_tree(self, tree, path):
'''
Dulwich does not provide a means of directly accessing subdirectories.
This function will walk down to the directory specified by 'path', and
return a Tree object at that path. If path is an empty string, the
original tree will be returned, and if there are any issues encountered
walking the tree, None will be returned.
'''
if not path:
return tree
# Walk down the tree to get to the file
for parent in path.split(os.path.sep):
try:
tree = self.repo.get_object(tree[parent][1])
except (KeyError, __HOLE__):
# Directory not found, or tree passed into function is not a Tree
# object. Either way, desired path does not exist.
return None
return tree | TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/Dulwich.walk_tree |
6,427 | def clear_old_remotes(self):
'''
Remove cache directories for remotes no longer configured
'''
try:
cachedir_ls = os.listdir(self.cache_root)
except OSError:
cachedir_ls = []
# Remove actively-used remotes from list
for repo in self.remotes:
try:
cachedir_ls.remove(repo.cachedir_basename)
except __HOLE__:
pass
to_remove = []
for item in cachedir_ls:
if item in ('hash', 'refs'):
continue
path = os.path.join(self.cache_root, item)
if os.path.isdir(path):
to_remove.append(path)
failed = []
if to_remove:
for rdir in to_remove:
try:
shutil.rmtree(rdir)
except OSError as exc:
log.error(
'Unable to remove old {0} remote cachedir {1}: {2}'
.format(self.role, rdir, exc)
)
failed.append(rdir)
else:
log.debug(
'{0} removed old cachedir {1}'.format(self.role, rdir)
)
for fdir in failed:
to_remove.remove(fdir)
ret = bool(to_remove)
if ret:
self.write_remote_map()
return ret | ValueError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.clear_old_remotes |
6,428 | def clear_cache(self):
'''
Completely clear cache
'''
errors = []
for rdir in (self.cache_root, self.file_list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except __HOLE__ as exc:
errors.append(
'Unable to delete {0}: {1}'.format(rdir, exc)
)
return errors | OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.clear_cache |
6,429 | def clear_lock(self, remote=None, lock_type='update'):
'''
Clear update.lk for all remotes
'''
cleared = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except __HOLE__:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.clear_lock(lock_type=lock_type)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.clear_lock |
6,430 | def lock(self, remote=None):
'''
Place an update.lk
'''
locked = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except __HOLE__:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.lock()
locked.extend(success)
errors.extend(failed)
return locked, errors | TypeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.lock |
6,431 | def update(self):
'''
Execute a git fetch on all of the repos and perform maintenance on the
fileserver cache.
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'gitfs'}
data['changed'] = self.clear_old_remotes()
if self.fetch_remotes():
data['changed'] = True
if data['changed'] is True or not os.path.isfile(self.env_cache):
env_cachedir = os.path.dirname(self.env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = self.envs(ignore_cache=True)
serial = salt.payload.Serial(self.opts)
with salt.utils.fopen(self.env_cache, 'w+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to {0}'.format(self.env_cache))
# if there is a change, fire an event
if self.opts.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
event.fire_event(
data,
tagify(['gitfs', 'update'], prefix='fileserver')
)
try:
salt.fileserver.reap_fileserver_cache_dir(
self.hash_cachedir,
self.find_file
)
except (__HOLE__, IOError):
# Hash file won't exist if no files have yet been served up
pass | OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.update |
6,432 | def get_provider(self):
'''
Determine which provider to use
'''
if 'verified_{0}_provider'.format(self.role) in self.opts:
self.provider = self.opts['verified_{0}_provider'.format(self.role)]
else:
desired_provider = self.opts.get('{0}_provider'.format(self.role))
if not desired_provider:
if self.verify_pygit2(quiet=True):
self.provider = 'pygit2'
elif self.verify_gitpython(quiet=True):
self.provider = 'gitpython'
elif self.verify_dulwich(quiet=True):
self.provider = 'dulwich'
else:
# Ensure non-lowercase providers work
try:
desired_provider = desired_provider.lower()
except __HOLE__:
# Should only happen if someone does something silly like
# set the provider to a numeric value.
desired_provider = str(desired_provider).lower()
if desired_provider not in self.valid_providers:
log.critical(
'Invalid {0}_provider \'{1}\'. Valid choices are: {2}'
.format(self.role,
desired_provider,
', '.join(self.valid_providers))
)
failhard(self.role)
elif desired_provider == 'pygit2' and self.verify_pygit2():
self.provider = 'pygit2'
elif desired_provider == 'gitpython' and self.verify_gitpython():
self.provider = 'gitpython'
elif desired_provider == 'dulwich' and self.verify_dulwich():
self.provider = 'dulwich'
if not hasattr(self, 'provider'):
log.critical(
'No suitable {0} provider module is installed.'
.format(self.role)
)
failhard(self.role)
if self.provider == 'pygit2':
self.provider_class = Pygit2
elif self.provider == 'gitpython':
self.provider_class = GitPython
elif self.provider == 'dulwich':
self.provider_class = Dulwich | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.get_provider |
6,433 | def write_remote_map(self):
'''
Write the remote_map.txt
'''
remote_map = os.path.join(self.cache_root, 'remote_map.txt')
try:
with salt.utils.fopen(remote_map, 'w+') as fp_:
timestamp = \
datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write(
'# {0}_remote map as of {1}\n'.format(
self.role,
timestamp
)
)
for repo in self.remotes:
fp_.write(
'{0} = {1}\n'.format(
repo.cachedir_basename,
repo.id
)
)
except __HOLE__:
pass
else:
log.info(
'Wrote new {0} remote map to {1}'.format(
self.role,
remote_map
)
) | OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitBase.write_remote_map |
6,434 | def find_file(self, path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or tgt_env not in self.envs():
return fnd
dest = os.path.join(self.cache_root, 'refs', tgt_env, path)
hashes_glob = os.path.join(self.hash_cachedir,
tgt_env,
'{0}.hash.*'.format(path))
blobshadest = os.path.join(self.hash_cachedir,
tgt_env,
'{0}.hash.blob_sha1'.format(path))
lk_fn = os.path.join(self.hash_cachedir,
tgt_env,
'{0}.lk'.format(path))
destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir):
try:
os.makedirs(destdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(destdir)
os.makedirs(destdir)
if not os.path.isdir(hashdir):
try:
os.makedirs(hashdir)
except __HOLE__:
# Path exists and is a file, remove it and retry
os.remove(hashdir)
os.makedirs(hashdir)
for repo in self.remotes:
if repo.mountpoint \
and not path.startswith(repo.mountpoint + os.path.sep):
continue
repo_path = path[len(repo.mountpoint):].lstrip(os.path.sep)
if repo.root:
repo_path = os.path.join(repo.root, repo_path)
blob, blob_hexsha = repo.find_file(repo_path, tgt_env)
if blob is None:
continue
salt.fileserver.wait_lock(lk_fn, dest)
if os.path.isfile(blobshadest) and os.path.isfile(dest):
with salt.utils.fopen(blobshadest, 'r') as fp_:
sha = fp_.read()
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
return fnd
with salt.utils.fopen(lk_fn, 'w+') as fp_:
fp_.write('')
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
except Exception:
pass
# Write contents of file to their destination in the FS cache
repo.write_file(blob, dest)
with salt.utils.fopen(blobshadest, 'w+') as fp_:
fp_.write(blob_hexsha)
try:
os.remove(lk_fn)
except OSError:
pass
fnd['rel'] = path
fnd['path'] = dest
return fnd
# No matching file was found in tgt_env. Return a dict with empty paths
# so the calling function knows the file could not be found.
return fnd | OSError | dataset/ETHPy150Open saltstack/salt/salt/utils/gitfs.py/GitFS.find_file |
6,435 | def DrawPaneButton(self, dc, window, button, button_state, rect, pane):
"""
Draws a pane button in the pane caption area.
:param `dc`: a :class:`DC` device context;
:param `window`: an instance of :class:`Window`;
:param integer `button`: the button to be drawn;
:param integer `button_state`: the pane button state;
:param Rect `rect`: the pane caption rectangle;
:param `pane`: the pane for which the button is drawn.
"""
if self.usingTheme:
hTheme = self.hTheme1
# Get the real button position (compensating for borders)
drect = wx.Rect(rect.x, rect.y, self._button_size, self._button_size)
# Draw the themed close button
rc = RECT(0, 0, 0, 0)
if pane.HasCaptionLeft():
rc.top = rect.x + self._button_border_size
rc.left = int(rect.y + 1.5*self._button_border_size)
rc.right = rect.x + self._button_size + self._button_border_size
rc.bottom = int(rect.y + self._button_size + 1.5*self._button_border_size)
else:
rc.top = rect.x - self._button_border_size
rc.left = int(rect.y + 1.5*self._button_border_size)
rc.right = rect.x + self._button_size - self._button_border_size
rc.bottom = int(rect.y + self._button_size + 1.5*self._button_border_size)
if button == AUI_BUTTON_CLOSE:
btntype = 19
elif button == AUI_BUTTON_PIN:
btntype = 23
elif button == AUI_BUTTON_MAXIMIZE_RESTORE:
if not pane.IsMaximized():
btntype = 17
else:
btntype = 21
else:
btntype = 15
state = 4 # CBS_DISABLED
if pane.state & optionActive:
if button_state == AUI_BUTTON_STATE_NORMAL:
state = 1 # CBS_NORMAL
elif button_state == AUI_BUTTON_STATE_HOVER:
state = 2 # CBS_HOT
elif button_state == AUI_BUTTON_STATE_PRESSED:
state = 3 # CBS_PUSHED
else:
raise Exception("ERROR: Unknown State.")
else: # inactive pane
if button_state == AUI_BUTTON_STATE_NORMAL:
state = 5 # CBS_NORMAL
elif button_state == AUI_BUTTON_STATE_HOVER:
state = 6 # CBS_HOT
elif button_state == AUI_BUTTON_STATE_PRESSED:
state = 7 # CBS_PUSHED
else:
raise Exception("ERROR: Unknown State.")
try:
winxptheme.DrawThemeBackground(hTheme, dc.GetHDC(), btntype, state, (rc.top, rc.left, rc.right, rc.bottom), None)
except __HOLE__:
return
else:
# Fallback to default closebutton if themes are not enabled
rect2 = wx.Rect(rect.x-4, rect.y+2, rect.width, rect.height)
AuiDefaultDockArt.DrawPaneButton(self, dc, window, button, button_state, rect2, pane) | TypeError | dataset/ETHPy150Open ContinuumIO/ashiba/enaml/enaml/wx/wx_upstream/aui/dockart.py/ModernDockArt.DrawPaneButton |
6,436 | def rosetta_csrf_token(parser, token):
try:
from django.template.defaulttags import csrf_token
return csrf_token(parser, token)
except __HOLE__:
return RosettaCsrfTokenPlaceholder() | ImportError | dataset/ETHPy150Open mbi/django-rosetta/rosetta/templatetags/rosetta.py/rosetta_csrf_token |
6,437 | def __init__(self):
self.creds_file = 'credentials.txt'
self.creds_fullpath = None
self.oauth = {}
try:
self.twitter_dir = os.environ['TWITTER']
self.creds_subdir = self.twitter_dir
except __HOLE__:
self.twitter_dir = None
self.creds_subdir = None | KeyError | dataset/ETHPy150Open nltk/nltk/nltk/twitter/util.py/Authenticate.__init__ |
6,438 | def isDate(date_text):
try:
datetime.datetime.strptime(date_text, '%Y-%m-%d')
return True
except __HOLE__:
return False | ValueError | dataset/ETHPy150Open JeffHoogland/qutemtgstats/Code/PasteWindow.py/isDate |
6,439 | def testThemAll(self):
for entry in self.listOfTests:
output = entry[0]
exception = entry[1]
try:
args = entry[2]
except __HOLE__:
args = ()
try:
kwargs = entry[3]
except IndexError:
kwargs = {}
self.assertEqual(
str(exception(*args, **kwargs)),
output) | IndexError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/test/test_error.py/TestStringification.testThemAll |
6,440 | def open_config_file(config_file):
""" Opens a config file, logging common IOError exceptions.
"""
try:
return open(config_file)
except __HOLE__, e:
# This should only happen with the top level config file, since
# we use glob.glob on includes
if e.errno == 2:
logger.error("Could not find file '%s'.", config_file)
elif e.errno == 13:
logger.error("Invalid permissions to open '%s'.", config_file)
raise | IOError | dataset/ETHPy150Open cloudtools/nymms/nymms/config/yaml_config.py/open_config_file |
6,441 | def test_unique_for_date(self):
p1 = Post.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
p = Post(title="Django 1.0 is released", posted=datetime.date(2008, 9, 3))
try:
p.full_clean()
except ValidationError, e:
self.assertEqual(e.message_dict, {'title': [u'Title must be unique for Posted date.']})
else:
self.fail('unique_for_date checks should catch this.')
# Should work without errors
p = Post(title="Work on Django 1.1 begins", posted=datetime.date(2008, 9, 3))
p.full_clean()
# Should work without errors
p = Post(title="Django 1.0 is released", posted=datetime.datetime(2008, 9,4))
p.full_clean()
p = Post(slug="Django 1.0", posted=datetime.datetime(2008, 1, 1))
try:
p.full_clean()
except __HOLE__, e:
self.assertEqual(e.message_dict, {'slug': [u'Slug must be unique for Posted year.']})
else:
self.fail('unique_for_year checks should catch this.')
p = Post(subtitle="Finally", posted=datetime.datetime(2008, 9, 30))
try:
p.full_clean()
except ValidationError, e:
self.assertEqual(e.message_dict, {'subtitle': [u'Subtitle must be unique for Posted month.']})
else:
self.fail('unique_for_month checks should catch this.')
p = Post(title="Django 1.0 is released")
try:
p.full_clean()
except ValidationError, e:
self.assertEqual(e.message_dict, {'posted': [u'This field cannot be null.']})
else:
self.fail("Model validation shouldn't allow an absent value for a DateField without null=True.") | ValidationError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/tests/modeltests/validation/test_unique.py/PerformUniqueChecksTest.test_unique_for_date |
6,442 | def test_unique_for_date_with_nullable_date(self):
p1 = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
p = FlexibleDatePost(title="Django 1.0 is released")
try:
p.full_clean()
except __HOLE__, e:
self.fail("unique_for_date checks shouldn't trigger when the associated DateField is None.")
except:
self.fail("unique_for_date checks shouldn't explode when the associated DateField is None.")
p = FlexibleDatePost(slug="Django 1.0")
try:
p.full_clean()
except ValidationError, e:
self.fail("unique_for_year checks shouldn't trigger when the associated DateField is None.")
except:
self.fail("unique_for_year checks shouldn't explode when the associated DateField is None.")
p = FlexibleDatePost(subtitle="Finally")
try:
p.full_clean()
except ValidationError, e:
self.fail("unique_for_month checks shouldn't trigger when the associated DateField is None.")
except:
self.fail("unique_for_month checks shouldn't explode when the associated DateField is None.") | ValidationError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/tests/modeltests/validation/test_unique.py/PerformUniqueChecksTest.test_unique_for_date_with_nullable_date |
6,443 | def __init__(self, container, service, entrypoint, args=None, kwargs=None,
data=None):
self.container = container
self.config = container.config # TODO: remove?
self.parent_calls_tracked = self.config.get(
PARENT_CALLS_CONFIG_KEY, DEFAULT_PARENT_CALLS_TRACKED)
self.service = service
self.entrypoint = entrypoint
self.service_name = self.container.service_name
self.args = args if args is not None else ()
self.kwargs = kwargs if kwargs is not None else {}
self.data = data if data is not None else {}
self.parent_call_stack, self.unique_id = self._init_call_id()
self.call_id = '{}.{}.{}'.format(
self.service_name, self.entrypoint.method_name, self.unique_id
)
n = -self.parent_calls_tracked
self.call_id_stack = self.parent_call_stack[n:]
self.call_id_stack.append(self.call_id)
try:
self.immediate_parent_call_id = self.parent_call_stack[-1]
except __HOLE__:
self.immediate_parent_call_id = None | IndexError | dataset/ETHPy150Open onefinestay/nameko/nameko/containers.py/WorkerContextBase.__init__ |
6,444 | def test_maps(self):
try:
maps = nis.maps()
except nis.error as msg:
# NIS is probably not active, so this test isn't useful
if support.verbose:
print("Test Skipped:", msg)
# Can't raise SkipTest as regrtest only recognizes the exception
# import time.
return
try:
# On some systems, this map is only accessible to the
# super user
maps.remove("passwd.adjunct.byname")
except __HOLE__:
pass
done = 0
for nismap in maps:
mapping = nis.cat(nismap)
for k, v in mapping.items():
if not k:
continue
if nis.match(k, nismap) != v:
self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
else:
# just test the one key, otherwise this test could take a
# very long time
done = 1
break
if done:
break | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_nis.py/NisTests.test_maps |
6,445 | def main(self):
source = ogr.Open(self.args.input, False)
source_layer = source.GetLayer(0)
try:
shutil.rmtree(self.args.output)
except __HOLE__:
pass
driver = ogr.GetDriverByName('ESRI Shapefile')
dest = driver.CreateDataSource(self.args.output)
dest_layer = dest.CreateLayer('difference', geom_type=ogr.wkbMultiPolygon)
for i in range(source_layer.GetLayerDefn().GetFieldCount()):
dest_layer.CreateField(source_layer.GetLayerDefn().GetFieldDefn(i))
mask_features = []
mask_boxes = []
for mask in self.args.masks:
geo = ogr.Open(mask, False)
layer = geo.GetLayer(0)
for feature in layer:
mask_features.append(feature)
mask_boxes.append(get_bounding_box(feature.GetGeometryRef()))
for feature in source_layer:
masked_feature = ogr.Feature(feature_def=source_layer.GetLayerDefn())
masked_feature.SetFrom(feature)
masked_geometry = feature.GetGeometryRef().Clone()
for (i, mask_feature) in enumerate(mask_features):
bounding_box = mask_boxes[i]
if not masked_geometry.Intersects(bounding_box):
continue
masked_geometry = masked_geometry.Difference(mask_feature.GetGeometryRef())
masked_feature.SetGeometryDirectly(masked_geometry)
dest_layer.CreateFeature(masked_feature) | OSError | dataset/ETHPy150Open onyxfish/ogrkit/ogrkit/utilities/difference.py/OGRDifference.main |
6,446 | def InstallPerfmonForService(serviceName, iniName, dllName = None):
# If no DLL name, look it up in the INI file name
if not dllName: # May be empty string!
dllName = win32api.GetProfileVal("Python", "dll", "", iniName)
# Still not found - look for the standard one in the same dir as win32service.pyd
if not dllName:
try:
tryName = os.path.join(os.path.split(win32service.__file__)[0], "perfmondata.dll")
if os.path.isfile(tryName):
dllName = tryName
except __HOLE__:
# Frozen app? - anyway, can't find it!
pass
if not dllName:
raise ValueError, "The name of the performance DLL must be available"
dllName = win32api.GetFullPathName(dllName)
# Now setup all the required "Performance" entries.
hkey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\%s" % (serviceName), 0, win32con.KEY_ALL_ACCESS)
try:
subKey = win32api.RegCreateKey(hkey, "Performance")
try:
win32api.RegSetValueEx(subKey, "Library", 0, win32con.REG_SZ, dllName)
win32api.RegSetValueEx(subKey, "Open", 0, win32con.REG_SZ, "OpenPerformanceData")
win32api.RegSetValueEx(subKey, "Close", 0, win32con.REG_SZ, "ClosePerformanceData")
win32api.RegSetValueEx(subKey, "Collect", 0, win32con.REG_SZ, "CollectPerformanceData")
finally:
win32api.RegCloseKey(subKey)
finally:
win32api.RegCloseKey(hkey)
# Now do the "Lodctr" thang...
try:
import perfmon
path, fname = os.path.split(iniName)
oldPath = os.getcwd()
if path:
os.chdir(path)
try:
perfmon.LoadPerfCounterTextStrings("python.exe " + fname)
finally:
os.chdir(oldPath)
except win32api.error, details:
print "The service was installed OK, but the performance monitor"
print "data could not be loaded.", details | AttributeError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/InstallPerfmonForService |
6,447 | def InstallService(pythonClassString, serviceName, displayName, startType = None, errorControl = None, bRunInteractive = 0, serviceDeps = None, userName = None, password = None, exeName = None, perfMonIni = None, perfMonDll = None, exeArgs = None, description = None):
# Handle the default arguments.
if startType is None:
startType = win32service.SERVICE_DEMAND_START
serviceType = win32service.SERVICE_WIN32_OWN_PROCESS
if bRunInteractive:
serviceType = serviceType | win32service.SERVICE_INTERACTIVE_PROCESS
if errorControl is None:
errorControl = win32service.SERVICE_ERROR_NORMAL
exeName = '"%s"' % LocatePythonServiceExe(exeName) # None here means use default PythonService.exe
commandLine = _GetCommandLine(exeName, exeArgs)
hscm = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
try:
hs = win32service.CreateService(hscm,
serviceName,
displayName,
win32service.SERVICE_ALL_ACCESS, # desired access
serviceType, # service type
startType,
errorControl, # error control type
commandLine,
None,
0,
serviceDeps,
userName,
password)
if description is not None:
try:
win32service.ChangeServiceConfig2(hs,win32service.SERVICE_CONFIG_DESCRIPTION,description)
except __HOLE__:
pass ## ChangeServiceConfig2 and description do not exist on NT
win32service.CloseServiceHandle(hs)
finally:
win32service.CloseServiceHandle(hscm)
InstallPythonClassString(pythonClassString, serviceName)
# If I have performance monitor info to install, do that.
if perfMonIni is not None:
InstallPerfmonForService(serviceName, perfMonIni, perfMonDll) | NotImplementedError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/InstallService |
6,448 | def ChangeServiceConfig(pythonClassString, serviceName, startType = None, errorControl = None, bRunInteractive = 0, serviceDeps = None, userName = None, password = None, exeName = None, displayName = None, perfMonIni = None, perfMonDll = None, exeArgs = None, description = None):
# Before doing anything, remove any perfmon counters.
try:
import perfmon
perfmon.UnloadPerfCounterTextStrings("python.exe "+serviceName)
except (__HOLE__, win32api.error):
pass
# The EXE location may have changed
exeName = '"%s"' % LocatePythonServiceExe(exeName)
# Handle the default arguments.
if startType is None: startType = win32service.SERVICE_NO_CHANGE
if errorControl is None: errorControl = win32service.SERVICE_NO_CHANGE
hscm = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
serviceType = win32service.SERVICE_WIN32_OWN_PROCESS
if bRunInteractive:
serviceType = serviceType | win32service.SERVICE_INTERACTIVE_PROCESS
commandLine = _GetCommandLine(exeName, exeArgs)
try:
hs = SmartOpenService(hscm, serviceName, win32service.SERVICE_ALL_ACCESS)
try:
win32service.ChangeServiceConfig(hs,
serviceType, # service type
startType,
errorControl, # error control type
commandLine,
None,
0,
serviceDeps,
userName,
password,
displayName)
if description is not None:
try:
win32service.ChangeServiceConfig2(hs,win32service.SERVICE_CONFIG_DESCRIPTION,description)
except NotImplementedError:
pass ## ChangeServiceConfig2 and description do not exist on NT
finally:
win32service.CloseServiceHandle(hs)
finally:
win32service.CloseServiceHandle(hscm)
InstallPythonClassString(pythonClassString, serviceName)
# If I have performance monitor info to install, do that.
if perfMonIni is not None:
InstallPerfmonForService(serviceName, perfMonIni, perfMonDll) | ImportError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/ChangeServiceConfig |
6,449 | def SetServiceCustomOption(serviceName, option, value):
try:
serviceName = serviceName._svc_name_
except __HOLE__:
pass
key = win32api.RegCreateKey(win32con.HKEY_LOCAL_MACHINE, "System\\CurrentControlSet\\Services\\%s\\Parameters" % serviceName)
try:
if type(value)==type(0):
win32api.RegSetValueEx(key, option, 0, win32con.REG_DWORD, value);
else:
win32api.RegSetValueEx(key, option, 0, win32con.REG_SZ, value);
finally:
win32api.RegCloseKey(key) | AttributeError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/SetServiceCustomOption |
6,450 | def GetServiceCustomOption(serviceName, option, defaultValue = None):
# First param may also be a service class/instance.
# This allows services to pass "self"
try:
serviceName = serviceName._svc_name_
except __HOLE__:
pass
key = win32api.RegCreateKey(win32con.HKEY_LOCAL_MACHINE, "System\\CurrentControlSet\\Services\\%s\\Parameters" % serviceName)
try:
try:
return win32api.RegQueryValueEx(key, option)[0]
except win32api.error: # No value.
return defaultValue
finally:
win32api.RegCloseKey(key) | AttributeError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/GetServiceCustomOption |
6,451 | def RemoveService(serviceName):
try:
import perfmon
perfmon.UnloadPerfCounterTextStrings("python.exe "+serviceName)
except (__HOLE__, win32api.error):
pass
hscm = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
try:
hs = SmartOpenService(hscm, serviceName, win32service.SERVICE_ALL_ACCESS)
win32service.DeleteService(hs)
win32service.CloseServiceHandle(hs)
finally:
win32service.CloseServiceHandle(hscm)
import win32evtlogutil
try:
win32evtlogutil.RemoveSourceFromRegistry(serviceName)
except win32api.error:
pass | ImportError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/RemoveService |
6,452 | def HandleCommandLine(cls, serviceClassString = None, argv = None, customInstallOptions = "", customOptionHandler = None):
"""Utility function allowing services to process the command line.
Allows standard commands such as 'start', 'stop', 'debug', 'install' etc.
Install supports 'standard' command line options prefixed with '--', such as
--username, --password, etc. In addition,
the function allows custom command line options to be handled by the calling function.
"""
err = 0
if argv is None: argv = sys.argv
if len(argv)<=1:
usage()
serviceName = cls._svc_name_
serviceDisplayName = cls._svc_display_name_
if serviceClassString is None:
serviceClassString = GetServiceClassString(cls)
# Pull apart the command line
import getopt
try:
opts, args = getopt.getopt(argv[1:], customInstallOptions,["password=","username=","startup=","perfmonini=", "perfmondll=", "interactive", "wait="])
except getopt.error, details:
print details
usage()
userName = None
password = None
perfMonIni = perfMonDll = None
startup = None
interactive = None
waitSecs = 0
for opt, val in opts:
if opt=='--username':
userName = val
elif opt=='--password':
password = val
elif opt=='--perfmonini':
perfMonIni = val
elif opt=='--perfmondll':
perfMonDll = val
elif opt=='--interactive':
interactive = 1
elif opt=='--startup':
map = {"manual": win32service.SERVICE_DEMAND_START, "auto" : win32service.SERVICE_AUTO_START, "disabled": win32service.SERVICE_DISABLED}
try:
startup = map[string.lower(val)]
except KeyError:
print "'%s' is not a valid startup option" % val
elif opt=='--wait':
try:
waitSecs = int(val)
except ValueError:
print "--wait must specify an integer number of seconds."
usage()
arg=args[0]
knownArg = 0
# First we process all arguments which pass additional args on
if arg=="start":
knownArg = 1
print "Starting service %s" % (serviceName)
try:
StartService(serviceName, args[1:])
if waitSecs:
WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
except win32service.error, (hr, fn, msg):
print "Error starting service: %s" % msg
elif arg=="restart":
knownArg = 1
print "Restarting service %s" % (serviceName)
RestartService(serviceName, args[1:])
if waitSecs:
WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)
elif arg=="debug":
knownArg = 1
if not hasattr(sys, "frozen"):
# non-frozen services use pythonservice.exe which handles a
# -debug option
svcArgs = string.join(args[1:])
try:
exeName = LocateSpecificServiceExe(serviceName)
except win32api.error, exc:
if exc[0] == winerror.ERROR_FILE_NOT_FOUND:
print "The service does not appear to be installed."
print "Please install the service before debugging it."
sys.exit(1)
raise
try:
os.system("%s -debug %s %s" % (exeName, serviceName, svcArgs))
# ^C is used to kill the debug service. Sometimes Python also gets
# interrupted - ignore it...
except KeyboardInterrupt:
pass
else:
# py2exe services don't use pythonservice - so we simulate
# debugging here.
DebugService(cls, args)
if not knownArg and len(args)<>1:
usage() # the rest of the cmds don't take addn args
if arg=="install":
knownArg = 1
try:
serviceDeps = cls._svc_deps_
except AttributeError:
serviceDeps = None
try:
exeName = cls._exe_name_
except AttributeError:
exeName = None # Default to PythonService.exe
try:
exeArgs = cls._exe_args_
except AttributeError:
exeArgs = None
try:
description = cls._svc_description_
except AttributeError:
description = None
print "Installing service %s" % (serviceName,)
# Note that we install the service before calling the custom option
# handler, so if the custom handler fails, we have an installed service (from NT's POV)
# but is unlikely to work, as the Python code controlling it failed. Therefore
# we remove the service if the first bit works, but the second doesnt!
try:
InstallService(serviceClassString, serviceName, serviceDisplayName, serviceDeps = serviceDeps, startType=startup, bRunInteractive=interactive, userName=userName,password=password, exeName=exeName, perfMonIni=perfMonIni,perfMonDll=perfMonDll,exeArgs=exeArgs,description=description)
if customOptionHandler:
apply( customOptionHandler, (opts,) )
print "Service installed"
except win32service.error, (hr, fn, msg):
if hr==winerror.ERROR_SERVICE_EXISTS:
arg = "update" # Fall through to the "update" param!
else:
print "Error installing service: %s (%d)" % (msg, hr)
err = hr
except ValueError, msg: # Can be raised by custom option handler.
print "Error installing service: %s" % str(msg)
err = -1
# xxx - maybe I should remove after _any_ failed install - however,
# xxx - it may be useful to help debug to leave the service as it failed.
# xxx - We really _must_ remove as per the comments above...
# As we failed here, remove the service, so the next installation
# attempt works.
try:
RemoveService(serviceName)
except win32api.error:
print "Warning - could not remove the partially installed service."
if arg == "update":
knownArg = 1
try:
serviceDeps = cls._svc_deps_
except AttributeError:
serviceDeps = None
try:
exeName = cls._exe_name_
except AttributeError:
exeName = None # Default to PythonService.exe
try:
exeArgs = cls._exe_args_
except AttributeError:
exeArgs = None
try:
description=cls._svc_description_
except __HOLE__:
description=None
print "Changing service configuration"
try:
ChangeServiceConfig(serviceClassString, serviceName, serviceDeps = serviceDeps, startType=startup, bRunInteractive=interactive, userName=userName,password=password, exeName=exeName, displayName = serviceDisplayName, perfMonIni=perfMonIni,perfMonDll=perfMonDll,exeArgs=exeArgs,description=description)
if customOptionHandler:
apply( customOptionHandler, (opts,) )
print "Service updated"
except win32service.error, (hr, fn, msg):
print "Error changing service configuration: %s (%d)" % (msg,hr)
err = hr
elif arg=="remove":
knownArg = 1
print "Removing service %s" % (serviceName)
try:
RemoveService(serviceName)
print "Service removed"
except win32service.error, (hr, fn, msg):
print "Error removing service: %s (%d)" % (msg,hr)
err = hr
elif arg=="stop":
knownArg = 1
print "Stopping service %s" % (serviceName)
try:
if waitSecs:
StopServiceWithDeps(serviceName, waitSecs = waitSecs)
else:
StopService(serviceName)
except win32service.error, (hr, fn, msg):
print "Error stopping service: %s (%d)" % (msg,hr)
err = hr
if not knownArg:
err = -1
print "Unknown command - '%s'" % arg
usage()
return err
#
# Useful base class to build services from.
# | AttributeError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/HandleCommandLine |
6,453 | def SvcOther(self, control):
try:
print "Unknown control status - %d" % control
except __HOLE__:
# services may not have a valid stdout!
pass | IOError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32serviceutil.py/ServiceFramework.SvcOther |
6,454 | def get_global_variable_named(self, name):
try:
return self.globals[name]
except __HOLE__:
raise LLVMException(name) | KeyError | dataset/ETHPy150Open numba/llvmlite/llvmlite/llvmpy/core.py/Module.get_global_variable_named |
6,455 | def get_or_insert_named_metadata(self, name):
try:
return self.get_named_metadata(name)
except __HOLE__:
return self.add_named_metadata(name) | KeyError | dataset/ETHPy150Open numba/llvmlite/llvmlite/llvmpy/core.py/Module.get_or_insert_named_metadata |
6,456 | def postcommit_after_request(response, base_status_error_code=500):
if response.status_code >= base_status_error_code:
_local.postcommit_queue = set()
return response
try:
if settings.ENABLE_VARNISH and postcommit_queue():
import gevent
threads = [gevent.spawn(func, *args) for func, args in postcommit_queue()]
gevent.joinall(threads)
except __HOLE__:
if not settings.DEBUG_MODE:
logger.error('Post commit task queue not initialized')
return response | AttributeError | dataset/ETHPy150Open CenterForOpenScience/osf.io/framework/postcommit_tasks/handlers.py/postcommit_after_request |
6,457 | def main():
# Parse the command-line options.
parser = OptionParser()
parser.add_option(
"-v", "--verbosity",
action="store",
dest="verbosity",
default="1",
type="choice",
choices=["0", "1", "2", "3"],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output",
)
parser.add_option(
"--noinput",
action="store_false",
dest="interactive",
default=True,
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_option(
"--failfast",
action="store_true",
dest="failfast",
default=False,
help="Tells Django to stop running the test suite after first failed test.",
)
parser.add_option(
"-d", "--database",
action="store",
dest="database",
default="sqlite",
type="choice",
choices=list(AVAILABLE_DATABASES.keys()),
help="Select database backend for tests. Available choices: {}".format(
', '.join(AVAILABLE_DATABASES.keys())),
)
options, args = parser.parse_args()
# Configure Django.
from django.conf import settings
# database settings
if options.database:
database_setting = AVAILABLE_DATABASES[options.database]
if options.database == "sqlite":
database_default_name = os.path.join(os.path.dirname(__file__), "db.sqlite3")
else:
database_default_name = "test_project"
database_setting.update(dict(
NAME=os.environ.get("DB_NAME", database_default_name),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", "")))
else:
database_setting = dict(
ENGINE=os.environ.get("DB_ENGINE", 'django.db.backends.sqlite3'),
NAME=os.environ.get("DB_NAME", os.path.join(os.path.dirname(__file__), "db.sqlite3")),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", ""))
settings.configure(
DEBUG=False,
DATABASES={
"default": database_setting
},
ROOT_URLCONF="urls",
INSTALLED_APPS=(
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
"watson",
"test_watson",
),
MIDDLEWARE_CLASSES=(
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
),
USE_TZ=True,
STATIC_URL="/static/",
TEST_RUNNER="django.test.runner.DiscoverRunner",
)
# Run Django setup (1.7+).
import django
try:
django.setup()
except __HOLE__:
pass # This is Django < 1.7
# Configure the test runner.
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity = int(options.verbosity),
interactive = options.interactive,
failfast = options.failfast,
)
# Run the tests.
failures = test_runner.run_tests(["test_watson"])
if failures:
sys.exit(failures) | AttributeError | dataset/ETHPy150Open etianen/django-watson/src/tests/runtests.py/main |
6,458 | def load_tokens(email):
logger.debug('load_tokens')
try:
tokens = open('/tmp/resources/' + email, 'rU').read()
return tokens
except __HOLE__:
logger.exception('load_tokens')
return None | IOError | dataset/ETHPy150Open anantb/voicex/transport/google_voice/login.py/load_tokens |
6,459 | @property
def _api(self):
if not self._db_api:
with self._lock:
if not self._db_api:
db_api = api.DBAPI.from_config(
conf=self._conf, backend_mapping=self._backend_mapping)
if self._conf.database.use_tpool:
try:
from eventlet import tpool
except __HOLE__:
LOG.exception(_LE("'eventlet' is required for "
"TpoolDbapiWrapper."))
raise
self._db_api = tpool.Proxy(db_api)
else:
self._db_api = db_api
return self._db_api | ImportError | dataset/ETHPy150Open openstack/oslo.db/oslo_db/concurrency.py/TpoolDbapiWrapper._api |
6,460 | def get(self, url, parameters={}):
response = self.client.get(url, parameters)
try:
rv = simplejson.loads(response.content)
return rv
except __HOLE__, e:
print response.content
raise | ValueError | dataset/ETHPy150Open CollabQ/CollabQ/common/test/api.py/ApiIntegrationTest.get |
6,461 | def __contains__(self, key):
try:
value = self[key]
except __HOLE__:
return False
return True | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/test/utils.py/ContextList.__contains__ |
6,462 | def setup_test_template_loader(templates_dict, use_cached_loader=False):
"""
Changes Django to only find templates from within a dictionary (where each
key is the template name and each value is the corresponding template
content to return).
Use meth:`restore_template_loaders` to restore the original loaders.
"""
if hasattr(loader, RESTORE_LOADERS_ATTR):
raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)
def test_template_loader(template_name, template_dirs=None):
"A custom template loader that loads templates from a dictionary."
try:
return (templates_dict[template_name], "test:%s" % template_name)
except __HOLE__:
raise TemplateDoesNotExist(template_name)
if use_cached_loader:
template_loader = cached.Loader(('test_template_loader',))
template_loader._cached_loaders = (test_template_loader,)
else:
template_loader = test_template_loader
setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
loader.template_source_loaders = (template_loader,)
return template_loader | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/test/utils.py/setup_test_template_loader |
6,463 | def setsudo(self, user=None):
"""Set the subsequent API calls to the user provided
:param user: User id or username to change to, None to return to the logged user
:return: Nothing
"""
if user is None:
try:
self.headers.pop("SUDO")
except __HOLE__:
pass
else:
self.headers["SUDO"] = user | KeyError | dataset/ETHPy150Open pyapi-gitlab/pyapi-gitlab/gitlab/__init__.py/Gitlab.setsudo |
6,464 | @register.tag
def membersof(parser, token):
"""
Given a collection and a content type, sets the results of :meth:`collection.members.with_model <.CollectionMemberManager.with_model>` as a variable in the context.
Usage::
{% membersof <collection> with <app_label>.<model_name> as <var> %}
"""
params=token.split_contents()
tag = params[0]
if len(params) < 6:
raise template.TemplateSyntaxError('"%s" template tag requires six parameters' % tag)
if params[2] != 'with':
raise template.TemplateSyntaxError('"%s" template tag requires the third parameter to be "with"' % tag)
try:
app_label, model = params[3].strip('"').split('.')
ct = ContentType.objects.get_by_natural_key(app_label, model)
except __HOLE__:
raise template.TemplateSyntaxError('"%s" template tag option "with" requires an argument of the form app_label.model (see django.contrib.contenttypes)' % tag)
except ContentType.DoesNotExist:
raise template.TemplateSyntaxError('"%s" template tag option "with" requires an argument of the form app_label.model which refers to an installed content type (see django.contrib.contenttypes)' % tag)
if params[4] != 'as':
raise template.TemplateSyntaxError('"%s" template tag requires the fifth parameter to be "as"' % tag)
return MembersofNode(collection=params[1], model=ct.model_class(), as_var=params[5]) | ValueError | dataset/ETHPy150Open ithinksw/philo/philo/templatetags/collections.py/membersof |
6,465 | def close(self):
"""Close the consumer, waiting indefinitely for any needed cleanup."""
if self._closed:
return
log.debug("Closing the KafkaConsumer.")
self._closed = True
self._coordinator.close()
self._metrics.close()
self._client.close()
try:
self.config['key_deserializer'].close()
except AttributeError:
pass
try:
self.config['value_deserializer'].close()
except __HOLE__:
pass
log.debug("The KafkaConsumer has closed.") | AttributeError | dataset/ETHPy150Open dpkp/kafka-python/kafka/consumer/group.py/KafkaConsumer.close |
6,466 | def __next__(self):
if not self._iterator:
self._iterator = self._message_generator()
self._set_consumer_timeout()
try:
return next(self._iterator)
except __HOLE__:
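# Generator exhausted: discard it so the next call builds a fresh one, then re-raise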
self._iterator = None
raise | StopIteration | dataset/ETHPy150Open dpkp/kafka-python/kafka/consumer/group.py/KafkaConsumer.__next__ |
6,467 | def run(self):
spec = self.get_spec()
assert not spec.is_local()
if spec.is_url():
return self._run_url(spec)
data = self.get_meta(spec)
try:
chk = data['sha1']
except __HOLE__:
raise PgxnClientException(
"sha1 missing from the distribution meta")
with self.api.download(data['name'], SemVer(data['version'])) as fin:
fn = network.download(fin, self.opts.target)
self.verify_checksum(fn, chk)
return fn | KeyError | dataset/ETHPy150Open dvarrazzo/pgxnclient/pgxnclient/commands/install.py/Download.run |
6,468 | def is_libdir_writable(self):
"""
Check if the Postgres installation directory is writable.
If it is, we will assume that sudo is not required to
install/uninstall the library, so the sudo program will not be invoked
or its specification will not be required.
"""
dir = self.call_pg_config('libdir')
logger.debug("testing if %s is writable", dir)
try:
f = tempfile.TemporaryFile(prefix="pgxn-", suffix=".test", dir=dir)
f.write(b('test'))
f.close()
except (IOError, __HOLE__):
rv = False
else:
rv = True
return rv | OSError | dataset/ETHPy150Open dvarrazzo/pgxnclient/pgxnclient/commands/install.py/SudoInstallUninstall.is_libdir_writable |
6,469 | def _get_extensions(self):
"""
Return a list of pairs (name, sql file) to be loaded/unloaded.
Items are in loading order.
"""
spec = self.get_spec()
dist = self.get_meta(spec)
if 'provides' not in dist:
# No 'provides' specified: assume a single extension named
# after the distribution. This is automatically done by PGXN,
# but we should do ourselves to deal with local META files
# not mangled by the PGXN upload script yet.
name = dist['name']
for ext in self.opts.extensions:
if ext != name:
raise PgxnClientException(
"can't find extension '%s' in the distribution '%s'"
% (name, spec))
return [ (name, None) ]
rv = []
if not self.opts.extensions:
# All the extensions, in the order specified
# (assume we got an orddict from json)
for name, data in dist['provides'].items():
rv.append((name, data.get('file')))
else:
# Only the specified extensions
for name in self.opts.extensions:
try:
data = dist['provides'][name]
except __HOLE__:
raise PgxnClientException(
"can't find extension '%s' in the distribution '%s'"
% (name, spec))
rv.append((name, data.get('file')))
return rv | KeyError | dataset/ETHPy150Open dvarrazzo/pgxnclient/pgxnclient/commands/install.py/LoadUnload._get_extensions |
6,470 | def execute_task(task, retries=0, handlers_map=None):
"""Execute mapper's executor task.
This will try to determine the correct mapper handler for the task, will set
up all mock environment necessary for task execution, and execute the task
itself.
This function can be used for functional-style testing of functionality
depending on mapper framework.
Args:
task: a taskqueue task.
retries: the current retry of this task.
handlers_map: a dict from url regex to handler.
Returns:
the handler instance used for this task.
Raises:
Exception: whatever the task raises.
"""
# Find the handler class
if not handlers_map:
handlers_map = main.create_handlers_map()
url = task["url"]
handler = None
params = []
for (re_str, handler_class) in handlers_map:
re_str = "^" + re_str + "($|\\?)"
m = re.match(re_str, url)
if m:
params = m.groups()[:-1] # last groups was added by ($|\\?) above
break
else:
raise Exception("Can't determine handler for %s" % task)
request = mock_webapp.MockRequest()
request.set_url(url)
# Set dependent env vars if test hasn't set them.
version = "mr-test-support-version.1"
module = "mr-test-support-module"
default_version_hostname = "mr-test-support.appspot.com"
host = "%s.%s.%s" % (version.split(".")[0],
module,
default_version_hostname)
if "CURRENT_VERSION_ID" not in os.environ:
request.environ["CURRENT_VERSION_ID"] = version
if "DEFAULT_VERSION_HOSTNAME" not in os.environ:
request.environ["DEFAULT_VERSION_HOSTNAME"] = (
default_version_hostname)
if "CURRENT_MODULE_ID" not in os.environ:
request.environ["CURRENT_MODULE_ID"] = module
if "HTTP_HOST" not in os.environ:
request.environ["HTTP_HOST"] = host
# Set taskqueue specific headers and env vars.
for k, v in task.get("headers", []):
request.headers[k] = v
environ_key = "HTTP_" + k.replace("-", "_").upper()
request.environ[environ_key] = v
request.headers["X-AppEngine-TaskExecutionCount"] = retries
request.environ["HTTP_X_APPENGINE_TASKNAME"] = (
task.get("name", "default_task_name"))
request.environ["HTTP_X_APPENGINE_QUEUENAME"] = (
task.get("queue_name", "default"))
request.environ["PATH_INFO"] = request.path
if task["method"] == "POST":
# taskqueue_stub base64 encodes body when it returns the task to us.
request.body = base64.b64decode(task["body"])
for k, v in decode_task_payload(task).iteritems():
request.set(k, v)
response = mock_webapp.MockResponse()
saved_os_environ = os.environ
copy_os_environ = dict(os.environ)
copy_os_environ.update(request.environ)
try:
os.environ = copy_os_environ
# Webapp2 expects request/response in the handler instantiation, and calls
# initialize automatically.
handler = handler_class(request, response)
except __HOLE__:
# For webapp, setup request before calling initialize.
handler = handler_class()
handler.initialize(request, response)
finally:
os.environ = saved_os_environ
try:
os.environ = copy_os_environ
if task["method"] == "POST":
handler.post(*params)
elif task["method"] == "GET":
handler.get(*params)
else:
raise Exception("Unsupported method: %s" % task.method)
finally:
os.environ = saved_os_environ
if handler.response.status != 200:
raise Exception("Handler failure: %s (%s). \nTask: %s\nHandler: %s" %
(handler.response.status,
handler.response.status_message,
task,
handler))
return handler | TypeError | dataset/ETHPy150Open GoogleCloudPlatform/appengine-mapreduce/python/src/mapreduce/test_support.py/execute_task |
6,471 | def is_botocore():
try:
import botocore
return True
except ImportError:
if six.PY2:
try:
import boto
return False
except __HOLE__:
raise NotConfigured('missing botocore or boto library')
else:
raise NotConfigured('missing botocore library') | ImportError | dataset/ETHPy150Open scrapy/scrapy/scrapy/utils/boto.py/is_botocore |
6,472 | def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
# Compatibility with Django 1.7's stricter initialization
if hasattr(django, "setup"):
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
if len(test_args) == 0:
test_args = ["mailer.tests"]
except __HOLE__:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
if len(test_args) == 0:
test_args = ["mailer"]
failures = runner_class(
verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures) | ImportError | dataset/ETHPy150Open pinax/django-mailer/runtests.py/runtests |
6,473 | def init_app(self, app):
"""
Do setup that requires a Flask app.
"""
# register callback route and cookie-setting decorator
app.route('/oidc_callback')(self.oidc_callback)
app.after_request(self.after_request)
# load client_secrets.json
self.flow = flow_from_clientsecrets(
app.config['OIDC_CLIENT_SECRETS'],
scope=['openid', 'email'])
assert isinstance(self.flow, OAuth2WebServerFlow)
# create a cookie signer using the Flask secret key
self.cookie_serializer = TimedJSONWebSignatureSerializer(
app.config['SECRET_KEY'])
try:
self.google_apps_domain = app.config['OIDC_GOOGLE_APPS_DOMAIN']
except KeyError:
pass
try:
self.id_token_cookie_name = app.config['OIDC_ID_TOKEN_COOKIE_NAME']
except KeyError:
pass
try:
self.id_token_cookie_ttl = app.config['OIDC_ID_TOKEN_COOKIE_TTL']
except KeyError:
pass
try:
self.id_token_cookie_secure =\
app.config['OIDC_ID_TOKEN_COOKIE_SECURE']
except KeyError:
pass
try:
self.credentials_store = app.config['OIDC_CREDENTIALS_STORE']
except __HOLE__:
pass | KeyError | dataset/ETHPy150Open SteelPangolin/flask-oidc/flask_oidc/__init__.py/OpenIDConnect.init_app |
6,474 | def get_cookie_id_token(self):
try:
id_token_cookie = request.cookies[self.id_token_cookie_name]
return self.cookie_serializer.loads(id_token_cookie)
except (__HOLE__, SignatureExpired):
logger.debug("Missing or invalid ID token cookie", exc_info=True)
return None | KeyError | dataset/ETHPy150Open SteelPangolin/flask-oidc/flask_oidc/__init__.py/OpenIDConnect.get_cookie_id_token |
6,475 | def authenticate_or_redirect(self):
"""
Helper function suitable for @app.before_request and @check (below).
Sets g.oidc_id_token to the ID token if the user has successfully
authenticated, else returns a redirect object so they can go try
to authenticate.
:return: A redirect, or None if the user is authenticated.
"""
# the auth callback and error pages don't need user to be authenticated
if request.endpoint in frozenset(['oidc_callback', 'oidc_error']):
return None
# retrieve signed ID token cookie
id_token = self.get_cookie_id_token()
if id_token is None:
return self.redirect_to_auth_server(request.url)
# ID token expired
# when Google is the IdP, this happens after one hour
if self.time() >= id_token['exp']:
# get credentials from store
try:
credentials = self.credentials_store[id_token['sub']]
except __HOLE__:
logger.debug("Expired ID token, credentials missing",
exc_info=True)
return self.redirect_to_auth_server(request.url)
# refresh and store credentials
try:
credentials.refresh(self.http)
id_token = credentials.id_token
self.credentials_store[id_token['sub']] = credentials
self.set_cookie_id_token(id_token)
except AccessTokenRefreshError:
# Can't refresh. Wipe credentials and redirect user to IdP
# for re-authentication.
logger.debug("Expired ID token, can't refresh credentials",
exc_info=True)
del self.credentials_store[id_token['sub']]
return self.redirect_to_auth_server(request.url)
# make ID token available to views
g.oidc_id_token = id_token
return None | KeyError | dataset/ETHPy150Open SteelPangolin/flask-oidc/flask_oidc/__init__.py/OpenIDConnect.authenticate_or_redirect |
6,476 | def oidc_callback(self):
"""
Exchange the auth code for actual credentials,
then redirect to the originally requested page.
"""
# retrieve session and callback variables
try:
session_csrf_token = session.pop('oidc_csrf_token')
state = json.loads(request.args['state'])
csrf_token = state['csrf_token']
destination = state['destination']
code = request.args['code']
except (__HOLE__, ValueError):
logger.debug("Can't retrieve CSRF token, state, or code",
exc_info=True)
return self.oidc_error()
# check callback CSRF token passed to IdP
# against session CSRF token held by user
if csrf_token != session_csrf_token:
logger.debug("CSRF token mismatch")
return self.oidc_error()
# make a request to IdP to exchange the auth code for OAuth credentials
flow = self.flow_for_request()
credentials = flow.step2_exchange(code, http=self.http)
id_token = credentials.id_token
if not self.is_id_token_valid(id_token):
logger.debug("Invalid ID token")
if id_token.get('hd') != self.google_apps_domain:
return self.oidc_error(
"You must log in with an account from the {0} domain."
.format(self.google_apps_domain),
self.WRONG_GOOGLE_APPS_DOMAIN)
return self.oidc_error()
# store credentials by subject
# when Google is the IdP, the subject is their G+ account number
self.credentials_store[id_token['sub']] = credentials
# set a persistent signed cookie containing the ID token
# and redirect to the final destination
# TODO: validate redirect destination
response = redirect(destination)
self.set_cookie_id_token(id_token)
return response | KeyError | dataset/ETHPy150Open SteelPangolin/flask-oidc/flask_oidc/__init__.py/OpenIDConnect.oidc_callback |
6,477 | def _verify_action(self, actions, type_, name, value):
try:
action = actions[0]
if action.cls_action_type != type_:
return "Action type error. send:%s, val:%s" \
% (type_, action.cls_action_type)
except IndexError:
return "Action is not setting."
f_value = None
if name:
try:
if isinstance(name, list):
f_value = [getattr(action, n) for n in name]
else:
f_value = getattr(action, name)
except __HOLE__:
pass
if f_value != value:
return "Value error. send:%s=%s val:%s" \
% (name, value, f_value)
return True | AttributeError | dataset/ETHPy150Open osrg/ryu/ryu/tests/integrated/test_add_flow_v10.py/RunTest._verify_action |
6,478 | def get_disk_statistics(self):
"""
Create a map of disks in the machine.
http://www.kernel.org/doc/Documentation/iostats.txt
Returns:
(major, minor) -> DiskStatistics(device, ...)
"""
result = {}
if os.access('/proc/diskstats', os.R_OK):
self.proc_diskstats = True
fp = open('/proc/diskstats')
try:
for line in fp:
try:
columns = line.split()
# On early linux v2.6 versions, partitions have only 4
# output fields not 11. From linux 2.6.25 partitions
# have the full stats set.
if len(columns) < 14:
continue
major = int(columns[0])
minor = int(columns[1])
device = columns[2]
if (device.startswith('ram')
or device.startswith('loop')):
continue
result[(major, minor)] = {
'device': device,
'reads': float(columns[3]),
'reads_merged': float(columns[4]),
'reads_sectors': float(columns[5]),
'reads_milliseconds': float(columns[6]),
'writes': float(columns[7]),
'writes_merged': float(columns[8]),
'writes_sectors': float(columns[9]),
'writes_milliseconds': float(columns[10]),
'io_in_progress': float(columns[11]),
'io_milliseconds': float(columns[12]),
'io_milliseconds_weighted': float(columns[13])
}
except __HOLE__:
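# Skip lines whose numeric columns fail to parse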
continue
finally:
fp.close()
else:
self.proc_diskstats = False
if not psutil:
self.log.error('Unable to import psutil')
return None
disks = psutil.disk_io_counters(True)
for disk in disks:
result[(0, len(result))] = {
'device': disk,
'reads': disks[disk].read_count,
'reads_sectors': (disks[disk].read_bytes
/ int(self.config['sector_size'])),
'reads_milliseconds': disks[disk].read_time,
'writes': disks[disk].write_count,
'writes_sectors': (disks[disk].write_bytes
/ int(self.config['sector_size'])),
'writes_milliseconds': disks[disk].write_time,
'io_milliseconds':
disks[disk].read_time + disks[disk].write_time,
'io_milliseconds_weighted':
disks[disk].read_time + disks[disk].write_time
}
return result | ValueError | dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/diskusage/diskusage.py/DiskUsageCollector.get_disk_statistics |
6,479 | def get_callable(lookup_view, can_fail=False):
"""
Convert a string version of a function name to the callable object.
If the lookup_view is not an import path, it is assumed to be a URL pattern
label and the original string is returned.
If can_fail is True, lookup_view might be a URL pattern label, so errors
during the import fail and the string is returned.
"""
if not callable(lookup_view):
try:
# Bail early for non-ASCII strings (they can't be functions).
lookup_view = lookup_view.encode('ascii')
mod_name, func_name = get_mod_func(lookup_view)
if func_name != '':
lookup_view = getattr(import_module(mod_name), func_name)
if not callable(lookup_view):
raise AttributeError("'%s.%s' is not a callable." % (mod_name, func_name))
except (__HOLE__, AttributeError):
if not can_fail:
raise
except UnicodeEncodeError:
pass
return lookup_view | ImportError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/get_callable |
6,480 | def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except __HOLE__:
return callback, ''
return callback[:dot], callback[dot+1:] | ValueError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/get_mod_func |
6,481 | def _get_callback(self):
if self._callback is not None:
return self._callback
try:
self._callback = get_callable(self._callback_str)
except ImportError, e:
mod_name, _ = get_mod_func(self._callback_str)
raise ViewDoesNotExist, "Could not import %s. Error was: %s" % (mod_name, str(e))
except __HOLE__, e:
mod_name, func_name = get_mod_func(self._callback_str)
raise ViewDoesNotExist, "Tried %s in module %s. Error was: %s" % (func_name, mod_name, str(e))
return self._callback | AttributeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/RegexURLPattern._get_callback |
6,482 | def _get_urlconf_module(self):
try:
return self._urlconf_module
except __HOLE__:
self._urlconf_module = import_module(self.urlconf_name)
return self._urlconf_module | AttributeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/RegexURLResolver._get_urlconf_module |
6,483 | def _get_url_patterns(self):
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except __HOLE__:
raise ImproperlyConfigured("The included urlconf %s doesn't have any "
"patterns in it" % self.urlconf_name)
return patterns | TypeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/RegexURLResolver._get_url_patterns |
6,484 | def _resolve_special(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type)
mod_name, func_name = get_mod_func(callback)
try:
return getattr(import_module(mod_name), func_name), {}
except (ImportError, __HOLE__), e:
raise ViewDoesNotExist, "Tried %s. Error was: %s" % (callback, str(e)) | AttributeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/RegexURLResolver._resolve_special |
6,485 | def reverse(self, lookup_view, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
try:
lookup_view = get_callable(lookup_view, True)
except (ImportError, __HOLE__), e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
unicode_args = [force_unicode(val) for val in args]
candidate = result % dict(zip(params, unicode_args))
else:
if set(kwargs.keys()) != set(params):
continue
unicode_kwargs = dict([(k, force_unicode(v)) for (k, v) in kwargs.items()])
candidate = result % unicode_kwargs
if re.search(u'^%s' % pattern, candidate, re.UNICODE):
return candidate
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found." % (lookup_view, args, kwargs)) | AttributeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/RegexURLResolver.reverse |
6,486 | def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
if prefix is None:
prefix = get_script_prefix()
if not isinstance(viewname, basestring):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
resolved_path = []
while path:
ns = path.pop()
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_app and current_app in app_list:
# If we are reversing for a particular app, use that namespace
ns = current_app
elif ns not in app_list:
# The name isn't shared by one of the instances (i.e., the default)
# so just pick the first instance as the default.
ns = app_list[0]
except __HOLE__:
pass
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
prefix = prefix + extra
except KeyError, key:
if resolved_path:
raise NoReverseMatch("%s is not a registered namespace inside '%s'" % (key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" % key)
return iri_to_uri(u'%s%s' % (prefix, resolver.reverse(view,
*args, **kwargs))) | KeyError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/core/urlresolvers.py/reverse |
6,487 | def do_transform(self):
"""Apply the transformation (if it exists) to the latest_value"""
if not self.transform:
return
try:
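# Evaluate the transform expression against the latest value and the seconds since the last update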
self.latest_value = utils.Transform(
expr=self.transform, value=self.latest_value,
timedelta=self.time_between_updates().total_seconds()).result()
except (__HOLE__, ValueError):
logger.warn("Invalid transformation '%s' for metric %s",
self.transform, self.pk)
self.transform = '' | TypeError | dataset/ETHPy150Open lincolnloop/salmon/salmon/metrics/models.py/Metric.do_transform |
6,488 | def get_module(path):
"""
A modified duplicate from Django's built in backend
retriever.
slugify = get_module('django.template.defaultfilters.slugify')
"""
try:
from importlib import import_module
except ImportError as e:
from django.utils.importlib import import_module
try:
mod_name, func_name = path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError as e:
raise ImportError(
'Error importing alert function {0}: "{1}"'.format(mod_name, e))
try:
func = getattr(mod, func_name)
except __HOLE__:
raise ImportError(
('Module "{0}" does not define a "{1}" function'
).format(mod_name, func_name))
return func | AttributeError | dataset/ETHPy150Open zapier/django-rest-hooks/rest_hooks/utils.py/get_module |
6,489 | def _format_firewall_rules(firewall_policy):
try:
output = '[' + ',\n '.join([rule for rule in
firewall_policy['firewall_rules']]) + ']'
return output
except (__HOLE__, KeyError):
return '' | TypeError | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/neutron/v2_0/fw/firewallpolicy.py/_format_firewall_rules |
6,490 | def api_call(request, format="json"):
""" the public api
attempts to validate a request as a valid oauth request then
builds the appropriate api_user object and tries to dispatch
to the provided method
"""
servertime = api.utcnow()
try:
kwargs = oauth_util.get_method_kwargs(request)
json_params = kwargs.pop('json_params', None)
if json_params:
parsed = simplejson.loads(json_params)
# Turn the keys from unicode to str so that they can be used as method
# parameters.
kwargs.update(
dict([(str(k), v) for k, v in parsed.iteritems()]))
method = kwargs.pop('method', '').replace('.', '_')
if method == 'presence_send':
method = 'post'
if not method:
raise exception.ApiException(exception.NO_METHOD, "No method specified")
# Allows us to turn off authentication for testing purposes
if not settings.API_DISABLE_VERIFICATION:
api_user = request.user
else:
api_user = api.ROOT
method_ref = api.PublicApi.get_method(method, api_user)
if not method_ref:
raise exception.ApiException(exception.INVALID_METHOD,
'Invalid method: %s' % method)
if not api_user:
raise exception.ApiException(0x00, 'Invalid API user')
if getattr(api_user, 'legacy', None) and method == 'post':
kwargs['nick'] = api_user.nick
rv = method_ref(api_user, **kwargs)
if rv is None:
raise exception.ApiException(0x00, 'method %s returned None'%(method))
return render_api_response(rv, format, servertime=servertime)
except oauth_util.OAuthError, e:
exc = exception.ApiException(exception.OAUTH_ERROR, e.message)
return render_api_response(exc, format)
except exception.ApiException, e:
return render_api_response(e, format)
except __HOLE__, e:
exc = exception.ApiException(exception.INVALID_ARGUMENTS, str(e))
return render_api_response(exc, format)
except:
exception.handle_exception(request)
return render_api_response(request.errors[0], format)
# some error happened
return render_api_response(request.errors[0], format) | TypeError | dataset/ETHPy150Open CollabQ/CollabQ/api/views.py/api_call |
6,491 | def _socketpair(self):
"""Return a socket pair regardless of platform.
:rtype: (socket, socket)
"""
try:
server, client = socket.socketpair()
except __HOLE__:
# Connect in Windows
LOGGER.debug('Falling back to emulated socketpair behavior')
# Create the listening server socket & bind it to a random port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
# Get the port for the notifying socket to connect to
port = s.getsockname()[1]
# Create the notifying client socket and connect using a timer
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect():
client.connect(('127.0.0.1', port))
t = threading.Timer(0.01, connect)
t.start()
# Have the listening server socket listen and accept the connect
s.listen(0)
server, _unused = s.accept()
# Don't block on either socket
server.setblocking(0)
client.setblocking(0)
return server, client | AttributeError | dataset/ETHPy150Open gmr/rabbitpy/rabbitpy/io.py/IO._socketpair |
6,492 | def get_2000_top_level_counts(geography):
try:
pop2000 = geography['data']['2000']['P1']['P001001']
hu2000 = geography['data']['2000']['H1']['H001001']
return pop2000,hu2000
except __HOLE__:
return '','' | KeyError | dataset/ETHPy150Open ireapps/census/dataprocessing/deploy_csv.py/get_2000_top_level_counts |
6,493 | def write_table_data(flo, state_fips, sumlev, table_id):
"""Given a File-Like Object, write a table to it"""
w = UnicodeCSVWriter(flo)
metadata = fetch_table_label(table_id)
header = ['GEOID', 'SUMLEV'] + METADATA_HEADERS + ['POP100.2000','HU100.2000']
for key in sorted(metadata['labels']):
header.extend([key,"%s.2000" % key])
w.writerow(header)
query = {'sumlev': sumlev, 'metadata.STATE': state_fips }
collection = utils.get_geography_collection()
for geography in collection.find(query):
row = [geography['geoid'],geography['sumlev']]
for h in METADATA_HEADERS:
row.append(geography['metadata'][h])
pop2000,hu2000 = get_2000_top_level_counts(geography)
row.extend([pop2000,hu2000])
for key in sorted(metadata['labels']):
try:
row.append(geography['data']['2010'][table_id][key])
except KeyError, e:
if table_id.startswith('PCO'):
print "No data for %s at %s" % (table_id, sumlev)
return
raise e # don't otherwise expect this error, so raise it...
try:
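# 2000 census figures may be absent for this geography; fall back to an empty cell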
row.append(geography['data']['2000'][table_id][key])
except __HOLE__:
row.append('')
w.writerow(row) | KeyError | dataset/ETHPy150Open ireapps/census/dataprocessing/deploy_csv.py/write_table_data |
6,494 | def gid_to_group(gid):
'''
Convert the group id to the group name on this system
gid
gid to convert to a group name
CLI Example:
.. code-block:: bash
salt '*' file.gid_to_group 0
'''
try:
gid = int(gid)
except __HOLE__:
# This is not an integer, maybe it's already the group name?
gid = group_to_gid(gid)
if gid == '':
# Don't even bother to feed it to grp
return ''
try:
return grp.getgrgid(gid).gr_name
except (KeyError, NameError):
# If group is not present, fall back to the gid.
return gid | ValueError | dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/gid_to_group |
6,495 | def group_to_gid(group):
'''
Convert the group to the gid on this system
group
group to convert to its gid
CLI Example:
.. code-block:: bash
salt '*' file.group_to_gid root
'''
if group is None:
return ''
try:
if isinstance(group, int):
return group
return grp.getgrnam(group).gr_gid
except __HOLE__:
return '' | KeyError | dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/group_to_gid |
6,496 | def uid_to_user(uid):
'''
Convert a uid to a user name
uid
uid to convert to a username
CLI Example:
.. code-block:: bash
salt '*' file.uid_to_user 0
'''
try:
return pwd.getpwuid(uid).pw_name
except (__HOLE__, NameError):
# If user is not present, fall back to the uid.
return uid | KeyError | dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/uid_to_user |
6,497 | def user_to_uid(user):
'''
Convert user name to a uid
user
user name to convert to its uid
CLI Example:
.. code-block:: bash
salt '*' file.user_to_uid root
'''
if user is None:
user = salt.utils.get_user()
try:
if isinstance(user, int):
return user
return pwd.getpwnam(user).pw_uid
except __HOLE__:
return '' | KeyError | dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/user_to_uid |
6,498 | def chown(path, user, group):
'''
Chown a file, pass the file the desired user and group
path
path to the file or directory
user
user owner
group
group owner
CLI Example:
.. code-block:: bash
salt '*' file.chown /etc/passwd root root
'''
path = os.path.expanduser(path)
uid = user_to_uid(user)
gid = group_to_gid(group)
err = ''
if uid == '':
if user:
err += 'User does not exist\n'
else:
uid = -1
if gid == '':
if group:
err += 'Group does not exist\n'
else:
gid = -1
if not os.path.exists(path):
try:
# Broken symlinks will return false, but still need to be chowned
return os.lchown(path, uid, gid)
except __HOLE__:
pass
err += 'File not found'
if err:
return err
return os.chown(path, uid, gid) | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/chown |
6,499 | def find(path, *args, **kwargs):
'''
Approximate the Unix ``find(1)`` command and return a list of paths that
meet the specified criteria.
The options include match criteria:
.. code-block:: text
name = path-glob # case sensitive
iname = path-glob # case insensitive
regex = path-regex # case sensitive
iregex = path-regex # case insensitive
type = file-types # match any listed type
user = users # match any listed user
group = groups # match any listed group
size = [+-]number[size-unit] # default unit = byte
mtime = interval # modified since date
grep = regex # search file contents
and/or actions:
.. code-block:: text
delete [= file-types] # default type = 'f'
exec = command [arg ...] # where {} is replaced by pathname
print [= print-opts]
and/or depth criteria:
.. code-block:: text
maxdepth = maximum depth to transverse in path
mindepth = minimum depth to transverse before checking files or directories
The default action is ``print=path``
``path-glob``:
.. code-block:: text
* = match zero or more chars
? = match any char
[abc] = match a, b, or c
[!abc] or [^abc] = match anything except a, b, and c
[x-y] = match chars x through y
[!x-y] or [^x-y] = match anything except chars x through y
{a,b,c} = match a or b or c
``path-regex``: a Python Regex (regular expression) pattern to match pathnames
``file-types``: a string of one or more of the following:
.. code-block:: text
a: all file types
b: block device
c: character device
d: directory
p: FIFO (named pipe)
f: plain file
l: symlink
s: socket
``users``: a space and/or comma separated list of user names and/or uids
``groups``: a space and/or comma separated list of group names and/or gids
``size-unit``:
.. code-block:: text
b: bytes
k: kilobytes
m: megabytes
g: gigabytes
t: terabytes
interval:
.. code-block:: text
[<num>w] [<num>d] [<num>h] [<num>m] [<num>s]
where:
w: week
d: day
h: hour
m: minute
s: second
print-opts: a comma and/or space separated list of one or more of the
following:
.. code-block:: text
group: group name
md5: MD5 digest of file contents
mode: file permissions (as integer)
mtime: last modification time (as time_t)
name: file basename
path: file absolute path
size: file size in bytes
type: file type
user: user name
CLI Examples:
.. code-block:: bash
salt '*' file.find / type=f name=\\*.bak size=+10m
salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
'''
if 'delete' in args:
kwargs['delete'] = 'f'
elif 'print' in args:
kwargs['print'] = 'path'
try:
finder = salt.utils.find.Finder(kwargs)
except __HOLE__ as ex:
return 'error: {0}'.format(ex)
ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i]
ret.sort()
return ret | ValueError | dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/find |