Unnamed: 0 (int64, 0-10k) | function (string, lengths 79-138k) | label (string, 20 classes) | info (string, lengths 42-261)
---|---|---|---|
5,200 | def test_set_font(self):
rcmod.set(font="Verdana")
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except __HOLE__:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set() | AssertionError | dataset/ETHPy150Open mwaskom/seaborn/seaborn/tests/test_rcmod.py/TestFonts.test_set_font |
5,201 | def test_different_sans_serif(self):
if LooseVersion(mpl.__version__) < LooseVersion("1.4"):
raise nose.SkipTest
rcmod.set()
rcmod.set_style(rc={"font.sans-serif":
["Verdana"]})
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except __HOLE__:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set() | AssertionError | dataset/ETHPy150Open mwaskom/seaborn/seaborn/tests/test_rcmod.py/TestFonts.test_different_sans_serif |
5,202 | def parse_host_args(self, *args):
"""
Splits out the patch subcommand and returns a comma separated list of host_strings
"""
self.subcommand = None
new_args = args
try:
sub = args[0]
if sub in ['project','templates','static','media','wsgi','webconf']:
self.subcommand = args[0]
new_args = args[1:]
except __HOLE__:
pass
return ','.join(new_args) | IndexError | dataset/ETHPy150Open bretth/woven/woven/management/commands/patch.py/Command.parse_host_args |
5,203 | def run(self):
self.set_arguments()
args = self.parser.parse_args()
try:
generate_json(args)
except __HOLE__:
pass | KeyboardInterrupt | dataset/ETHPy150Open projectatomic/atomic-reactor/atomic_reactor/cli/secret.py/CLI.run |
5,204 | def monkeypatch_pickle_builder():
import shutil
from os import path
try:
import cPickle as pickle
except __HOLE__:
import pickle
from sphinx.util.console import bold
def handle_finish(self):
# dump the global context
outfilename = path.join(self.outdir, 'globalcontext.pickle')
f = open(outfilename, 'wb')
try:
pickle.dump(self.globalcontext, f, 2)
finally:
f.close()
self.info(bold('dumping search index...'))
self.indexer.prune(self.env.all_docs)
f = open(path.join(self.outdir, 'searchindex.pickle'), 'wb')
try:
self.indexer.dump(f, 'pickle')
finally:
f.close()
# copy the environment file from the doctree dir to the output dir
# as needed by the web app
shutil.copyfile(path.join(self.doctreedir, sphinx.builder.ENV_PICKLE_FILENAME),
path.join(self.outdir, sphinx.builder.ENV_PICKLE_FILENAME))
# touch 'last build' file, used by the web application to determine
# when to reload its environment and clear the cache
open(path.join(self.outdir, sphinx.builder.LAST_BUILD_FILENAME), 'w').close()
sphinx.builder.PickleHTMLBuilder.handle_finish = handle_finish | ImportError | dataset/ETHPy150Open dcramer/django-compositepks/docs/_ext/djangodocs.py/monkeypatch_pickle_builder |
5,205 | def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except __HOLE__, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1) | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/base.py/BaseCommand.execute |
5,206 | def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except __HOLE__:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or '')) | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/base.py/BaseCommand.validate |
5,207 | def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, __HOLE__), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output) | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/base.py/AppCommand.handle |
5,208 | def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except __HOLE__, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for subdir in subdirs[:]:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)) | OSError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/base.py/copy_helper |
5,209 | def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = importlib.import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e))
except __HOLE__, e:
raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr))
return cls(*args, **kwargs) | ValueError | dataset/ETHPy150Open adieu/django-nonrel/django/core/files/uploadhandler.py/load_handler |
5,210 | def _getInstallFunction(platform):
"""
Return a function to install the reactor most suited for the given platform.
@param platform: The platform for which to select a reactor.
@type platform: L{twisted.python.runtime.Platform}
@return: A zero-argument callable which will install the selected
reactor.
"""
# Linux: epoll(7) is the default, since it scales well.
#
# OS X: poll(2) is not exposed by Python because it doesn't
# support all file descriptors (in particular, lack of PTY support
# is a problem) -- see <http://bugs.python.org/issue5154>. kqueue
# reactor is being rewritten (see
# <http://twistedmatrix.com/trac/ticket/1918>), and also has same
# restriction as poll(2) as far PTY support goes.
#
# Windows: IOCP should eventually be default, but still has some serious
# bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
#
# We therefore choose epoll(7) on Linux, poll(2) on other non-OS X POSIX
# platforms, and select(2) everywhere else.
try:
if platform.isLinux():
try:
from twisted.internet.epollreactor import install
except ImportError:
from twisted.internet.pollreactor import install
elif platform.getType() == 'posix' and not platform.isMacOSX():
from twisted.internet.pollreactor import install
else:
from twisted.internet.selectreactor import install
except __HOLE__:
from twisted.internet.selectreactor import install
return install | ImportError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/default.py/_getInstallFunction |
5,211 | def __new__(cls, path):
"""Construct new EPath. """
if isinstance(path, EPath):
return path
if not path:
raise ValueError("empty EPath")
_path = path
if path[0] == '/':
path = path[1:]
else:
raise NotImplementedError("non-root EPath")
epath = []
for selector in path.split('/'):
selector = selector.strip()
if not selector:
raise ValueError("empty selector")
index = 0
for c in selector:
if c.isalnum() or c == '_' or c == '|' or c == '?':
index += 1
else:
break
attrs = []
types = []
if index:
elements = selector[:index]
selector = selector[index:]
for element in elements.split('|'):
element = element.strip()
if not element:
raise ValueError("empty element")
if element.endswith('?'):
attrs.append(element[:-1])
else:
types.append(element)
span = None
if selector == '*':
pass
else:
if selector.startswith('['):
try:
i = selector.index(']')
except __HOLE__:
raise ValueError("expected ']', got EOL")
_span, span = selector[1:i], []
if ':' not in _span:
span = int(_span)
else:
for elt in _span.split(':', 3):
if not elt:
span.append(None)
else:
span.append(int(elt))
span = slice(*span)
selector = selector[i + 1:]
if selector:
raise ValueError("trailing characters in selector")
epath.append((attrs, types, span))
obj = object.__new__(cls)
obj._path = _path
obj._epath = epath
return obj | ValueError | dataset/ETHPy150Open sympy/sympy/sympy/simplify/epathtools.py/EPath.__new__ |
5,212 | def apply(self, expr, func, args=None, kwargs=None):
"""
Modify parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.apply(expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.apply(expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
def _apply(path, expr, func):
if not path:
return func(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
if not expr.is_Atom:
args, basic = self._get_ordered_args(expr), True
else:
return expr
elif hasattr(expr, '__iter__'):
args, basic = expr, False
else:
return expr
args = list(args)
if span is not None:
if type(span) == slice:
indices = range(*span.indices(len(args)))
else:
indices = [span]
else:
indices = range(len(args))
for i in indices:
try:
arg = args[i]
except __HOLE__:
continue
if self._has(arg, attrs, types):
args[i] = _apply(path, arg, func)
if basic:
return expr.func(*args)
else:
return expr.__class__(args)
_args, _kwargs = args or (), kwargs or {}
_func = lambda expr: func(expr, *_args, **_kwargs)
return _apply(self._epath, expr, _func) | IndexError | dataset/ETHPy150Open sympy/sympy/sympy/simplify/epathtools.py/EPath.apply |
5,213 | def select(self, expr):
"""
Retrieve parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.select(expr)
[x, y]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.select(expr)
[x, x, y]
"""
result = []
def _select(path, expr):
if not path:
result.append(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
args = self._get_ordered_args(expr)
elif hasattr(expr, '__iter__'):
args = expr
else:
return
if span is not None:
if type(span) == slice:
args = args[span]
else:
try:
args = [args[span]]
except __HOLE__:
return
for arg in args:
if self._has(arg, attrs, types):
_select(path, arg)
_select(self._epath, expr)
return result | IndexError | dataset/ETHPy150Open sympy/sympy/sympy/simplify/epathtools.py/EPath.select |
5,214 | @classmethod
def from_entity(cls, entity):
"""Load from entity to class based on discriminator.
Rather than instantiating a new Model instance based on the kind
mapping, this creates an instance of the correct model class based
on the entities class-key.
Args:
entity: Entity loaded directly from datastore.
Raises:
KindError when there is no class mapping based on discriminator.
"""
if (_CLASS_KEY_PROPERTY in entity and
tuple(entity[_CLASS_KEY_PROPERTY]) != cls.class_key()):
key = tuple(entity[_CLASS_KEY_PROPERTY])
try:
poly_class = _class_map[key]
except __HOLE__:
raise db.KindError('No implementation for class \'%s\'' % (key,))
return poly_class.from_entity(entity)
return super(PolyModel, cls).from_entity(entity) | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/db/polymodel.py/PolyModel.from_entity |
5,215 | @csrf_exempt
@RequireLogin(role='admin')
def start(request):
"""
Start the proxy.
"""
if request.method != "GET":
return HttpResponse(status=405)
from api.models import redis_wrapper
r = redis_wrapper.init_redis()
response_key = str(ObjectId())
redis_wrapper.publish_to_proxy(json.dumps({
"operation": "start",
"param": None,
"key": response_key,
}))
for i in range(0, 50):
response = r.get(response_key)
if response is not None:
try:
response = json.loads(response)
except __HOLE__:
return HttpResponse(status=500)
if ('code' in response) and (response['code'] == 200):
return JsonResponse({"proxyResponse": response}, status=200)
else:
return HttpResponse(status=500)
else:
time.sleep(.1) # sleep 100ms
return HttpResponse(status=408) | ValueError | dataset/ETHPy150Open emccode/heliosburn/heliosburn/django/hbproject/api/views/proxy.py/start |
5,216 | @csrf_exempt
@RequireLogin(role='admin')
def stop(request):
"""
Stop the proxy.
"""
if request.method != "GET":
return HttpResponse(status=405)
from api.models import redis_wrapper
r = redis_wrapper.init_redis()
response_key = str(ObjectId())
redis_wrapper.publish_to_proxy(json.dumps({
"operation": "stop",
"param": None,
"key": response_key,
}))
for i in range(0, 50):
response = r.get(response_key)
if response is not None:
try:
response = json.loads(response)
except __HOLE__:
return HttpResponse(status=500)
if ('code' in response) and (response['code'] == 200):
return JsonResponse({"proxyResponse": response}, status=200)
else:
return HttpResponse(status=500)
else:
time.sleep(.1) # sleep 100ms
return HttpResponse(status=408) | ValueError | dataset/ETHPy150Open emccode/heliosburn/heliosburn/django/hbproject/api/views/proxy.py/stop |
5,217 | def query(self, query, timeout = None):
try:
return self.typeToMethod[query.type](str(query.name), timeout)
except __HOLE__, e:
return defer.fail(failure.Failure(NotImplementedError(str(self.__class__) + " " + str(query.type)))) | KeyError | dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/names/common.py/ResolverBase.query |
5,218 | def build_extension(self, ext_name, configs = []):
"""Build extension by name, then return the module.
The extension name may contain arguments as part of the string in the
following format: "extname(key1=value1,key2=value2)"
"""
# Parse extensions config params (ignore the order)
configs = dict(configs)
pos = ext_name.find("(") # find the first "("
if pos > 0:
ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
# Setup the module name
module_name = ext_name
if '.' not in ext_name:
module_name = '.'.join(['markdown.extensions', ext_name])
# Try loading the extension first from one place, then another
try: # New style (markdown.extensons.<extension>)
module = __import__(module_name, {}, {}, [module_name.rpartition('.')[0]])
except ImportError:
module_name_old_style = '_'.join(['mdx', ext_name])
try: # Old style (mdx_<extension>)
module = __import__(module_name_old_style)
except __HOLE__:
logger.warn("Failed loading extension '%s' from '%s' or '%s'"
% (ext_name, module_name, module_name_old_style))
# Return None so we don't try to initiate none-existant extension
return None
# If the module is loaded successfully, we expect it to define a
# function called makeExtension()
try:
return module.makeExtension(configs.items())
except AttributeError, e:
logger.warn("Failed to initiate extension '%s': %s" % (ext_name, e))
return None | ImportError | dataset/ETHPy150Open isnowfy/pydown/markdown/__init__.py/Markdown.build_extension |
5,219 | def set_output_format(self, format):
""" Set the output format for the class instance. """
try:
self.serializer = self.output_formats[format.lower()]
except __HOLE__:
raise KeyError('Invalid Output Format: "%s". Use one of %s.' \
% (format, self.output_formats.keys()))
return self | KeyError | dataset/ETHPy150Open isnowfy/pydown/markdown/__init__.py/Markdown.set_output_format |
5,220 | def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string.
"""
# Fixup the source text
if not source.strip():
return u"" # a blank unicode string
try:
source = unicode(source)
except UnicodeDecodeError, e:
# Customise error message while maintaining original trackback
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
source = source.replace(util.STX, "").replace(util.ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = re.sub(r'\n\s+\n', '\n\n', source)
source = source.expandtabs(self.tab_length)
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors.values():
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root)
if newRoot:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = output.index('<%s>'%self.doc_tag)+len(self.doc_tag)+2
end = output.rindex('</%s>'%self.doc_tag)
output = output[start:end].strip()
except __HOLE__:
if output.strip().endswith('<%s />'%self.doc_tag):
# We have an empty document
output = ''
else:
# We have a serious problem
raise ValueError('Markdown failed to strip top-level tags. Document=%r' % output.strip())
# Run the text post-processors
for pp in self.postprocessors.values():
output = pp.run(output)
return output.strip() | ValueError | dataset/ETHPy150Open isnowfy/pydown/markdown/__init__.py/Markdown.convert |
5,221 | def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except __HOLE__:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger()) | ValueError | dataset/ETHPy150Open openstack/ooi/ooi/tests/base.py/TestCase.setUp |
5,222 | @rbac(('owner', 'user'))
def release(self, server):
"""
Shut-down :class:`ObjServer` `server`.
server: :class:`ObjServer`
Server to be shut down.
"""
try:
address = server._token.address
except __HOLE__:
address = 'not-a-proxy'
self._logger.debug('release %r', server)
self._logger.debug(' at %r', address)
try:
manager, root_dir, owner = self._managers[server]
except KeyError:
# Not identical to any of our proxies.
# Could still be a reference to the same remote object.
try:
server_host = server.host
server_pid = server.pid
except Exception:
self._logger.error("release: can't identify server at %r",
address)
raise ValueError("can't identify server at %r" % (address,))
for key in self._managers.keys():
if key.host == server_host and key.pid == server_pid:
manager, root_dir, owner = self._managers[key]
server = key
break
else:
self._logger.error('release: server %r not found', server)
for key in self._managers.keys():
self._logger.debug(' %r', key)
self._logger.debug(' at %r', key._token.address)
raise ValueError('server %r not found' % server)
if get_credentials().user != owner.user:
raise RoleError('only the owner can release')
manager.shutdown()
server._close.cancel()
del self._managers[server]
keep_dirs = int(os.environ.get('OPENMDAO_KEEPDIRS', '0'))
if not keep_dirs and os.path.exists(root_dir):
shutil.rmtree(root_dir, onerror=onerror) | AttributeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/objserverfactory.py/ObjServerFactory.release |
5,223 | @rbac(('owner', 'user'))
def create(self, typname, version=None, server=None,
res_desc=None, **ctor_args):
"""
Create a new `typname` object in `server` or a new
:class:`ObjectServer`. Returns a proxy for for the new object.
Starts servers in a subdirectory of the current directory.
typname: string
Type of object to create. If null, then a proxy for the new
:class:`ObjServer` is returned.
version: string or None
Version of `typname` to create.
server: proxy
:class:`ObjServer` on which to create `typname`.
If none, then a new server is created.
res_desc: dict or None
Required resources. ``working_directory`` is used to set a
created server's directory, other keys are ignored.
If `allow_shell` has been set, then an absolute directory
reference may be used (including '~' expansion). If not, then
the reference must be relative and the working directory will be
relative to the factory's directory. If the directory already
exists, a new name will be used of the form ``<directory>_N``
ctor_args: dict
Other constructor arguments.
If `name` or `allowed_users` are specified, they are used when
creating the :class:`ObjServer`. If no `allowed_users` are
specified, the server is private to the current user.
"""
self._logger.info('create typname %r, version %r server %s,'
' res_desc %s, args %s', typname, version, server,
res_desc, ctor_args)
if server is None:
name = ctor_args.get('name', '')
if not name:
name = 'Server_%d' % (len(self._managers) + 1)
allowed_users = ctor_args.get('allowed_users')
if not allowed_users:
credentials = get_credentials()
allowed_users = {credentials.user: credentials.public_key}
else:
del ctor_args['allowed_users']
if self._address is None or \
isinstance(self._address, basestring) or \
self._allow_tunneling:
# Local access only via pipe if factory accessed by pipe
# or factory is accessed via tunnel.
address = None
else:
# Network access via same IP as factory, system-selected port.
address = (self._address[0], 0)
manager = self.manager_class(address, self._authkey, name=name,
allowed_users=allowed_users)
# Set (unique) working directory of server.
# Server cleanup removes this directory, so we avoid any
# existing directory to not delete existing files.
base = None
if res_desc is not None:
base = res_desc.get('working_directory')
if base:
if self._allow_shell: # Absolute allowed.
base = os.path.expanduser(base)
elif os.path.isabs(base) or base.startswith('..'):
raise ValueError('working_directory %r must be subdirectory'
% base)
res_desc = res_desc.copy()
del res_desc['working_directory']
if not base:
base = name
count = 1
root_dir = base
while os.path.exists(root_dir):
count += 1
root_dir = '%s_%d' % (base, count)
os.mkdir(root_dir)
# On Windows, when running the full test suite under Nose,
# starting the process starts a new Nose test session, which
# will eventually get here and start a new Nose session, which...
orig_main = None
if sys.platform == 'win32': #pragma no cover
scripts = ('openmdao-script.py', 'openmdao_test-script.py')
try:
main_file = sys.modules['__main__'].__file__
except __HOLE__:
pass
else:
if main_file.endswith(scripts):
orig_main = main_file
sys.modules['__main__'].__file__ = \
pkg_resources.resource_filename('openmdao.main',
'objserverfactory.py')
owner = get_credentials()
self._logger.log(LOG_DEBUG2, '%s starting server %r in dir %s',
owner, name, root_dir)
try:
manager.start(cwd=root_dir,
log_level=self._logger.getEffectiveLevel())
finally:
if orig_main is not None: #pragma no cover
sys.modules['__main__'].__file__ = orig_main
self._logger.info('new server %r for %s', name, owner)
self._logger.info(' in dir %s', root_dir)
self._logger.info(' listening on %s', manager.address)
server_class = getattr(manager, self.server_classname)
server = server_class(name=name, allow_shell=self._allow_shell,
allowed_types=self._allowed_types)
self._managers[server] = (manager, root_dir, owner)
if typname:
obj = server.create(typname, version, None, res_desc, **ctor_args)
else:
obj = server
self._logger.log(LOG_DEBUG2, 'create returning %r at %r',
obj, obj._token.address)
return obj | AttributeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/objserverfactory.py/ObjServerFactory.create |
5,224 | def __init__(self, name='', allow_shell=False, allowed_types=None):
self._allow_shell = allow_shell
if allowed_types is None:
allowed_types = [typname for typname, version
in get_available_types()]
self._allowed_types = allowed_types
self.host = platform.node()
self.pid = os.getpid()
self.name = name or ('sim-%d' % self.pid)
self.version = __version__
self._root_dir = os.getcwd()
self._logger = logging.getLogger(self.name)
self._logger.info('PID: %d, allow_shell %s',
os.getpid(), self._allow_shell)
print '%s %r PID: %d, allow_shell %s' \
% (self.__class__.__name__, self.name, os.getpid(),
self._allow_shell)
sys.stdout.flush()
SimulationRoot.chroot(self._root_dir)
self.tlo = None
# Ensure Traits Array support is initialized. The code contains
# globals for numpy symbols that are initialized within
# AbstractArray.__init__() which won't be executed if we simply
# load up egg state (or don't do a real fork, like Windows).
try:
import numpy
except __HOLE__:
pass
else:
from traits.trait_numeric import AbstractArray
dummy = AbstractArray() | ImportError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/objserverfactory.py/ObjServer.__init__ |
5,225 | @rbac('owner')
def execute_command(self, resource_desc):
"""
Run command described by `resource_desc` in a subprocess if this
server's `allow_shell` attribute is True.
resource_desc: dict
Contains job description.
The current environment, along with any 'job_environment' specification,
is in effect while running 'remote_command'.
If 'input_path' is not specified, ``/dev/null`` or ``nul:`` is used.
If 'output_path' is not specified, ``<remote_command>.stdout`` is used.
If neither 'error_path' nor 'join_files' are specified,
``<remote_command>.stderr`` is used.
If specified in the 'resource_limits' dictionary, 'wallclock_time' is
used as a timeout.
All other queuing resource keys are ignored.
The ``HOME_DIRECTORY`` and ``WORKING_DIRECTORY`` placeholders are
ignored.
"""
try:
job_name = resource_desc['job_name']
except KeyError:
job_name = ''
command = resource_desc['remote_command']
self._check_path(command, 'execute_command')
base = os.path.basename(command)
command = [command]
if 'args' in resource_desc:
command.extend(resource_desc['args'])
self._logger.debug('execute_command %s %r', job_name, command)
if not self._allow_shell:
self._logger.error('attempt to execute %r by %r', command,
get_credentials().user)
raise RuntimeError('shell access is not allowed by this server')
env_vars = resource_desc.get('job_environment')
try:
stdin = resource_desc['input_path']
self._check_path(stdin, 'execute_command')
except KeyError:
stdin = DEV_NULL
try:
stdout = resource_desc['output_path']
self._check_path(stdout, 'execute_command')
except __HOLE__:
stdout = base+'.stdout'
try:
stderr = resource_desc['error_path']
self._check_path(stderr, 'execute_command')
except KeyError:
try:
join_files = resource_desc['join_files']
except KeyError:
stderr = base+'.stderr'
else:
stderr = STDOUT if join_files else base+'.stderr'
limits = resource_desc.get('resource_limits', {})
timeout = limits.get('wallclock_time', 0)
poll_delay = 1
try:
process = ShellProc(command, stdin, stdout, stderr, env_vars)
except Exception as exc:
self._logger.error('exception creating process: %s', exc)
raise
self._logger.debug(' PID = %d', process.pid)
return_code, error_msg = process.wait(poll_delay, timeout)
self._logger.debug(' returning %s', (return_code, error_msg))
return (return_code, error_msg) | KeyError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/objserverfactory.py/ObjServer.execute_command |
5,226 | def connect(address, port, tunnel=False, authkey='PublicKey', pubkey=None,
logfile=None):
"""
Connects to the server at `address` and `port` using `key` and returns
a (shared) proxy for the associated :class:`ObjServerFactory`.
address: string
IP address for server or pipe filename.
port: int
Server port. If < 0, `address` is a pipe filename.
tunnel: bool
Connect via SSH tunnel.
authkey:
Server authorization key.
pubkey:
Server public key; required if `authkey` is 'PublicKey'.
logfile:
Location of server's log file, if known.
"""
if port < 0:
key = address
else:
key = (address, port)
try:
return _PROXIES[key]
except __HOLE__:
# Requires ssh setup.
if tunnel: # pragma no cover
location, cleanup = setup_tunnel(address, port)
atexit.register(*cleanup)
else:
location = key
via = ' (via tunnel)' if tunnel else ''
log = ' at %s' % logfile if logfile else ''
if not OpenMDAO_Proxy.manager_is_alive(location):
raise RuntimeError("Can't connect to server at %s:%s%s. It appears"
" to be offline. Please check the server log%s."
% (address, port, via, log))
mgr = _FactoryManager(location, authkey, pubkey=pubkey)
try:
mgr.connect()
except EOFError:
raise RuntimeError("Can't connect to server at %s:%s%s. It appears"
" to be rejecting the connection. Please check"
" the server log%s." % (address, port, via, log))
proxy = mgr.openmdao_main_objserverfactory_ObjServerFactory()
if proxy.version != __version__:
logging.warning('Server version %r different than local version %r',
proxy.version, __version__)
_PROXIES[key] = proxy
return proxy | KeyError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/objserverfactory.py/connect |
5,227 | def main(): #pragma no cover
"""
OpenMDAO factory service process.
Usage: python objserverfactory.py [--allow-public][--allow-shell][--hosts=filename][--types=filename][--users=filename][--address=address][--port=number][--prefix=name][--tunnel][--resources=filename][--log-host=hostname][--log-port=number][--log-prefix=string]
--allow-public:
Allows access by anyone from any allowed host. Use with care!
--allow-shell:
Allows access to :meth:`execute_command` and :meth:`load_model`.
Use with care!
--hosts: string
Filename for allowed hosts specification. Default ``hosts.allow``.
Ignored if '--users' is specified.
The file should contain IPv4 host addresses, IPv4 domain addresses,
or hostnames, one per line. Blank lines are ignored, and '#' marks the
start of a comment which continues to the end of the line.
For security reasons this file must be accessible only by the user
running this server.
--types: string
Filename for allowed types specification.
If not specified then allow types listed by
:meth:`factorymanager.get_available_types`.
The file should contain one type name per line.
--users: string
Filename for allowed users specification.
Ignored if '--allow-public' is specified.
Default is ``~/.ssh/authorized_keys``, other files should be of the
same format: each line has ``key-type public-key-data user@host``,
where `user` is the username on `host`. `host` will be translated to an
IPv4 address and included in the allowed hosts list.
Note that this ``user@host`` form is not necessarily enforced by
programs which generate keys.
For security reasons this file must be accessible only by the user
running this server.
--address: string
IPv4 address, hostname, or pipe name.
Default is the host's default IPv4 address.
--port: int
Server port (default of 0 implies next available port).
Note that ports below 1024 typically require special privileges.
If port is negative, then a local pipe is used for communication.
--prefix: string
Prefix for configuration and stdout/stderr files (default ``server``).
--tunnel:
Report host IP address but listen for connections from a local
SSH tunnel.
--resources: string
Filename for resource configuration. If not specified then the
default of ``~/.openmdao/resources.cfg`` will be used.
--log-host: string
Hostname to send remote log messages to.
--log-port: int
Port on `log-host` to send remote log messages to.
--log-prefix: string
Prefix to apply to remote log messages. Default is ``pid@host``.
If ``prefix.key`` exists, it is read for an authorization key string.
Otherwise, public key authorization and encryption is used.
Allowed hosts *must* be specified if `port` is >= 0. Only allowed hosts
may connect to the server.
Once initialized ``prefix.cfg`` is written with address, port, and
public key information.
"""
parser = optparse.OptionParser()
parser.add_option('--address', action='store', type='str',
help='Network address to serve.')
parser.add_option('--allow-public', action='store_true', default=False,
help='Allows access by any user, use with care!')
parser.add_option('--allow-shell', action='store_true', default=False,
help='Allows potential shell access, use with care!')
parser.add_option('--hosts', action='store', type='str',
default='hosts.allow', help='Filename for allowed hosts')
parser.add_option('--types', action='store', type='str',
help='Filename for allowed types')
parser.add_option('--users', action='store', type='str',
default='~/.ssh/authorized_keys',
help='Filename for allowed users')
parser.add_option('--port', action='store', type='int', default=0,
help='Server port (0 implies next available port)')
parser.add_option('--prefix', action='store', default='server',
help='Prefix for config and stdout/stderr files')
parser.add_option('--tunnel', action='store_true', default=False,
help='Report host IP address but listen for connections'
' from a local SSH tunnel')
parser.add_option('--resources', action='store', type='str',
default=None, help='Filename for resource configuration')
parser.add_option('--log-host', action='store', type='str',
default=None, help='hostname for remote log messages')
parser.add_option('--log-port', action='store', type='int',
default=None, help='port for remote log messages')
parser.add_option('--log-prefix', action='store', type='str',
default=None, help='prefix for remote log messages')
options, arguments = parser.parse_args()
if arguments:
parser.print_help()
sys.exit(1)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if options.log_host and options.log_port:
install_remote_handler(options.log_host, int(options.log_port),
options.log_prefix)
server_key = options.prefix+'.key'
server_cfg = options.prefix+'.cfg'
global _SERVER_CFG
_SERVER_CFG = server_cfg
# Get authkey.
authkey = 'PublicKey'
try:
with open(server_key, 'r') as inp:
authkey = inp.readline().strip()
os.remove(server_key)
except __HOLE__:
pass
if options.allow_shell:
msg = 'Shell access is ALLOWED'
logger.warning(msg)
print msg
allowed_users = None
allowed_hosts = None
# Get allowed_users.
if options.allow_public:
allowed_users = None
msg = 'Public access is ALLOWED'
logger.warning(msg)
print msg
if options.port >= 0:
# Get allowed_hosts.
if os.path.exists(options.hosts):
try:
allowed_hosts = read_allowed_hosts(options.hosts)
except Exception as exc:
msg = "Can't read allowed hosts file %r: %s" \
% (options.hosts, exc)
logger.error(msg)
print msg
sys.exit(1)
else:
msg = 'Allowed hosts file %r does not exist.' % options.hosts
logger.error(msg)
print msg
sys.exit(1)
if not allowed_hosts:
msg = 'No hosts in allowed hosts file %r.' % options.hosts
logger.error(msg)
print msg
sys.exit(1)
else:
if os.path.exists(options.users):
try:
allowed_users = read_authorized_keys(options.users, logger)
except Exception as exc:
msg = "Can't read allowed users file %r: %s" \
% (options.users, exc)
logger.error(msg)
print msg
sys.exit(1)
else:
msg = 'Allowed users file %r does not exist.' % options.users
logger.error(msg)
print msg
sys.exit(1)
if not allowed_users:
msg = 'No users in allowed users file %r.' % options.users
logger.error(msg)
print msg
sys.exit(1)
# Get allowed_types.
allowed_types = None
if options.types:
if os.path.exists(options.types):
allowed_types = []
with open(options.types, 'r') as inp:
line = inp.readline()
while line:
line = line.strip()
if line:
allowed_types.append(line)
line = inp.readline()
else:
msg = 'Allowed types file %r does not exist.' % options.types
logger.error(msg)
print msg
sys.exit(1)
# Optionally configure resources.
if options.resources:
# Import here to avoid import loop.
from openmdao.main.resource import ResourceAllocationManager as RAM
RAM.configure(options.resources)
# Get address and create manager.
if options.port >= 0:
if options.address: # Specify IPv4/hostname.
address = (options.address, options.port)
else:
address = (platform.node(), options.port)
else:
if options.address: # Specify pipename.
address = options.address
else:
address = None
logger.info('Starting FactoryManager %s %r', address, keytype(authkey))
current_process().authkey = authkey
bind_address = ('127.0.0.1', options.port) if options.tunnel else address
manager = _FactoryManager(bind_address, authkey, name='Factory',
allowed_hosts=allowed_hosts,
allowed_users=allowed_users,
allow_tunneling=options.tunnel)
# Set defaults for created ObjServerFactories.
# There isn't a good method to propagate these through the manager.
ObjServerFactory._address = address
ObjServerFactory._allow_shell = options.allow_shell
ObjServerFactory._allowed_types = allowed_types
ObjServerFactory._allow_tunneling = options.tunnel
# Get server, retry if specified address is in use.
server = None
retries = 0
while server is None:
try:
server = manager.get_server()
except socket.error as exc:
if str(exc).find('Address already in use') >= 0:
if retries < 10:
msg = 'Address %s in use, retrying...' % (address,)
logger.debug(msg)
print msg
time.sleep(5)
retries += 1
else:
msg = 'Address %s in use, too many retries.' % (address,)
logger.error(msg)
print msg
sys.exit(1)
else:
raise
# Record configuration.
real_ip = None if address is None else address[0]
write_server_config(server, _SERVER_CFG, real_ip)
msg = 'Serving on %s' % (server.address,)
logger.info(msg)
print msg
sys.stdout.flush()
# And away we go...
signal.signal(signal.SIGTERM, _sigterm_handler)
try:
server.serve_forever()
finally:
_cleanup()
sys.exit(0) | IOError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/objserverfactory.py/main |
5,228 | def handle(self, *args, **options):
try:
xls_filepath = args[0]
except __HOLE__:
raise CommandError(_("You must provide the path to the xls file."))
# make sure path exists
if not os.path.exists(xls_filepath):
raise CommandError(
_("The xls file '%s' does not exist.") %
xls_filepath)
try:
username = args[1]
except IndexError:
raise CommandError(_(
"You must provide the username to publish the form to."))
# make sure user exists
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError(_("The user '%s' does not exist.") % username)
# wasteful but we need to get the id_string beforehand
survey = create_survey_from_xls(xls_filepath)
# check if a form with this id_string exists for this user
form_already_exists = XForm.objects.filter(
user=user, id_string=survey.id_string).count() > 0
# id_string of form to replace, if any
id_string = None
if form_already_exists:
if 'replace' in options and options['replace']:
id_string = survey.id_string
self.stdout.write(_("Form already exist, replacing ..\n"))
else:
raise CommandError(_(
"The form with id_string '%s' already exists, use the -r "
"option to replace it.") % survey.id_string)
else:
self.stdout.write(_("Form does NOT exist, publishing ..\n"))
# publish
xls_file = django_file(
xls_filepath, 'xls_file', 'application/vnd.ms-excel')
publish_xls_form(xls_file, user, id_string)
self.stdout.write(_("Done..\n")) | IndexError | dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/management/commands/publish_xls.py/Command.handle |
5,229 | def store_embedded_files(self, zfile):
embedded_files = self.visitor.get_embedded_file_list()
for source, destination in embedded_files:
if source is None:
continue
try:
# encode/decode
destination1 = destination.decode('latin-1').encode('utf-8')
zfile.write(source, destination1)
except __HOLE__ as e:
self.document.reporter.warning(
"Can't open file %s." % (source, )) | OSError | dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python3/docutils/writers/odf_odt/__init__.py/Writer.store_embedded_files |
5,230 | def get_image_width_height(self, node, attr):
size = None
if attr in node.attributes:
size = node.attributes[attr]
unit = size[-2:]
if unit.isalpha():
size = size[:-2]
else:
unit = 'px'
try:
size = float(size)
except __HOLE__ as e:
self.document.reporter.warning(
'Invalid %s for image: "%s"' % (
attr, node.attributes[attr]))
size = [size, unit]
return size | ValueError | dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python3/docutils/writers/odf_odt/__init__.py/ODFTranslator.get_image_width_height |
5,231 | def get_image_scale(self, node):
if 'scale' in node.attributes:
try:
scale = int(node.attributes['scale'])
if scale < 1: # or scale > 100:
self.document.reporter.warning(
'scale out of range (%s), using 1.' % (scale, ))
scale = 1
scale = scale * 0.01
except __HOLE__ as e:
self.document.reporter.warning(
'Invalid scale for image: "%s"' % (
node.attributes['scale'], ))
else:
scale = 1.0
return scale | ValueError | dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python3/docutils/writers/odf_odt/__init__.py/ODFTranslator.get_image_scale |
5,232 | def _create_site():
"""Creates a site configured in settings.py."""
try:
site = auth_models.SitesCollection().create_item(
auth_models.SINGLE_SITE_ID)
site.aliases.create_item(SITE_URL)
return site
except __HOLE__ as ex:
raise ImproperlyConfigured('Failed to create site %s: %s'
% (SITE_URL, ex)) | ValidationError | dataset/ETHPy150Open wrr/wwwhisper/wwwhisper_admin/__init__.py/_create_site |
5,233 | def _create_initial_locations(site):
"""Creates all locations listed in WWWHISPER_INITIAL_LOCATIONS setting."""
locations_paths = getattr(settings, 'WWWHISPER_INITIAL_LOCATIONS', [])
for path in locations_paths:
try:
site.locations.create_item(path)
except __HOLE__ as ex:
raise ImproperlyConfigured('Failed to create location %s: %s'
% (path, ', '.join(ex.messages))) | ValidationError | dataset/ETHPy150Open wrr/wwwhisper/wwwhisper_admin/__init__.py/_create_initial_locations |
5,234 | def _create_initial_admins(site):
"""Creates all users listed in WWWHISPER_INITIAL_ADMINS setting."""
emails = getattr(settings, 'WWWHISPER_INITIAL_ADMINS', [])
for email in emails:
try:
user = site.users.create_item(email)
except __HOLE__ as ex:
raise ImproperlyConfigured('Failed to create admin user %s: %s'
% (email, ', '.join(ex.messages))) | ValidationError | dataset/ETHPy150Open wrr/wwwhisper/wwwhisper_admin/__init__.py/_create_initial_admins |
5,235 | def add(self, name, auth_required=True, list_command=True, **validators):
"""Create a decorator that registers a handler and validation rules.
Additional keyword arguments are treated as converters/validators to
apply to tokens converting them to proper Python types.
Requirements for valid handlers:
- must accept a context argument as the first arg.
- may not use variable keyword arguments, ``**kwargs``.
- may use variable arguments ``*args`` *or* a mix of required and
optional arguments.
Decorator returns the unwrapped function so that tests etc can use the
functions with values with correct python types instead of strings.
:param string name: Name of the command being registered.
:param bool auth_required: If authorization is required.
:param bool list_command: If command should be listed in reflection.
"""
def wrapper(func):
if name in self.handlers:
raise ValueError('%s already registered' % name)
args, varargs, keywords, defaults = inspect.getargspec(func)
defaults = dict(zip(args[-len(defaults or []):], defaults or []))
if not args and not varargs:
raise TypeError('Handler must accept at least one argument.')
if len(args) > 1 and varargs:
raise TypeError(
'*args may not be combined with regular arguments')
if not set(validators.keys()).issubset(args):
raise TypeError('Validator for non-existent arg passed')
if keywords:
raise TypeError('**kwargs are not permitted')
def validate(*args, **kwargs):
if varargs:
return func(*args, **kwargs)
try:
callargs = inspect.getcallargs(func, *args, **kwargs)
except __HOLE__:
raise exceptions.MpdArgError(
'wrong number of arguments for "%s"' % name)
for key, value in callargs.items():
default = defaults.get(key, object())
if key in validators and value != default:
try:
callargs[key] = validators[key](value)
except ValueError:
raise exceptions.MpdArgError('incorrect arguments')
return func(**callargs)
validate.auth_required = auth_required
validate.list_command = list_command
self.handlers[name] = validate
return func
return wrapper | TypeError | dataset/ETHPy150Open mopidy/mopidy/mopidy/mpd/protocol/__init__.py/Commands.add |
5,236 | def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except __HOLE__:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets) | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/collapse_addresses |
5,237 | @classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except __HOLE__:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen | ValueError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_IPAddressBase._prefix_from_prefix_string |
5,238 | @classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except __HOLE__:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str) | ValueError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_IPAddressBase._prefix_from_ip_string |
5,239 | def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except __HOLE__:
return NotImplemented | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_BaseAddress.__eq__ |
5,240 | def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except __HOLE__:
return NotImplemented | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_BaseNetwork.__eq__ |
5,241 | @classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except __HOLE__ as exc:
raise AddressValueError("%s in %r" % (exc, ip_str)) | ValueError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_BaseV4._ip_int_from_string |
5,242 | def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except __HOLE__:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False | ValueError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_BaseV4._is_hostmask |
5,243 | def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except __HOLE__:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/IPv4Interface.__eq__ |
5,244 | def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except __HOLE__:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/IPv4Interface.__lt__ |
5,245 | @classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except __HOLE__ as exc:
raise AddressValueError("%s in %r" % (exc, ip_str)) | ValueError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/_BaseV6._ip_int_from_string |
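A short sketch of the parser's observable behaviour through the public API: '::' compresses one run of zero hextets, and an IPv4-style suffix fills the low 32 bits (stdlib ipaddress assumed):

import ipaddress

assert int(ipaddress.IPv6Address('::1')) == 1
mapped = ipaddress.IPv6Address('::ffff:192.0.2.1')
assert mapped.packed[-4:] == b'\xc0\x00\x02\x01'
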
5,246 | def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except __HOLE__:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/IPv6Interface.__eq__ |
5,247 | def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except __HOLE__:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False | AttributeError | dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/ipaddress.py/IPv6Interface.__lt__ |
5,248 | def fileList(paths, relative=False, folders=False):
"""
Generate a recursive list of files from a given path.
"""
try:
basestring
except __HOLE__:
# Python 3
basestring = unicode = str
if isinstance(paths, basestring):
paths = [paths]
files = []
for path in paths:
for fileName in os.listdir(path):
if fileName.startswith('.'):
continue
filePath = os.path.join(path, fileName)
if os.path.isdir(filePath):
if folders:
files.append(filePath)
files += fileList(filePath)
else:
files.append(filePath)
if relative:
files = [f[len(path) + 1:] for f in files]  # list on both Python 2 and 3
return files | NameError | dataset/ETHPy150Open randomknowledge/Cactus_Refactored/setup.py/fileList |
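A usage sketch for fileList; the temporary layout is hypothetical and only illustrates the three call forms:

import os, tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'pages'))
open(os.path.join(root, 'pages', 'index.html'), 'w').close()

print(fileList(root))                 # absolute file paths
print(fileList(root, relative=True))  # paths relative to root
print(fileList(root, folders=True))   # directories included too
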
5,249 | def get_stop_words(language):
try:
stopwords_data = pkgutil.get_data("sumy", "data/stopwords/%s.txt" % language)
except __HOLE__ as e:
raise LookupError("Stop-words are not available for language %s." % language)
return parse_stop_words(stopwords_data) | IOError | dataset/ETHPy150Open miso-belica/sumy/sumy/utils.py/get_stop_words |
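A usage sketch, assuming the sumy package and its bundled stop-word data are installed:

words = get_stop_words('english')
print(len(words))      # a few hundred entries for English
print('the' in words)  # common function words are present
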
5,250 | @classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (__HOLE__, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if b'=' not in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
# securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False) | ValueError | dataset/ETHPy150Open GoogleCloudPlatform/appengine-flask-skeleton/lib/werkzeug/contrib/securecookie.py/SecureCookie.unserialize |
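A round-trip sketch for the class above, assuming werkzeug is installed; the key and payload are illustrative only:

from werkzeug.contrib.securecookie import SecureCookie

key = b'a-long-random-secret'
cookie = SecureCookie({'user_id': 42}, secret_key=key)
blob = cookie.serialize()

restored = SecureCookie.unserialize(blob, key)
assert restored['user_id'] == 42

# A wrong key (or a tampered blob) fails the MAC check -> empty cookie.
assert not SecureCookie.unserialize(blob, b'wrong-key')
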
5,251 | def test_PacmanInstaller():
from rosdep2.platforms.arch import PacmanInstaller
@patch.object(PacmanInstaller, 'get_packages_to_install')
def test(mock_method):
installer = PacmanInstaller()
mock_method.return_value = []
assert [] == installer.get_install_command(['fake'])
# no interactive option implemented yet
mock_method.return_value = ['a', 'b']
expected = [['sudo', '-H', 'pacman', '-Sy', '--needed', 'a'],
['sudo', '-H', 'pacman', '-Sy', '--needed', 'b']]
val = installer.get_install_command(['whatever'], interactive=False)
assert val == expected, val
expected = [['sudo', '-H', 'pacman', '-Sy', '--needed', 'a'],
['sudo', '-H', 'pacman', '-Sy', '--needed', 'b']]
val = installer.get_install_command(['whatever'], interactive=True)
assert val == expected, val
try:
test()
except __HOLE__:
traceback.print_exc()
raise | AssertionError | dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_arch.py/test_PacmanInstaller |
5,252 | @ajax_request
@login_required
def set_password(request):
"""sets/updates a user's password, follows the min requiremnent of
django-passwords settings in BE/settings/common.py
:PUT: {
'current_password': current_password,
'password_1': password_1,
'password_2': password_2
}
Returns::
{
'status': 'success'
}
"""
default_validators = [
validate_length, common_sequences, dictionary_words, complexity
]
if request.method != 'PUT':
return {'status': 'error', 'message': 'only HTTP PUT allowed'}
body = json.loads(request.body)
current_password = body.get('current_password')
p1 = body.get('password_1')
p2 = body.get('password_2')
if not request.user.check_password(current_password):
return {'status': 'error', 'message': 'current password is not valid'}
if p1 is None or p1 != p2:
return {'status': 'error', 'message': 'entered passwords do not match'}
# validate password from django-password settings
for validator in default_validators:
try:
validator(p2)
except __HOLE__, e:
return {'status': 'error', 'message': e.message}
request.user.set_password(p1)
request.user.save()
return {'status': 'success'} | ValidationError | dataset/ETHPy150Open buildingenergy/buildingenergy-platform/seed/views/accounts.py/set_password |
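The validators referenced above ship with django-passwords; a sketch of invoking them directly (assuming that package's passwords.validators module is importable and configured):

from django.core.exceptions import ValidationError
from passwords.validators import validate_length, complexity

for validator in (validate_length, complexity):
    try:
        validator('weak')
    except ValidationError as e:
        print(e.message)  # e.g. the length or complexity requirement message
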
5,253 | def start(ctrl):
"""Start the Helper controller either in the foreground or as a daemon
process.
:param ctrl helper.Controller: The controller class handle to create and run
"""
args = parser.parse()
obj = ctrl(args, platform.operating_system())
if args.foreground:
try:
obj.start()
except __HOLE__:
obj.stop()
else:
try:
with platform.Daemon(obj) as daemon:
daemon.start()
except (OSError, ValueError) as error:
sys.stderr.write('\nError starting %s: %s\n\n' %
(sys.argv[0], error))
sys.exit(1) | KeyboardInterrupt | dataset/ETHPy150Open gmr/helper/helper/__init__.py/start |
5,254 | def _iterate_timeout(timeout, message, wait=2):
"""Iterate and raise an exception on timeout.
This is a generator that will continually yield and sleep for
wait seconds, and if the timeout is reached, will raise an exception
with <message>.
"""
try:
wait = float(wait)
except __HOLE__:
raise exc.OpenStackCloudException(
"Wait value must be an int or float value. {wait} given"
" instead".format(wait=wait))
start = time.time()
count = 0
while (timeout is None) or (time.time() < start + timeout):
count += 1
yield count
log.debug('Waiting {wait} seconds'.format(wait=wait))
time.sleep(wait)
raise exc.OpenStackCloudTimeout(message) | ValueError | dataset/ETHPy150Open openstack-infra/shade/shade/_utils.py/_iterate_timeout |
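This generator drives polling loops elsewhere in shade; a minimal usage sketch (get_server, server_id, and the status attribute are hypothetical stand-ins):

for attempt in _iterate_timeout(
        60, 'Server did not become ACTIVE', wait=2):
    server = get_server(server_id)  # hypothetical lookup helper
    if server.status == 'ACTIVE':
        break
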
5,255 | def safe_dict_min(key, data):
"""Safely find the minimum for a given key in a list of dict objects.
This will find the minimum integer value for specific dictionary key
across a list of dictionaries. The values for the given key MUST be
integers, or string representations of an integer.
The dictionary key does not have to be present in all (or any)
of the elements/dicts within the data set.
:param string key: The dictionary key to search for the minimum value.
:param list data: List of dicts to use for the data set.
:returns: None if the field was not found in any elements, or
the minimum value for the field otherwise.
"""
min_value = None
for d in data:
if (key in d) and (d[key] is not None):
try:
val = int(d[key])
except __HOLE__:
raise exc.OpenStackCloudException(
"Search for minimum value failed. "
"Value for {key} is not an integer: {value}".format(
key=key, value=d[key])
)
if (min_value is None) or (val < min_value):
min_value = val
return min_value | ValueError | dataset/ETHPy150Open openstack-infra/shade/shade/_utils.py/safe_dict_min |
5,256 | def safe_dict_max(key, data):
"""Safely find the maximum for a given key in a list of dict objects.
This will find the maximum integer value for specific dictionary key
across a list of dictionaries. The values for the given key MUST be
integers, or string representations of an integer.
The dictionary key does not have to be present in all (or any)
of the elements/dicts within the data set.
:param string key: The dictionary key to search for the maximum value.
:param list data: List of dicts to use for the data set.
:returns: None if the field was not found in any elements, or
the maximum value for the field otherwise.
"""
max_value = None
for d in data:
if (key in d) and (d[key] is not None):
try:
val = int(d[key])
except __HOLE__:
raise exc.OpenStackCloudException(
"Search for maximum value failed. "
"Value for {key} is not an integer: {value}".format(
key=key, value=d[key])
)
if (max_value is None) or (val > max_value):
max_value = val
return max_value | ValueError | dataset/ETHPy150Open openstack-infra/shade/shade/_utils.py/safe_dict_max |
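A small sketch exercising both helpers on mixed data; entries without the key are skipped, and string integers are accepted:

data = [{'size': '10'}, {'other': 1}, {'size': 40}]
assert safe_dict_min('size', data) == 10
assert safe_dict_max('size', data) == 40
assert safe_dict_max('missing', data) is None
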
5,257 | def low(data, queue=False, **kwargs):
'''
Execute a single low data call
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
try:
st_ = salt.state.State(__opts__, proxy=__proxy__)
except __HOLE__:
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
__context__['retcode'] = 1
return err
ret = st_.call(data)
if isinstance(ret, list):
__context__['retcode'] = 1
if salt.utils.check_state_result(ret):
__context__['retcode'] = 2
return ret | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/low |
5,258 | def high(data, test=False, queue=False, **kwargs):
'''
Execute the compound calls stored in a single set of high data
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = _get_opts(kwargs.get('localconfig'))
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
elif test is not None:
opts['test'] = test
else:
opts['test'] = __opts__.get('test', None)
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(__opts__, pillar, pillar_enc=pillar_enc, proxy=__proxy__,
context=__context__)
except __HOLE__:
st_ = salt.state.State(__opts__, pillar, pillar_enc=pillar_enc)
ret = st_.call_high(data)
_set_retcode(ret)
return ret | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/high |
5,259 | def template_str(tem, queue=False, **kwargs):
'''
Execute the information stored in a string from an sls template
CLI Example:
.. code-block:: bash
salt '*' state.template_str '<Template String>'
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
try:
st_ = salt.state.State(__opts__, proxy=__proxy__)
except __HOLE__:
st_ = salt.state.State(__opts__)
ret = st_.call_template_str(tem)
_set_retcode(ret)
return ret | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/template_str |
5,260 | def request(mods=None,
**kwargs):
'''
.. versionadded:: 2015.5.0
Request that the local admin execute a state run via
`salt-call state.run_request`
All arguments match state.apply
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request test
salt '*' state.request test,pkgs
'''
kwargs['test'] = True
ret = apply_(mods, **kwargs)
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
req = check_request()
req.update({kwargs.get('name', 'default'): {
'test_run': ret,
'mods': mods,
'kwargs': kwargs
}
})
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, __HOLE__):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
return ret | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/request |
5,261 | def clear_request(name=None):
'''
.. versionadded:: 2015.5.0
Clear out the state execution request without executing it
CLI Example:
.. code-block:: bash
salt '*' state.clear_request
'''
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
serial = salt.payload.Serial(__opts__)
if not os.path.isfile(notify_path):
return True
if not name:
try:
os.remove(notify_path)
except (IOError, __HOLE__):
pass
else:
req = check_request()
if name in req:
req.pop(name)
else:
return False
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
with salt.utils.fopen(notify_path, 'w+b') as fp_:
serial.dump(req, fp_)
except (IOError, OSError):
msg = 'Unable to write state request file {0}. Check permission.'
log.error(msg.format(notify_path))
os.umask(cumask)
return True | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/clear_request |
5,262 | def run_request(name='default', **kwargs):
'''
.. versionadded:: 2015.5.0
Execute the pending state request
CLI Example:
.. code-block:: bash
salt '*' state.run_request
'''
req = check_request()
if name not in req:
return {}
n_req = req[name]
if 'mods' not in n_req or 'kwargs' not in n_req:
return {}
req[name]['kwargs'].update(kwargs)
if 'test' in n_req['kwargs']:
n_req['kwargs'].pop('test')
if req:
ret = apply_(n_req['mods'], **n_req['kwargs'])
try:
os.remove(os.path.join(__opts__['cachedir'], 'req_state.p'))
except (__HOLE__, OSError):
pass
return ret
return {} | IOError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/run_request |
5,263 | def highstate(test=None,
queue=False,
**kwargs):
'''
Retrieve the state data from the salt master for this minion and execute it
test
Run states in test-only (dry-run) mode
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.apply test pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG
renderer. See :ref:`here <encrypted-cli-pillar-data>` for details.
pillar_enc
Specify which renderer to use to decrypt encrypted data located within
the ``pillar`` value. Currently, only ``gpg`` is supported.
.. versionadded:: 2016.3.0
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock:
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
CLI Examples:
.. code-block:: bash
salt '*' state.highstate
salt '*' state.highstate whitelist=sls1_to_run,sls2_to_run
salt '*' state.highstate exclude=sls_to_exclude
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
salt '*' state.highstate pillar="{foo: 'Foo!', bar: 'Bar!'}"
'''
if _disabled(['highstate']):
log.debug('Salt highstate run is disabled. To re-enable, run state.enable highstate')
ret = {
'name': 'Salt highstate run is disabled. To re-enable, run state.enable highstate',
'result': 'False',
'comment': 'Disabled'
}
return ret
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
if test is None:
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
else:
opts['test'] = __opts__.get('test', None)
else:
opts['test'] = test
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
kwargs.pop('env')
if 'saltenv' in kwargs:
opts['environment'] = kwargs['saltenv']
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
if 'pillarenv' in kwargs:
opts['pillarenv'] = kwargs['pillarenv']
try:
st_ = salt.state.HighState(opts,
pillar,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
mocked=kwargs.get('mock', False))
except __HOLE__:
st_ = salt.state.HighState(opts,
pillar,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False))
st_.push_active()
try:
ret = st_.call_highstate(
exclude=kwargs.get('exclude', []),
cache=kwargs.get('cache', None),
cache_name=kwargs.get('cache_name', 'highstate'),
force=kwargs.get('force', False),
whitelist=kwargs.get('whitelist')
)
finally:
st_.pop_active()
if __salt__['config.option']('state_data', '') == 'terse' or \
kwargs.get('terse'):
ret = _filter_running(ret)
serial = salt.payload.Serial(__opts__)
cache_file = os.path.join(__opts__['cachedir'], 'highstate.p')
_set_retcode(ret)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
return ret | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/highstate |
5,264 | def sls(mods,
saltenv=None,
test=None,
exclude=None,
queue=False,
pillarenv=None,
**kwargs):
'''
Execute the states in one or more SLS files
test
Run states in test-only (dry-run) mode
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.apply test pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG
renderer. See :ref:`here <encrypted-cli-pillar-data>` for details.
pillar_enc
Specify which renderer to use to decrypt encrypted data located within
the ``pillar`` value. Currently, only ``gpg`` is supported.
.. versionadded:: 2016.3.0
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
concurrent : False
Execute state runs concurrently instead of serially
.. warning::
This flag is potentially dangerous. It is designed for use when
multiple state runs can safely be run at the same time. Do *not*
use this flag for performance optimization.
saltenv : None
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``.
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for a
``saltenv`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. By
default, all Pillar environments will be merged together and used.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock:
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' state.sls core,edit.vim dev
salt '*' state.sls core exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
salt '*' state.sls myslsfile pillar="{foo: 'Foo!', bar: 'Bar!'}"
'''
concurrent = kwargs.get('concurrent', False)
if 'env' in kwargs:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
kwargs.pop('env')
if saltenv is None:
if __opts__.get('environment', None):
saltenv = __opts__['environment']
else:
saltenv = 'base'
if not pillarenv:
if __opts__.get('pillarenv', None):
pillarenv = __opts__['pillarenv']
# Modification to __opts__ lost after this if-else
if queue:
_wait(kwargs.get('__pub_jid'))
else:
conflict = running(concurrent)
if conflict:
__context__['retcode'] = 1
return conflict
# Ensure desired environment
__opts__['environment'] = saltenv
__opts__['pillarenv'] = pillarenv
if isinstance(mods, list):
disabled = _disabled(mods)
else:
disabled = _disabled([mods])
if disabled:
for state in disabled:
log.debug('Salt state {0} run is disabled. To re-enable, run state.enable {0}'.format(state))
__context__['retcode'] = 1
return disabled
if not _check_pillar(kwargs):
__context__['retcode'] = 5
err = ['Pillar failed to render with the following messages:']
err += __pillar__['_errors']
return err
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
elif test is not None:
opts['test'] = test
else:
opts['test'] = __opts__.get('test', None)
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
serial = salt.payload.Serial(__opts__)
cfn = os.path.join(
__opts__['cachedir'],
'{0}.cache.p'.format(kwargs.get('cache_name', 'highstate'))
)
try:
st_ = salt.state.HighState(opts,
pillar,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
proxy=__proxy__,
context=__context__,
mocked=kwargs.get('mock', False))
except __HOLE__:
st_ = salt.state.HighState(opts,
pillar,
kwargs.get('__pub_jid'),
pillar_enc=pillar_enc,
mocked=kwargs.get('mock', False))
umask = os.umask(0o77)
if kwargs.get('cache'):
if os.path.isfile(cfn):
with salt.utils.fopen(cfn, 'rb') as fp_:
high_ = serial.load(fp_)
return st_.state.call_high(high_)
os.umask(umask)
if isinstance(mods, six.string_types):
mods = mods.split(',')
st_.push_active()
try:
high_, errors = st_.render_highstate({saltenv: mods})
if errors:
__context__['retcode'] = 1
return errors
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(',')
if '__exclude__' in high_:
high_['__exclude__'].extend(exclude)
else:
high_['__exclude__'] = exclude
ret = st_.state.call_high(high_)
finally:
st_.pop_active()
if __salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse'):
ret = _filter_running(ret)
cache_file = os.path.join(__opts__['cachedir'], 'sls.p')
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
__salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False)
with salt.utils.fopen(cache_file, 'w+b') as fp_:
serial.dump(ret, fp_)
except (IOError, OSError):
msg = 'Unable to write to SLS cache file {0}. Check permission.'
log.error(msg.format(cache_file))
_set_retcode(ret)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
try:
with salt.utils.fopen(cfn, 'w+b') as fp_:
try:
serial.dump(high_, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
msg = 'Unable to write to highstate cache file {0}. Do you have permissions?'
log.error(msg.format(cfn))
os.umask(cumask)
return ret | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/sls |
5,265 | def single(fun, name, test=None, queue=False, **kwargs):
'''
Execute a single state function with the named kwargs, returns False if
insufficient data is sent to the command
By default, the values of the kwargs will be parsed as YAML. So, you can
specify lists values, or lists of single entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim
'''
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
comps = fun.split('.')
if len(comps) < 2:
__context__['retcode'] = 1
return 'Invalid function passed'
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
orig_test = __opts__.get('test', None)
opts = _get_opts(kwargs.get('localconfig'))
if salt.utils.test_mode(test=test, **kwargs):
opts['test'] = True
else:
opts['test'] = __opts__.get('test', None)
pillar = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar is not None \
and not isinstance(pillar, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)
try:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc, proxy=__proxy__)
except __HOLE__:
st_ = salt.state.State(opts, pillar, pillar_enc=pillar_enc)
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = 1
return err
st_._mod_init(kwargs)
ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs):
st_.call(kwargs)}
_set_retcode(ret)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__['test'] = orig_test
return ret | NameError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/single |
5,266 | def pkg(pkg_path, pkg_sum, hash_type, test=False, **kwargs):
'''
Execute a packaged state run. The packaged state run will exist in a
tarball available locally; this packaged state can be generated using
salt-ssh.
CLI Example:
.. code-block:: bash
salt '*' state.pkg /tmp/state_pkg.tgz
'''
# TODO - Add ability to download from salt master or other source
if not os.path.isfile(pkg_path):
return {}
if not salt.utils.get_hash(pkg_path, hash_type) == pkg_sum:
return {}
root = tempfile.mkdtemp()
s_pkg = tarfile.open(pkg_path, 'r:gz')
# Verify that the tarball does not extract outside of the intended root
members = s_pkg.getmembers()
for member in members:
if member.path.startswith((os.sep, '..{0}'.format(os.sep))):
return {}
elif '..{0}'.format(os.sep) in member.path:
return {}
s_pkg.extractall(root)
s_pkg.close()
lowstate_json = os.path.join(root, 'lowstate.json')
with salt.utils.fopen(lowstate_json, 'r') as fp_:
lowstate = json.load(fp_, object_hook=salt.utils.decode_dict)
# Check for errors in the lowstate
for chunk in lowstate:
if not isinstance(chunk, dict):
return lowstate
pillar_json = os.path.join(root, 'pillar.json')
if os.path.isfile(pillar_json):
with salt.utils.fopen(pillar_json, 'r') as fp_:
pillar = json.load(fp_)
else:
pillar = None
popts = _get_opts(kwargs.get('localconfig'))
popts['fileclient'] = 'local'
popts['file_roots'] = {}
if salt.utils.test_mode(test=test, **kwargs):
popts['test'] = True
else:
popts['test'] = __opts__.get('test', None)
envs = os.listdir(root)
for fn_ in envs:
full = os.path.join(root, fn_)
if not os.path.isdir(full):
continue
popts['file_roots'][fn_] = [full]
st_ = salt.state.State(popts, pillar=pillar)
ret = st_.call_chunks(lowstate)
try:
shutil.rmtree(root)
except (IOError, __HOLE__):
pass
return ret | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/state.py/pkg |
5,267 | def gracefulrestart (tokeniser, default):
if len(tokeniser.tokens) == 1:
return default
state = string(tokeniser)
if state in ('disable','disabled'):
return False
try:
grace = int(state)
except __HOLE__:
raise ValueError('"%s" is an invalid graceful-restart time' % state)
if grace < 0:
raise ValueError('graceful-restart can not be negative')
if grace > Graceful.MAX:
raise ValueError('graceful-restart must be smaller or equal to %d' % Graceful.MAX)
return grace | ValueError | dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/configuration/capability.py/gracefulrestart |
5,268 | def get_cpu_style():
global _CPU_STYLE
if _CPU_STYLE is None:
vbs_file = 'get_cpu_style.vbs'
vbs_path = path.join(LOCAL_DIR, vbs_file)
popen = sp.Popen('cscript /nologo %s'%vbs_path, stdout=sp.PIPE, shell=True)
popen.wait()
result = popen.stdout.read()
cpu_style = '%s'%result.strip()
try:
cpu_style = cpu_style.decode('gb18030')
except __HOLE__:
cpu_style = cpu_style.decode('utf8')
_CPU_STYLE = cpu_style.encode('utf8')
return _CPU_STYLE | UnicodeDecodeError | dataset/ETHPy150Open everydo/ztq/ztq_worker/ztq_worker/system_info/win.py/get_cpu_style |
5,269 | def run(self):
try:
while True:
try:
line = raw_input(self.PROMPT)
line = line.strip()
if line:
self.process_command_line(line)
self.last_command = line
elif self.last_command:
self.process_command(self.last_command)
except __HOLE__:
print
except EOFError:
# Print a newline when we get a Ctrl-D on a Posix system.
# Windows exits with a Ctrl-Z+Return, so there is no need for this.
if os.name != "nt":
print | KeyboardInterrupt | dataset/ETHPy150Open mbedmicro/pyOCD/pyOCD/tools/pyocd.py/PyOCDConsole.run |
5,270 | def process_command(self, cmd):
try:
args = cmd.split()
cmd = args[0].lower()
args = args[1:]
# Handle help.
if cmd in ['?', 'help']:
self.show_help(args)
return
# Handle register name as command.
if cmd in pyOCD.target.cortex_m.CORE_REGISTER:
self.tool.handle_reg([cmd])
return
# Check for valid command.
if cmd not in self.tool.command_list:
print "Error: unrecognized command '%s'" % cmd
return
# Run command.
handler = self.tool.command_list[cmd]
handler(args)
except __HOLE__:
print "Error: invalid argument"
traceback.print_exc()
except DAPAccess.TransferError:
print "Error: transfer failed"
except ToolError as e:
print "Error:", e | ValueError | dataset/ETHPy150Open mbedmicro/pyOCD/pyOCD/tools/pyocd.py/PyOCDConsole.process_command |
5,271 | def run(self):
try:
# Read command-line arguments.
self.args = self.get_args()
self.cmd = self.args.cmd
if self.cmd:
self.cmd = self.cmd.lower()
# Set logging level
self.configure_logging()
# Check for a valid command.
if self.cmd and self.cmd not in self.command_list:
print "Error: unrecognized command '%s'" % self.cmd
return 1
# List command must be dealt with specially.
if self.cmd == 'list':
self.handle_list([])
return 0
if self.args.clock != DEFAULT_CLOCK_FREQ_KHZ:
print "Setting SWD clock to %d kHz" % self.args.clock
# Connect to board.
self.board = MbedBoard.chooseBoard(board_id=self.args.board, target_override=self.args.target, init_board=False, frequency=(self.args.clock * 1000))
self.board.target.setAutoUnlock(False)
self.board.target.setHaltOnConnect(False)
try:
self.board.init()
except Exception as e:
print "Exception while initing board:", e
self.target = self.board.target
self.link = self.board.link
self.flash = self.board.flash
# Halt if requested.
if self.args.halt:
self.handle_halt([])
# Handle a device with flash security enabled.
self.didErase = False
if self.target.isLocked() and self.cmd != 'unlock':
print "Error: Target is locked, cannot complete operation. Use unlock command to mass erase and unlock."
if self.cmd and self.cmd not in ['reset', 'info']:
return 1
# If no command, enter interactive mode.
if not self.cmd:
# Say what we're connected to.
print "Connected to %s [%s]: %s" % (self.target.part_number,
CORE_STATUS_DESC[self.target.getState()], self.board.getUniqueID())
# Remove list command that disrupts the connection.
self.command_list.pop('list')
COMMAND_INFO.pop('list')
# Run the command line.
console = PyOCDConsole(self)
console.run()
else:
# Invoke action handler.
result = self.command_list[self.cmd](self.args.args)
if result is not None:
self.exitCode = result
except ToolExitException:
self.exitCode = 0
except __HOLE__:
print "Error: invalid argument"
except DAPAccess.TransferError:
print "Error: transfer failed"
self.exitCode = 2
except ToolError as e:
print "Error:", e
self.exitCode = 1
finally:
if self.board != None:
# Pass false to prevent target resume.
self.board.uninit(False)
return self.exitCode | ValueError | dataset/ETHPy150Open mbedmicro/pyOCD/pyOCD/tools/pyocd.py/PyOCDTool.run |
5,272 | def require_pkgresources(name):
try:
import pkg_resources
except __HOLE__:
raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name)) | ImportError | dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/wheel/tool/__init__.py/require_pkgresources |
5,273 | def get_keyring():
try:
from ..signatures import keys
import keyring
except __HOLE__:
raise WheelError("Install wheel[signatures] (requires keyring, pyxdg) for signatures.")
return keys.WheelKeys, keyring | ImportError | dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/wheel/tool/__init__.py/get_keyring |
5,274 | def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distributions.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except __HOLE__:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
command = easy_install.easy_install(install.distribution)
command.args = ['wheel'] # dummy argument
command.finalize_options()
command.install_egg_scripts(pkg_resources_dist) | ImportError | dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/wheel/tool/__init__.py/install_scripts |
5,275 | def aget(dct, key):
r"""Allow to get values deep in a dict with iterable keys
Accessing leaf values is quite straightforward:
>>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
>>> aget(dct, ('a', 'x'))
1
>>> aget(dct, ('a', 'b', 'c'))
2
If key is empty, it returns unchanged the ``dct`` value.
>>> aget({'x': 1}, ())
{'x': 1}
"""
key = iter(key)
try:
head = next(key)
except __HOLE__:
return dct
if isinstance(dct, list):
try:
idx = int(head)
except ValueError:
raise IndexNotIntegerError(
"non-integer index %r provided on a list."
% head)
try:
value = dct[idx]
except IndexError:
raise IndexOutOfRange(
"index %d is out of range (%d elements in list)."
% (idx, len(dct)))
else:
try:
value = dct[head]
except KeyError:
## Replace with a more informative KeyError
raise MissingKeyError(
"missing key %r in dict."
% (head, ))
except:
raise NonDictLikeTypeError(
"can't query subvalue %r of a leaf%s."
% (head,
(" (leaf value is %r)" % dct)
if len(repr(dct)) < 15 else ""))
return aget(value, key) | StopIteration | dataset/ETHPy150Open 0k/shyaml/shyaml.py/aget |
5,276 | def oauth_callback(request, service):
local_host = utils.get_local_host(request)
form = forms.OAuth2CallbackForm(service=service, local_host=local_host,
data=request.GET)
if form.is_valid():
try:
user = form.get_authenticated_user()
except __HOLE__ as e:
messages.error(request, smart_text(e))
else:
auth_login(request, user=user)
messages.success(request, _('You are now logged in.'))
return redirect(settings.LOGIN_REDIRECT_URL)
else:
for dummy_field, errors in form.errors.items():
for error in errors:
messages.error(request, error)
return redirect('registration:login') | ValueError | dataset/ETHPy150Open mirumee/saleor/saleor/registration/views.py/oauth_callback |
5,277 | def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
(or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control']))
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (__HOLE__, TypeError):
pass | ValueError | dataset/ETHPy150Open django/django/django/utils/cache.py/get_max_age |
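A short sketch of the helper on a response, assuming a configured Django project:

from django.http import HttpResponse

resp = HttpResponse()
resp['Cache-Control'] = 'public, max-age=3600'
assert get_max_age(resp) == 3600

resp['Cache-Control'] = 'no-store'  # no max-age directive present
assert get_max_age(resp) is None
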
5,278 | def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Get HTTP request headers
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since:
if_modified_since = parse_http_date_safe(if_modified_since)
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if if_unmodified_since:
if_unmodified_since = parse_http_date_safe(if_unmodified_since)
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if_match = request.META.get('HTTP_IF_MATCH')
etags = []
if if_none_match or if_match:
# There can be more than one ETag in the request, so we
# consider the list of values.
try:
etags = parse_etags(if_none_match or if_match)
except __HOLE__:
# In case of an invalid ETag, ignore all ETag headers.
# Apparently Opera sends invalidly quoted headers at times
# (we should be returning a 400 response, but that's a
# little extreme) -- this is bug #10681.
if_none_match = None
if_match = None
# If-None-Match must be ignored if original result would be anything
# other than a 2XX or 304 status. 304 status would result in no change.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
if response and not (200 <= response.status_code < 300):
if_none_match = None
if_match = None
# If-Modified-Since must be ignored if the original result was not a 200.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
if response and response.status_code != 200:
if_modified_since = None
if_unmodified_since = None
if not ((if_match and if_modified_since) or
(if_none_match and if_unmodified_since) or
(if_modified_since and if_unmodified_since) or
(if_match and if_none_match)):
# We only get here if no undefined combinations of headers are
# specified.
if ((if_none_match and (etag in etags or '*' in etags and etag)) and
(not if_modified_since or
(last_modified and if_modified_since and last_modified <= if_modified_since))):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
else:
return _precondition_failed(request)
elif (if_match and (
(not etag and '*' in etags) or (etag and etag not in etags) or
(last_modified and if_unmodified_since and last_modified > if_unmodified_since)
)):
return _precondition_failed(request)
elif (not if_none_match and request.method in ('GET', 'HEAD') and
last_modified and if_modified_since and
last_modified <= if_modified_since):
return _not_modified(request, response)
elif (not if_match and
last_modified and if_unmodified_since and
last_modified > if_unmodified_since):
return _precondition_failed(request)
return response | ValueError | dataset/ETHPy150Open django/django/django/utils/cache.py/get_conditional_response |
5,279 | def wrap_exceptions(fun):
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except __HOLE__ as err:
from psutil._pswindows import ACCESS_DENIED_SET
if err.errno in ACCESS_DENIED_SET:
raise psutil.AccessDenied(None, None)
if err.errno == errno.ESRCH:
raise psutil.NoSuchProcess(None, None)
raise
return wrapper | OSError | dataset/ETHPy150Open giampaolo/psutil/psutil/tests/test_windows.py/wrap_exceptions |
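A sketch of the decorator's effect, runnable only on Windows since the wrapper imports psutil's Windows internals; FakeProcess is hypothetical:

import errno
import psutil

class FakeProcess(object):
    @wrap_exceptions
    def kill(self, pid):
        raise OSError(errno.ESRCH, 'no such process')

try:
    FakeProcess().kill(12345)
except psutil.NoSuchProcess:
    pass  # OSError(ESRCH) was translated as expected
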
5,280 | @unittest.skipIf(wmi is None, "wmi module is not installed")
@retry_before_failing()
def test_disks(self):
ps_parts = psutil.disk_partitions(all=True)
wmi_parts = wmi.WMI().Win32_LogicalDisk()
for ps_part in ps_parts:
for wmi_part in wmi_parts:
if ps_part.device.replace('\\', '') == wmi_part.DeviceID:
if not ps_part.mountpoint:
# this is usually a CD-ROM with no disk inserted
break
try:
usage = psutil.disk_usage(ps_part.mountpoint)
except __HOLE__ as err:
if err.errno == errno.ENOENT:
# usually this is the floppy
break
else:
raise
self.assertEqual(usage.total, int(wmi_part.Size))
wmi_free = int(wmi_part.FreeSpace)
self.assertEqual(usage.free, wmi_free)
# 10 MB tolerance
if abs(usage.free - wmi_free) > 10 * 1024 * 1024:
self.fail("psutil=%s, wmi=%s" % (
usage.free, wmi_free))
break
else:
self.fail("can't find partition %s" % repr(ps_part)) | OSError | dataset/ETHPy150Open giampaolo/psutil/psutil/tests/test_windows.py/WindowsSpecificTestCase.test_disks |
5,281 | def test_compare_values(self):
def assert_ge_0(obj):
if isinstance(obj, tuple):
for value in obj:
self.assertGreaterEqual(value, 0, msg=obj)
elif isinstance(obj, (int, long, float)):
self.assertGreaterEqual(obj, 0)
else:
assert 0 # case not handled which needs to be fixed
def compare_with_tolerance(ret1, ret2, tolerance):
if ret1 == ret2:
return
else:
if isinstance(ret2, (int, long, float)):
diff = abs(ret1 - ret2)
self.assertLessEqual(diff, tolerance)
elif isinstance(ret2, tuple):
for a, b in zip(ret1, ret2):
diff = abs(a - b)
self.assertLessEqual(diff, tolerance)
from psutil._pswindows import ntpinfo
failures = []
for p in psutil.process_iter():
try:
nt = ntpinfo(*cext.proc_info(p.pid))
except psutil.NoSuchProcess:
continue
assert_ge_0(nt)
for name, tolerance in self.fun_names:
if name == 'proc_memory_info' and p.pid == os.getpid():
continue
if name == 'proc_create_time' and p.pid in (0, 4):
continue
meth = wrap_exceptions(getattr(cext, name))
try:
ret = meth(p.pid)
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
# compare values
try:
if name == 'proc_cpu_times':
compare_with_tolerance(ret[0], nt.user_time, tolerance)
compare_with_tolerance(ret[1],
nt.kernel_time, tolerance)
elif name == 'proc_create_time':
compare_with_tolerance(ret, nt.create_time, tolerance)
elif name == 'proc_num_handles':
compare_with_tolerance(ret, nt.num_handles, tolerance)
elif name == 'proc_io_counters':
compare_with_tolerance(ret[0], nt.io_rcount, tolerance)
compare_with_tolerance(ret[1], nt.io_wcount, tolerance)
compare_with_tolerance(ret[2], nt.io_rbytes, tolerance)
compare_with_tolerance(ret[3], nt.io_wbytes, tolerance)
elif name == 'proc_memory_info':
try:
rawtupl = cext.proc_memory_info_2(p.pid)
except psutil.NoSuchProcess:
continue
compare_with_tolerance(ret, rawtupl, tolerance)
except __HOLE__:
trace = traceback.format_exc()
msg = '%s\npid=%s, method=%r, ret_1=%r, ret_2=%r' % (
trace, p.pid, name, ret, nt)
failures.append(msg)
break
if failures:
self.fail('\n\n'.join(failures))
# ---
# same tests as above but mimicks the AccessDenied failure of
# the first (fast) method failing with AD.
# TODO: currently does not take tolerance into account. | AssertionError | dataset/ETHPy150Open giampaolo/psutil/psutil/tests/test_windows.py/TestDualProcessImplementation.test_compare_values |
5,282 | def KeyHasExpired(key):
"""Check to see whether an SSH key has expired.
Uses Google-specific (for now) semantics of the OpenSSH public key format's
comment field to determine if an SSH key is past its expiration timestamp, and
therefore no longer to be trusted. This format is still subject to change.
Reliance on it in any way is at your own risk.
Args:
key: A single public key entry in OpenSSH public key file format. This will
be checked for Google-specific comment semantics, and if present, those
will be analysed.
Returns:
True if the key has Google-specific comment semantics and has an expiration
timestamp in the past, or False otherwise.
"""
logging.debug('Processing key: %s', key)
try:
schema, json_str = key.split(None, 3)[2:]
except ValueError:
logging.debug('Key does not seem to have a schema identifier.')
logging.debug('Not expiring key.')
return False
if schema != 'google-ssh':
logging.debug('Rejecting %s as potential key schema identifier.', schema)
return False
logging.debug('Google SSH key schema identifier found.')
logging.debug('JSON string detected: %s', json_str)
try:
json_obj = json.loads(json_str)
except __HOLE__:
logging.error('Invalid JSON. Not expiring key.')
return False
if 'expireOn' not in json_obj:
# Use warning instead of error for this failure mode in case we
# add future use cases for this JSON which are unrelated to expiration.
logging.warning('No expiration timestamp. Not expiring key.')
return False
expire_str = json_obj['expireOn']
format_str = '%Y-%m-%dT%H:%M:%S+0000'
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
logging.error(
'Expiration timestamp "%s" not in format %s.', expire_str, format_str)
logging.error('Not expiring key.')
return False
# Expire the key if and only if we have exceeded the expiration timestamp.
return datetime.datetime.utcnow() > expire_time | ValueError | dataset/ETHPy150Open GoogleCloudPlatform/compute-image-packages/google-daemon/usr/share/google/google_daemon/desired_accounts.py/KeyHasExpired |
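An illustrative key in the Google-specific format the function understands (truncated key material, hypothetical user):

key = ('alice:ssh-rsa AAAAB3Nza... google-ssh '
       '{"userName": "alice@example.com", '
       '"expireOn": "2000-01-01T00:00:00+0000"}')
assert KeyHasExpired(key) is True  # expireOn is in the past
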
5,283 | def GetDesiredAccounts(self):
"""Get a list of the accounts desired on the system.
Returns:
A dict of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
"""
logging.debug('Getting desired accounts from metadata.')
# Fetch the top level attribute with a hanging get.
metadata_content = self._GetMetadataUpdate()
metadata_dict = json.loads(metadata_content or '{}')
account_data = None
try:
instance_data = metadata_dict['instance']['attributes']
project_data = metadata_dict['project']['attributes']
# Instance SSH keys to use regardless of project metadata.
valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
block_project = instance_data.get('block-project-ssh-keys', '').lower()
if block_project != 'true' and not instance_data.get('sshKeys'):
valid_keys.append(project_data.get('ssh-keys'))
valid_keys.append(project_data.get('sshKeys'))
valid_keys = [key for key in valid_keys if key]
account_data = '\n'.join(valid_keys)
except __HOLE__:
logging.debug('Project or instance attributes were not found.')
return AccountDataToDictionary(account_data) | KeyError | dataset/ETHPy150Open GoogleCloudPlatform/compute-image-packages/google-daemon/usr/share/google/google_daemon/desired_accounts.py/DesiredAccounts.GetDesiredAccounts |
5,284 | def get(self, item, default=None):
try:
return self.__getitem__(item)
except __HOLE__:
return default | KeyError | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/core/system.py/WorkspaceEntryDict.get |
5,285 | def pop(self, *args):
if len(args) > 2:
raise TypeError, 'pop expected at most 2 arguments, got %d' % len(args)
elif len(args) < 1:
raise TypeError, 'pop expected at least 1 arguments, got %d' % len(args)
if args[0] not in self.keys():
try:
return args[1]
except __HOLE__:
raise KeyError, args[0]
cmds.fileInfo( rm=args[0]) | IndexError | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/core/system.py/FileInfo.pop |
5,286 | def __init__(self, pathOrRefNode=None, namespace=None, refnode=None):
import general, nodetypes
self._refNode = None
if pathOrRefNode:
if isinstance(pathOrRefNode, (basestring,Path)):
try:
self._refNode = general.PyNode( cmds.referenceQuery( str(pathOrRefNode), referenceNode=1 ) )
except __HOLE__:
pass
if not self._refNode:
if isinstance( pathOrRefNode, nodetypes.Reference ):
self._refNode = pathOrRefNode
else:
try:
self._refNode = general.PyNode( pathOrRefNode )
except general.MayaObjectError:
self._refNode = general.PyNode( cmds.file( pathOrRefNode, q=1, referenceNode=True) )
elif namespace:
namespace = namespace.rstrip(':')
for iNamespace, iRefNode in iterReferences(namespaces=True, recursive=True, refNodes=True, references=False):
if namespace == iNamespace:
self._refNode = iRefNode
break
if self._refNode is None:
raise RuntimeError,"Could not find a reference with the namespace %r" % namespace
elif refnode:
self._refNode = general.PyNode( refnode )
assert self._refNode.type() == 'reference'
# def create(path, unresolvedPath ):
# """Actually create the FileReference object"""
# def splitCopyNumber(path):
# """Return a tuple with the path and the copy number. Second element will be None if no copy number"""
# buf = path.split('{')
# try:
# return ( buf[0], int(buf[1][:-1]) )
# except:
# return (path, None)
#
# path, copyNumber = splitCopyNumber(path)
# unresolvedPath, copyNumber2 = splitCopyNumber(unresolvedPath)
# assert copyNumber == copyNumber2, "copy number of %s is not the same as %s" % ( path, unresolvedPath )
# self._file = Path(path)
# self._copyNumber = copyNumber
# self._unresolvedPath = Path(unresolvedPath)
# #self._refNode = refNode
# #return self
#
# # Direct mappings:
# # refNode --> refFile: MFileIO.getReferenceFileByNode( refNode )
# # refFile --> refNode: cmds.file( refFile, q=1, referenceNode=1)
# # refFile --> namespace: refNode.namespace() + cmds.file( refFile, q=1, namespace=1)
# self._refNode = None
#
# import general
# if unresolvedPath:
# # check to ensure it's legit
# assert path in ReferenceCache.byFullPath, "%s is not a valid reference file" % path
# return create(path, unresolvedPath)
#
# if refnode:
# refNode = general.PyNode(refnode)
# self._refNode = refNode
# # refNode is all we need for now. we can get anything else from this when it is asked for
# return
#
#
#
# resolvedPath, unresolvedPath = ReferenceCache.getPaths( path, namespace )
# create( resolvedPath, unresolvedPath ) | RuntimeError | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/core/system.py/FileReference.__init__ |
5,287 | def run(self):
try:
Arbiter(self).run()
except __HOLE__ as e:
print("\nError: %s\n" % e, file=sys.stderr)
sys.stderr.flush()
sys.exit(1) | RuntimeError | dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/gunicorn/app/base.py/BaseApplication.run |
5,288 | def load_config_from_module_name_or_filename(self, location):
"""
Load the configuration from a module name or filename. The target must be
a Python module or file; otherwise a RuntimeError is raised. The process
stops if the configuration file contains a syntax error.
"""
try:
cfg = self.get_config_from_module_name(module_name=location)
except __HOLE__:
cfg = self.get_config_from_filename(filename=location)
for k, v in cfg.items():
# Ignore unknown names
if k not in self.cfg.settings:
continue
try:
self.cfg.set(k.lower(), v)
except:
print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr)
sys.stderr.flush()
raise
return cfg | ImportError | dataset/ETHPy150Open chalasr/Flask-P2P/venv/lib/python2.7/site-packages/gunicorn/app/base.py/Application.load_config_from_module_name_or_filename |
5,289 | def _get_stream_info_from_value(self, result, name):
r_dict = {}
if 'value' in result:
value = result['value']
try:
value_dict = json.loads(value)
except (__HOLE__, TypeError):
return Failure(InvalidStreamInfoError(name))
known_fields = ['stream_hash', 'name', 'description', 'key_fee', 'key_fee_address', 'thumbnail',
'content_license', 'sources', 'fee', 'author']
known_sources = ['lbry_sd_hash', 'btih', 'url']
known_fee_types = {'LBC': ['amount', 'address']}
for field in known_fields:
if field in value_dict:
if field == 'sources':
for source in known_sources:
if source in value_dict[field]:
if source == 'lbry_sd_hash':
r_dict['stream_hash'] = value_dict[field][source]
else:
r_dict[source] = value_dict[field][source]
elif field == 'fee':
fee = value_dict['fee']
if 'type' in fee:
if fee['type'] in known_fee_types:
fee_fields = known_fee_types[fee['type']]
if all([f in fee for f in fee_fields]):
r_dict['key_fee'] = fee['amount']
r_dict['key_fee_address'] = fee['address']
else:
for f in ['key_fee', 'key_fee_address']:
if f in r_dict:
del r_dict[f]
else:
r_dict[field] = value_dict[field]
if 'stream_hash' in r_dict and 'txid' in result:
d = self._save_name_metadata(name, r_dict['stream_hash'], str(result['txid']))
else:
d = defer.succeed(True)
d.addCallback(lambda _: r_dict)
return d
elif 'error' in result:
log.warning("Got an error looking up a name: %s", result['error'])
return Failure(UnknownNameError(name)) | ValueError | dataset/ETHPy150Open lbryio/lbry/lbrynet/core/LBRYcrdWallet.py/LBRYWallet._get_stream_info_from_value |
5,290 | def _get_status_of_claim(self, txid, name, sd_hash):
d = self.get_claims_from_tx(txid)
def get_status(claims):
if claims is None:
claims = []
for claim in claims:
if 'in claim trie' in claim:
if 'name' in claim and str(claim['name']) == name and 'value' in claim:
try:
value_dict = json.loads(claim['value'])
except (__HOLE__, TypeError):
return None
claim_sd_hash = None
if 'stream_hash' in value_dict:
claim_sd_hash = str(value_dict['stream_hash'])
if 'sources' in value_dict and 'lbrynet_sd_hash' in value_dict['sources']:
claim_sd_hash = str(value_dict['sources']['lbry_sd_hash'])
if claim_sd_hash is not None and claim_sd_hash == sd_hash:
if 'is controlling' in claim and claim['is controlling']:
return name, "valid"
if claim['in claim trie']:
return name, "invalid"
if 'in queue' in claim and claim['in queue']:
return name, "pending"
return name, "unconfirmed"
return None
d.addCallback(get_status)
return d | ValueError | dataset/ETHPy150Open lbryio/lbry/lbrynet/core/LBRYcrdWallet.py/LBRYWallet._get_status_of_claim |
5,291 | def _check_expected_balances(self):
now = datetime.datetime.now()
balances_to_check = []
try:
while self.expected_balance_at_time[0][3] < now:
balances_to_check.append(self.expected_balance_at_time.popleft())
except __HOLE__:
pass
ds = []
for balance_to_check in balances_to_check:
log.info("Checking balance of address %s", str(balance_to_check[1]))
d = self._get_balance_for_address(balance_to_check[1])
d.addCallback(lambda bal: bal >= balance_to_check[2])
ds.append(d)
dl = defer.DeferredList(ds)
def handle_checks(results):
from future_builtins import zip
for balance, (success, result) in zip(balances_to_check, results):
peer = balance[0]
if success is True:
if result is False:
if balance[4] <= 1: # first or second strike, give them another chance
new_expected_balance = (balance[0],
balance[1],
balance[2],
datetime.datetime.now() + self.max_expected_payment_time,
balance[4] + 1,
balance[5])
self.expected_balance_at_time.append(new_expected_balance)
peer.update_score(-5.0)
else:
peer.update_score(-50.0)
else:
if balance[4] == 0:
peer.update_score(balance[5])
peer.update_stats('points_received', balance[5])
else:
log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
"expected balance: %s, expected time: %s, count: %s, error: %s",
str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
str(balance[4]), str(result.getErrorMessage()))
dl.addCallback(handle_checks)
return dl | IndexError | dataset/ETHPy150Open lbryio/lbry/lbrynet/core/LBRYcrdWallet.py/LBRYWallet._check_expected_balances |
5,292 | def _start_daemon(self):
tries = 0
try:
rpc_conn = self._get_rpc_conn()
rpc_conn.getinfo()
log.info("lbrycrdd was already running when LBRYcrdWallet was started.")
return
except (socket.error, JSONRPCException):
tries += 1
log.info("lbrcyrdd was not running when LBRYcrdWallet was started. Attempting to start it.")
try:
if os.name == "nt":
                si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = subprocess.SW_HIDE
self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
"-conf=%s" % self.wallet_conf], startupinfo=si)
else:
if sys.platform == 'darwin':
os.chdir("/Applications/LBRY.app/Contents/Resources")
self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
"-conf=%s" % self.wallet_conf])
self.started_lbrycrdd = True
except __HOLE__:
import traceback
log.error("Couldn't launch lbrycrdd at path %s: %s", self.lbrycrdd_path, traceback.format_exc())
raise ValueError("Couldn't launch lbrycrdd. Tried %s" % self.lbrycrdd_path)
while tries < 6:
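            # Poll the freshly started daemon with exponential backoff until it answers RPC.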
try:
rpc_conn = self._get_rpc_conn()
rpc_conn.getinfo()
break
except (socket.error, JSONRPCException):
tries += 1
log.warning("Failed to connect to lbrycrdd.")
if tries < 6:
time.sleep(2 ** tries)
log.warning("Trying again in %d seconds", 2 ** tries)
else:
log.warning("Giving up.")
else:
self.lbrycrdd.terminate()
raise ValueError("Couldn't open lbrycrdd") | OSError | dataset/ETHPy150Open lbryio/lbry/lbrynet/core/LBRYcrdWallet.py/LBRYcrdWallet._start_daemon |
5,293 | def exists(path):
# Figure out what (if any) part of the path is a zip archive.
archive_path, file_path = split_zip_path(path)
# If the user is not trying to check a zip file, just use os.path...
if not archive_path:
return os.path.exists(path)
# otherwise check the zip file.
with closing(zipfile.ZipFile(archive_path, mode = "r")) as archive:
try:
archive.getinfo(file_path)
except KeyError:
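            # Directory entries are stored with a trailing "/" in the archive index, so retry with one appended.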
try:
archive.getinfo(file_path + "/")
except __HOLE__:
return False
return True | KeyError | dataset/ETHPy150Open brownhead/superzippy/superzippy/bootstrapper/zipsite.py/exists |
5,294 | def GetCompilerType(env):
try:
sysenv = environ.copy()
sysenv['PATH'] = str(env['ENV']['PATH'])
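        # Run the configured compiler with "-v" so its banner reveals which toolchain it is.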
result = exec_command([env.subst("$CC"), "-v"], env=sysenv)
except __HOLE__:
return None
if result['returncode'] != 0:
return None
output = "".join([result['out'], result['err']]).lower()
if "clang" in output and "LLVM" in output:
return "clang"
elif "gcc" in output:
return "gcc"
return None | OSError | dataset/ETHPy150Open platformio/platformio/platformio/builder/tools/piomisc.py/GetCompilerType |
5,295 | def read_sensor():
    try:
        # pressure = bmp.readPressure() / 100.0
        light = grovepi.analogRead(light_sensor)
        [temp, humidity] = grovepi.dht(temp_humidity_sensor, therm_version)  # Here we're using the thermometer version.
        # Return -1 in case of a bad temp/humidity sensor reading.
        if math.isnan(temp) or math.isnan(humidity):  # temp/humidity sensor sometimes gives NaN
            # return [-1, -1, -1, -1]
            return [-1, -1, -1]
        # return [pressure, light, temp, humidity]
        return [light, temp, humidity]
    # Return -1 in case of a sensor error.
    except (__HOLE__, TypeError) as e:
        return [-1, -1, -1]
        # return [-1, -1, -1, -1]
#Take a picture with the current time using the Raspberry Pi camera. Save it in the same folder. | IOError | dataset/ETHPy150Open DexterInd/GrovePi/Projects/weather_station/weather_station-White_Temp_Sensor.py/read_sensor |
5,296 | @classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
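        # Breadth-first expansion: arguments of the same class are spliced into the queue instead of nested.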
res = []
while True:
try:
arg = args_queue.pop(0)
except __HOLE__:
break
if isinstance(arg, Logic):
if isinstance(arg, cls):
args_queue.extend(arg.args)
continue
res.append(arg)
args = tuple(res)
return args | IndexError | dataset/ETHPy150Open sympy/sympy/sympy/core/logic.py/AndOr_Base.flatten |
5,297 | def save_session(self, session_name):
if not serialize.is_valid(session_name):
error_message("invalid_name", session_name)
self.run()
return
session = Session.save(session_name, sublime.windows())
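        # Capture the layout of every open window into a Session object before writing it to disk.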
try:
serialize.dump(session_name, session)
except __HOLE__ as e:
error_message(e.errno) | OSError | dataset/ETHPy150Open Zeeker/sublime-SessionManager/SessionManager.py/SaveSession.save_session |
5,298 | def handle_session(self, session_name):
try:
session = serialize.load(session_name)
except __HOLE__ as e:
error_message(e.errno)
else:
session.load() | OSError | dataset/ETHPy150Open Zeeker/sublime-SessionManager/SessionManager.py/LoadSession.handle_session |
5,299 | def handle_session(self, session_name):
try:
serialize.delete(session_name)
except __HOLE__ as e:
error_message(e.errno)
else:
sublime.status_message(messages.message("deleted", session_name)) | OSError | dataset/ETHPy150Open Zeeker/sublime-SessionManager/SessionManager.py/DeleteSession.handle_session |