function (string, length 79–138k) | label (string, 20 classes) | info (string, length 42–261) |
---|---|---|
def _ignore_request_headers_rewriter(environ):
"""Ignore specific request headers.
Certain request headers should not be sent to the application. This function
removes those headers from the environment. For a complete list of these
headers please see:
https://developers.google.com/appengine/docs/python/runtime#Request_Headers
Args:
environ: An environ dict for the current request as defined in PEP-333.
"""
for h in constants.IGNORED_REQUEST_HEADERS:
h = 'HTTP_' + h.replace('-', '_').upper()
try:
del environ[h]
except __HOLE__:
pass
# A list of functions that take an environ and possibly modify it. The functions
# are applied to the request in order. | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/devappserver2/request_rewriter.py/_ignore_request_headers_rewriter |
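A quick illustration of the `HTTP_` environ-key convention that `_ignore_request_headers_rewriter` relies on (the header name below is only an example):

```python
# CGI/WSGI convention: a request header becomes an environ key by prefixing
# 'HTTP_', upper-casing, and swapping '-' for '_'.
header = 'X-Appengine-Country'          # example header name
environ_key = 'HTTP_' + header.replace('-', '_').upper()
print(environ_key)                      # HTTP_X_APPENGINE_COUNTRY
```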
def _rewriter_middleware(request_rewriter_chain, response_rewriter_chain,
application, environ, start_response):
"""Wraps an application and applies a chain of rewriters to its response.
This first applies each function in request_rewriter_chain to the environ. It
then executes the application, and applies each function in
response_rewriter_chain to the response.
Args:
request_rewriter_chain: A chain of functions to apply to the environ.
response_rewriter_chain: A chain of functions to apply to the response.
application: The WSGI application to wrap as defined in PEP-333.
environ: An environ dict for the current request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable of strings containing the body of an HTTP response.
"""
response_dict = {'headers_sent': False}
write_body = cStringIO.StringIO()
def wrapped_start_response(status, response_headers, exc_info=None):
if exc_info and response_dict['headers_sent']:
# Headers have already been sent. PEP 333 mandates that this is an error.
raise exc_info[0], exc_info[1], exc_info[2]
response_dict['status'] = status
response_dict['response_headers'] = response_headers
return write_body.write
for rewriter in request_rewriter_chain:
rewriter(environ)
response_body = iter(application(environ, wrapped_start_response))
# Get the first non-empty string from the application's response. This ensures
# that the application has called wrapped_start_response, and allows us to
# treat future calls to wrapped_start_response as errors.
first = write_body.getvalue()
while not first:
try:
first = response_body.next()
except __HOLE__:
break
# A conformant application must have called wrapped_start_response by this
# point, and should not call it again unless there is an unrecoverable error.
response_dict['headers_sent'] = True
try:
status = response_dict['status']
response_headers = response_dict['response_headers']
except KeyError:
raise AssertionError('Application yielded before calling start_response.')
# Prepend first onto response_body.
def reconstructed_body():
yield first
for string in response_body:
yield string
body = reconstructed_body()
state = RewriterState(environ, status, response_headers, body)
for rewriter in response_rewriter_chain:
rewriter(state)
start_response(state.status, state.headers.items())
return state.body | StopIteration | dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/tools/devappserver2/request_rewriter.py/_rewriter_middleware |
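For context, a minimal self-contained sketch of the same rewriter-chain pattern, independent of devappserver2; every name here (`State`, `strip_header`, `add_server_header`, `middleware`, `hello_app`) is illustrative and not part of the original module:

```python
class State(object):
    """Mutable response state handed to each response rewriter."""
    def __init__(self, status, headers, body):
        self.status, self.headers, self.body = status, headers, body

def strip_header(environ):               # request rewriter: mutates the environ
    environ.pop('HTTP_X_INTERNAL', None)

def add_server_header(state):            # response rewriter: mutates the state
    state.headers.append(('Server', 'rewriter-demo'))

def middleware(app, request_chain=(strip_header,),
               response_chain=(add_server_header,)):
    def wrapped(environ, start_response):
        for rewriter in request_chain:
            rewriter(environ)
        captured = {}
        def fake_start_response(status, headers, exc_info=None):
            captured['status'], captured['headers'] = status, list(headers)
        body = list(app(environ, fake_start_response))   # forces start_response
        state = State(captured['status'], captured['headers'], body)
        for rewriter in response_chain:
            rewriter(state)
        start_response(state.status, state.headers)
        return state.body
    return wrapped

def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

app = middleware(hello_app)
```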
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except __HOLE__:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg") | ImportError | dataset/ETHPy150Open scikit-learn/scikit-learn/sklearn/cluster/tests/test_spectral.py/test_spectral_amg_mode |
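The interesting part of this test is how it turns a distance matrix into an affinity matrix. A standalone sketch of that construction, run with scikit-learn's default eigen solver so pyamg is not required (parameter values are illustrative):

```python
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.metrics import pairwise_distances
from sklearn.cluster import spectral_clustering

X, y = make_blobs(n_samples=100, centers=3, cluster_std=1.0, random_state=42)
D = pairwise_distances(X)        # pairwise distances
S = np.max(D) - D                # convert distances into similarities
labels = spectral_clustering(S, n_clusters=3, random_state=0)
# Cluster ids are an arbitrary permutation of the true labels, so compare
# groupings (e.g. with adjusted_rand_score) rather than raw equality.
```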
def k_fold_cross_validation(fitters, df, duration_col, event_col=None,
k=5, evaluation_measure=concordance_index,
predictor="predict_expectation", predictor_kwargs={},
fitter_kwargs={}):
"""
Perform cross validation on a dataset. If multiple models are provided,
all models will train on each of the k subsets.
fitter(s): one or several objects which possess a method:
fit(self, data, duration_col, event_col)
Note that the last two arguments will be given as keyword arguments,
and that event_col is optional. The objects must also have
the "predictor" method defined below.
df: a Pandas dataframe with necessary columns `duration_col` and `event_col`, plus
other covariates. `duration_col` refers to the lifetimes of the subjects. `event_col`
refers to whether the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: the column in dataframe that contains the subjects' lifetimes.
event_col: the column in dataframe that contains the subject's death observation. If left
as None, assumes all individuals are non-censored.
k: the number of folds to perform. n/k data will be withheld for testing on.
evaluation_measure: a function that accepts either (event_times, predicted_event_times),
or (event_times, predicted_event_times, event_observed)
and returns something (could be anything).
Default: statistics.concordance_index: (C-index)
between two series of event times
predictor: a string that matches a prediction method on the fitter instances.
For example, "predict_expectation" or "predict_percentile".
Default is "predict_expectation"
The interface for the method is:
predict(self, data, **optional_kwargs)
fitter_kwargs: keyword args to pass into fitter.fit method
predictor_kwargs: keyword args to pass into predictor-method.
Returns:
(k,1) list of scores for each fold. The scores can be anything.
"""
# Make sure fitters is a list
try:
fitters = list(fitters)
except TypeError:
fitters = [fitters]
# Each fitter has its own scores
fitterscores = [[] for _ in fitters]
n, d = df.shape
df = df.copy()
if event_col is None:
event_col = 'E'
df[event_col] = 1.
df = df.reindex(np.random.permutation(df.index)).sort(event_col)
assignments = np.array((n // k + 1) * list(range(1, k + 1)))
assignments = assignments[:n]
testing_columns = df.columns - [duration_col, event_col]
for i in range(1, k + 1):
ix = assignments == i
training_data = df.ix[~ix]
testing_data = df.ix[ix]
T_actual = testing_data[duration_col].values
E_actual = testing_data[event_col].values
X_testing = testing_data[testing_columns]
for fitter, scores in zip(fitters, fitterscores):
# fit the fitter to the training data
fitter.fit(training_data, duration_col=duration_col,
event_col=event_col, **fitter_kwargs)
T_pred = getattr(fitter, predictor)(X_testing, **predictor_kwargs).values
try:
scores.append(evaluation_measure(T_actual, T_pred, E_actual))
except __HOLE__:
scores.append(evaluation_measure(T_actual, T_pred))
# If a single fitter was given as argument, return a single result
if len(fitters) == 1:
return fitterscores[0]
else:
return fitterscores | TypeError | dataset/ETHPy150Open CamDavidsonPilon/lifelines/lifelines/utils/__init__.py/k_fold_cross_validation |
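A hedged usage sketch of this helper with lifelines' bundled Rossi recidivism data (the column names `week`/`arrest` come from that dataset; exact signatures changed in later lifelines releases, and this particular implementation assumes the older pandas API it was written against):

```python
from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi
from lifelines.utils import k_fold_cross_validation

rossi = load_rossi()                       # columns include 'week' and 'arrest'
cph = CoxPHFitter()
scores = k_fold_cross_validation(cph, rossi, duration_col='week',
                                 event_col='arrest', k=5)
print(scores)                              # one concordance index per fold
```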
def expand(self, constraints):
if self.tripleStore.graph.store.batch_unification:
patterns = []
if self.statement:
self.checkForEagerTermination()
for statement in [self.statement] + self.rest:
(s,p,o,func) = statement
searchTerms = [self._bind(term) is not None \
and self._bind(term) \
or term
for term in [s,p,o]]
(search_s, search_p,search_o) = searchTerms # (self._bind(s),self._bind(p),self._bind(o))
if self.tripleStore.graphVariable:
graphName = self.bindings.get(
self.tripleStore.graphVariable,
self.tripleStore.graphVariable)
elif isinstance(self.tripleStore.graph, ConjunctiveGraph) \
and self.tripleStore.DAWG_DATASET_COMPLIANCE:
# For query-constructed datasets, match against the
# 'default graph' - the first Graph with a non-URIRef
# identifier (or an empty, default graph)
if isinstance(self.tripleStore.graph,
ReadOnlyGraphAggregate):
graphName = None
for g in self.tripleStore.graph.graphs:
# @@FIXME: unused code
# searchRT = []
if isinstance(g.identifier, BNode):
graphName = g.identifier
break
if graphName is None:
# No default graph was created and the active
# graph is supposed to be the default graph
# so we should have no answers
continue
else:
# match against the default graph
graphName = self.tripleStore.graph.default_context.identifier
elif isinstance(self.tripleStore.graph, ConjunctiveGraph):
# match all graphs
graphName = Variable(BNode())
else:
# otherwise, the default graph is the graph queried
graphName = self.tripleStore.graph.identifier
patterns.append((search_s, search_p, search_o,graphName))
# expand at server, accumulating results
rt = []
nonGroundPatterns = [pattern for pattern in patterns
if not isGroundQuad(pattern)]
if nonGroundPatterns:
# Only evaluate at the server if not all the terms are ground
for rtDict in self.tripleStore.graph.store.batch_unify(patterns):
self.checkForEagerTermination()
if self.tripleStore.graphVariable:
if isinstance(rtDict[self.tripleStore.graphVariable],BNode) \
and self.tripleStore.DAWG_DATASET_COMPLIANCE:
# We can't match the default graph when the active
# graph is set via the GRAPH expression
continue
rt.append(rtDict)
# create a copy of the current bindings, by also adding
# the new ones from result of the search
new_bindings = self.bindings.copy()
new_bindings.update(rtDict)
child = _SPARQLNode(self, new_bindings, [],
self.tripleStore, expr=self.expr)
self.children.append(child)
assert not child.clash and child.bindings
for func in constraints:
try:
if func(new_bindings) == False:
child.clash = True
break
except __HOLE__:
child.clash=True
if not child.clash and self.expr in self.queryProlog.rightMostBGPs:
child.noteTopLevelAnswer(self.queryProlog)
else:
#If all the patterns are ground, there is no need
#to invoke server-side unification (no variables to batch unify)
self.expandAtClient(constraints)
return
if self.statement:
if nonGroundPatterns and len(self.children) == 0:
self.clash = True
else:
for func in constraints:
try:
if func(self.bindings) == False:
self.clash = True
break
except TypeError:
self.clash = True
if not self.clash and self.expr in self.queryProlog.rightMostBGPs:
self.noteTopLevelAnswer(self.queryProlog)
else:
self.expandAtClient(constraints) | TypeError | dataset/ETHPy150Open RDFLib/rdfextras/rdfextras/sparql/query.py/_SPARQLNode.expand |
def expandAtClient(self, constraints):
"""
The expansion itself. See class comments for details.
:param constraints: array of global constraining (filter) methods
"""
self.checkForEagerTermination()
# if there are no more statements, that means that the constraints
# have been fully expanded
if self.statement :
# decompose the statement into subject, predicate and object
# default setting for the search statement
# see if subject (resp. predicate and object) is already bound. This
# is done by taking over the content of self.dict if not None and replacing
# the subject with that binding
# the (search_subject,search_predicate,search_object) is then created
(s,p,o,func) = self.statement
# put the bindings we have so far into the statement; this may add None values,
# but that is exactly what RDFLib uses in its own search methods!
(search_s,search_p,search_o) = (self._bind(s),self._bind(p),self._bind(o))
# We need to keep track of the original Graph associated with the tripleStore
# so we can switch back to it after changing the active graph (if we do)
# otherwise we will effect later evaluations which use the same tripleStore instance
originalGraph = None
if self.tripleStore.graphVariable:
if hasattr(self.tripleStore.graph, 'quads'):
if self.tripleStore.graphVariable not in self.bindings:
searchRT = self.tripleStore.graph.quads(
(search_s, search_p,search_o)
)
else:
graphName = self.bindings[self.tripleStore.graphVariable]
assert not self.tripleStore.DAWG_DATASET_COMPLIANCE \
or isinstance(graphName,URIRef), \
"Cannot formally return graph name solutions for the default graph!"
unifiedGraph = Graph(self.tripleStore.graph.store, identifier=graphName)
originalGraph = self.tripleStore.graph
self.tripleStore.graph = unifiedGraph
searchRT = [(_s,_p,_o,unifiedGraph)
for _s,_p,_o in unifiedGraph.triples(
(search_s,search_p,search_o))]
else:
assert not self.tripleStore.DAWG_DATASET_COMPLIANCE \
or isinstance(self.tripleStore.graph.identifier,URIRef),\
"Cannot formally return graph name solutions for the default graph"
searchRT = [(_s,_p,_o,self.tripleStore.graph)
for _s,_p,_o in self.tripleStore.graph.triples(
(search_s, search_p, search_o))]
elif self.tripleStore.DAWG_DATASET_COMPLIANCE \
and isinstance(self.tripleStore.graph,ConjunctiveGraph):
# For query-constructed datasets, match against the 'default graph' -
# the first Graph with a non-URIRef identifier (or an empty, default graph)
if isinstance(self.tripleStore.graph, ReadOnlyGraphAggregate):
for g in self.tripleStore.graph.graphs:
searchRT = []
if isinstance(g.identifier, BNode):
searchRT = g.triples((search_s, search_p, search_o))
break
else:
# match against the default graph
searchRT = self.tripleStore.graph.default_context.triples(
(search_s, search_p, search_o))
else:
#otherwise, the default graph is the graph queried
searchRT = self.tripleStore.graph.triples(
(search_s,search_p,search_o))
if originalGraph:
self.tripleStore.graph = originalGraph
for tripleOrQuad in searchRT:
if self.tripleStore.graphVariable:
(result_s,result_p,result_o,parentGraph) = tripleOrQuad
if isinstance(self.tripleStore.graph,ConjunctiveGraph) \
and self.tripleStore.DAWG_DATASET_COMPLIANCE \
and isinstance(parentGraph.identifier, BNode):
continue
assert isinstance(parentGraph.identifier,URIRef)
else:
(result_s,result_p,result_o) = tripleOrQuad
# if a user defined constraint has been added, it should be checked now
if func != None and func(result_s, result_p, result_o) == False:
# Oops, this result is not acceptable, jump over it!
continue
# create a copy of the current bindings, by also adding the new
# ones from result of the search
new_bindings = self.bindings.copy()
# @@FIXME: unused code
# queryTerms = [s,p,o]
preClash = False
for searchSlot,searchTerm,result in [
(search_s, s, result_s),
(search_p, p, result_p),
(search_o, o, result_o)]:
# searchSlot is what we searched with (variables become none)
# searchTerm is the term in the triple pattern
# result is the unified term from the dataset
if searchSlot == None:
# An unknown
currBound = new_bindings.get(searchTerm)
if currBound is not None:
if currBound != result:
preClash = True
else:
new_bindings[searchTerm] = result
if self.tripleStore.graphVariable:
new_bindings[self.tripleStore.graphVariable] = parentGraph.identifier
# Recursion starts here: create and expand a new child
child = _SPARQLNode(self, new_bindings, self.rest,
self.tripleStore, expr=self.expr)
if preClash:
child.clash = True
else:
child.expand(constraints)
# if the child is a clash then no use adding it to the tree,
# it can be forgotten
if self.clash == False:
self.children.append(child)
if len(self.children) == 0:
# this means that the constraints could not be met at all
# with this binding!!!!
self.clash = True
else:
# this is if all bindings are done; the conditions (ie, global
# constraints) are still to be checked
if self.bound == True and self.clash == False:
for func in constraints:
try:
if func(self.bindings) == False:
self.clash = True
break
except __HOLE__:
self.clash=True
if not self.clash and self.expr in self.queryProlog.rightMostBGPs:
self.noteTopLevelAnswer(self.queryProlog) | TypeError | dataset/ETHPy150Open RDFLib/rdfextras/rdfextras/sparql/query.py/_SPARQLNode.expandAtClient |
def _orderedSelect(self, selection, orderedBy, orderDirection):
"""
The variant of the selection (as below) that also includes the
sorting. Because that is much less efficient, this is separated into
a distinct method that is called only if necessary. It is called
from the :meth:`select` method.
Because order can be made on variables that are not part of the final
selection, this method retrieves a *full* binding from the result to
be able to order it (whereas the core :meth:`select` method retrieves
only the selected bindings from the result). The full binding is an
array of (binding) dictionaries; the sorting sorts this array by
comparing the bound variables in the respective dictionaries. Once
this is done, the final selection is done.
:param selection: Either a single query string, or an array or tuple
of query strings.
:param orderedBy: either a function or a list of strings (corresponding
to variables in the query). If None, no sorting occurs on the
results. If the parameter is a function, it must take two
dictionary arguments (the binding dictionaries), return -1, 0,
and 1, corresponding to smaller, equal, and greater, respectively.
:param orderDirection: if not None, then an array of integers of the
same length as orderedBy, with values the constants ASC or DESC
(defined in the module). If None, an ascending order is used.
:return: selection results as a list of tuples
:raise SPARQLError: invalid sorting arguments
"""
fullBinding = self._getFullBinding()
if type(orderedBy) is types.FunctionType:
_sortBinding = orderedBy
else:
orderKeys = _variablesToArray(orderedBy,"orderBy")
# see the direction
oDir = None # this is just to fool the interpreter's error message
if orderDirection is None :
oDir = [True for i in xrange(0, len(orderKeys))]
elif type(orderDirection) is types.BooleanType:
oDir = [orderDirection]
elif type(orderDirection) is not types.ListType \
and type(orderDirection) is not types.TupleType:
raise SPARQLError(
"'orderDirection' argument must be a list")
elif len(orderDirection) != len(orderKeys) :
raise SPARQLError(
"'orderDirection' must be of an equal length to 'orderBy'")
else :
oDir = orderDirection
def _sortBinding(b1, b2):
"""
The sorting method used by the array sort, with return values
as required by the Python run-time
The to-be-compared data are dictionaries of bindings.
"""
for i in xrange(0, len(orderKeys)):
# each key has to be compared separately. If there is a
# clear comparison result on that key then we are done,
# but when that is not the case, the next in line should
# be used
key = orderKeys[i]
direction = oDir[i]
if key in b1 and key in b2:
val1 = b1[key]
val2 = b2[key]
if val1 != None and val2 != None:
if direction:
if val1 < val2:
return -1
elif val1 > val2:
return 1
else:
if val1 > val2:
return -1
elif val1 < val2:
return 1
return 0
# get the full Binding sorted
try:
keyfunc = functools.cmp_to_key(_sortBinding)
fullBinding.sort(key=keyfunc)
except __HOLE__:
# Python < 2.7
fullBinding.sort(cmp=_sortBinding)
# remember: _processResult turns the expansion results (an array of
# dictionaries) into an array of tuples in the right, original order
retval = _processResults(selection,fullBinding, self._getAllVariables())
return retval | AttributeError | dataset/ETHPy150Open RDFLib/rdfextras/rdfextras/sparql/query.py/Query._orderedSelect |
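The try/except AttributeError at the end is a Python 2.6 compatibility shim: `functools.cmp_to_key` (available on 2.7+/3.x) bridges a cmp-style comparator like `_sortBinding` to the key-based `sort`. In isolation:

```python
import functools

def compare(a, b):
    # cmp-style comparator returning -1, 0 or 1, like _sortBinding above
    return (a > b) - (a < b)

data = [3, 1, 2]
data.sort(key=functools.cmp_to_key(compare))
print(data)   # [1, 2, 3]
```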
def main():
TestRunner = get_runner(settings)
# Ugly parameter parsing. We probably want to improve that in future
# or just use default django test command. This may be problematic,
# knowing how testing in Django changes from version to version.
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
pattern = sys.argv.pop(pos) and sys.argv.pop(pos)
except __HOLE__:
print(usage())
sys.exit(1)
else:
pattern = None
test_modules = sys.argv[1:]
test_runner = TestRunner(verbosity=2, failfast=False, pattern=pattern)
if len(sys.argv) > 1:
test_modules = sys.argv[1:]
elif len(sys.argv) == 1:
test_modules = []
else:
print(usage())
sys.exit(1)
if (1, 6, 0) <= django.VERSION < (1, 9, 0):
# this is a compat hack because in django>=1.6.0 you must provide
# module like "userena.contrib.umessages" not "umessages"
from django.db.models import get_app
test_modules = [
# be more strict by adding .tests to not run umessages tests twice
# if both userena and umessages are tested
get_app(module_name).__name__[:-7] + ".tests"
for module_name
in test_modules
]
elif django.VERSION >= (1, 9, 0):
from django.apps import apps
test_modules = [
# be more strict by adding .tests to not run umessages tests twice
# if both userena and umessages are tested
apps.get_app_config(module_name).name + ".tests"
for module_name
in test_modules
]
if django.VERSION < (1, 7, 0):
# starting from 1.7.0 built in django migrations are run
# for older releases this patch is required to enable testing with
# migrations
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
failures = test_runner.run_tests(test_modules or ['userena'])
sys.exit(failures) | IndexError | dataset/ETHPy150Open bread-and-pepper/django-userena/userena/runtests/runtests.py/main |
def _create(self, path):
success = True
parent = os.path.abspath(os.path.join(os.path.expanduser(path), os.pardir))
if not self._exists(parent):
try:
os.makedirs(parent)
except __HOLE__:
self._log.warning('Failed to create directory %s' % parent)
success = False
else:
self._log.lowinfo('Creating directory %s' % parent)
return success | OSError | dataset/ETHPy150Open anishathalye/dotbot/plugins/link.py/Link._create |
def _delete(self, source, path, relative, force):
success = True
source = os.path.join(self._context.base_directory(), source)
fullpath = os.path.expanduser(path)
if relative:
source = self._relative_path(source, fullpath)
if ((self._is_link(path) and self._link_destination(path) != source) or
(self._exists(path) and not self._is_link(path))):
removed = False
try:
if os.path.islink(fullpath):
os.unlink(fullpath)
removed = True
elif force:
if os.path.isdir(fullpath):
shutil.rmtree(fullpath)
removed = True
else:
os.remove(fullpath)
removed = True
except __HOLE__:
self._log.warning('Failed to remove %s' % path)
success = False
else:
if removed:
self._log.lowinfo('Removing %s' % path)
return success | OSError | dataset/ETHPy150Open anishathalye/dotbot/plugins/link.py/Link._delete |
def _link(self, source, link_name, relative):
'''
Links link_name to source.
Returns true if successfully linked files.
'''
success = False
destination = os.path.expanduser(link_name)
absolute_source = os.path.join(self._context.base_directory(), source)
if relative:
source = self._relative_path(absolute_source, destination)
else:
source = absolute_source
if (not self._exists(link_name) and self._is_link(link_name) and
self._link_destination(link_name) != source):
self._log.warning('Invalid link %s -> %s' %
(link_name, self._link_destination(link_name)))
# we need to use absolute_source below because our cwd is the dotfiles
# directory, and if source is relative, it will be relative to the
# destination directory
elif not self._exists(link_name) and self._exists(absolute_source):
try:
os.symlink(source, destination)
except __HOLE__:
self._log.warning('Linking failed %s -> %s' % (link_name, source))
else:
self._log.lowinfo('Creating link %s -> %s' % (link_name, source))
success = True
elif self._exists(link_name) and not self._is_link(link_name):
self._log.warning(
'%s already exists but is a regular file or directory' %
link_name)
elif self._is_link(link_name) and self._link_destination(link_name) != source:
self._log.warning('Incorrect link %s -> %s' %
(link_name, self._link_destination(link_name)))
# again, we use absolute_source to check for existence
elif not self._exists(absolute_source):
if self._is_link(link_name):
self._log.warning('Nonexistent target %s -> %s' %
(link_name, source))
else:
self._log.warning('Nonexistent target for %s : %s' %
(link_name, source))
else:
self._log.lowinfo('Link exists %s -> %s' % (link_name, source))
success = True
return success | OSError | dataset/ETHPy150Open anishathalye/dotbot/plugins/link.py/Link._link |
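The `relative` branch expresses the symlink target relative to the directory holding the link (the `_relative_path` helper is not shown here, but is presumably equivalent to `os.path.relpath`). A tiny illustration with hypothetical paths:

```python
import os

source = os.path.expanduser('~/dotfiles/vimrc')    # hypothetical dotfile
link_name = os.path.expanduser('~/.vimrc')         # hypothetical link location
relative_target = os.path.relpath(source, os.path.dirname(link_name))
print(relative_target)                             # e.g. 'dotfiles/vimrc'
# os.symlink(relative_target, link_name)           # would create the link
```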
def get_form(self, *args, **kwargs):
initial = self.get_form_kwargs()
if 'ip_type' in self.request.GET and 'ip_str' in self.request.GET:
ip_str = self.request.GET['ip_str']
ip_type = self.request.GET['ip_type']
network = calc_parent_str(ip_str, ip_type)
if network and network.vlan and network.site:
expected_name = "{0}.{1}.mozilla.com".format(
network.vlan.name, network.site.get_site_path())
try:
domain = Domain.objects.get(name=expected_name)
except __HOLE__:
domain = None
if domain:
initial['initial'] = {'ip_str': ip_str,
'name': "." + domain.name,
'ip_type': ip_type}
else:
initial['initial'] = {'ip_str': ip_str, 'ip_type': ip_type}
return PTRForm(**initial) | ObjectDoesNotExist | dataset/ETHPy150Open mozilla/inventory/mozdns/ptr/views.py/PTRCreateView.get_form |
def render_html(self, request):
"""
Render the html code of the plugin.
By default, calls the 'index_view' function.
"""
views = __import__('%s.views' % self._get_module_path(), fromlist=[''])
try:
return views.index_view(request, self)
except __HOLE__, e:
raise e
raise PluginViewsNotProperlyConfiguredError(self) | AttributeError | dataset/ETHPy150Open ionyse/ionyweb/ionyweb/plugin/models.py/AbstractPlugin.render_html |
def get_admin_form(self):
"""
Returns the default admin form : `Plugin_PluginNameForm`.
"""
class_name = self.__class__.__name__
forms = __import__('%s.forms' % self._get_module_path(), fromlist=[''])
try:
return getattr(forms, '%sForm' % class_name)
except __HOLE__:
raise PluginAdminNotProperlyConfiguredError(self) | AttributeError | dataset/ETHPy150Open ionyse/ionyweb/ionyweb/plugin/models.py/AbstractPlugin.get_admin_form |
def http_request(self, request):
if not hasattr(request, "add_unredirected_header"):
newrequest = Request(request.get_full_url(), request.data,
request.headers)
try: newrequest.origin_req_host = request.origin_req_host
except AttributeError: pass
try: newrequest.unverifiable = request.unverifiable
except __HOLE__: pass
try: newrequest.visit = request.visit
except AttributeError: pass
request = newrequest
return request | AttributeError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/samples-and-tests/i-am-a-developer/mechanize/_upgrade.py/HTTPRequestUpgradeProcessor.http_request |
def dispatch(self, request, *args, **kwargs):
if 'product_pk' in kwargs:
try:
self.product = Product.objects.get(pk=kwargs['product_pk'])
except __HOLE__:
messages.error(
request, _("The requested product no longer exists"))
return redirect('wishlists-create')
return super(WishListCreateView, self).dispatch(
request, *args, **kwargs) | ObjectDoesNotExist | dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/apps/customer/wishlists/views.py/WishListCreateView.dispatch |
def dispatch(self, request, *args, **kwargs):
try:
self.fetch_line(request.user, kwargs['key'],
line_pk=kwargs['line_pk'])
except (__HOLE__, MultipleObjectsReturned):
raise Http404
return super(WishListMoveProductToAnotherWishList, self).dispatch(
request, *args, **kwargs) | ObjectDoesNotExist | dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/apps/customer/wishlists/views.py/WishListMoveProductToAnotherWishList.dispatch |
def parse_args_impl(parser, _args=None):
global options, commands, arg_line
(options, args) = parser.parse_args(args=_args)
arg_line = args
#arg_line = args[:] # copy
# By default, 'waf' is equivalent to 'waf build'
commands = {}
for var in cmds: commands[var] = 0
if not args:
commands['build'] = 1
args.append('build')
# Parse the command arguments
for arg in args:
commands[arg] = True
# the check thing depends on the build
if 'check' in args:
idx = args.index('check')
try:
bidx = args.index('build')
if bidx > idx:
raise ValueError('build before check')
except __HOLE__, e:
args.insert(idx, 'build')
if args[0] != 'init':
args.insert(0, 'init')
# TODO -k => -j0
if options.keep: options.jobs = 1
if options.jobs < 1: options.jobs = 1
if 'install' in sys.argv or 'uninstall' in sys.argv:
# absolute path only if set
options.destdir = options.destdir and os.path.abspath(os.path.expanduser(options.destdir))
Logs.verbose = options.verbose
Logs.init_log()
if options.zones:
Logs.zones = options.zones.split(',')
if not Logs.verbose: Logs.verbose = 1
elif Logs.verbose > 0:
Logs.zones = ['runner']
if Logs.verbose > 2:
Logs.zones = ['*']
# TODO waf 1.6
# 1. rename the class to OptionsContext
# 2. instead of a class attribute, use a module (static 'parser')
# 3. parse_args_impl was made in times when we did not know about binding new methods to classes | ValueError | dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Options.py/parse_args_impl |
def tool_options(self, *k, **kw):
Utils.python_24_guard()
if not k[0]:
raise Utils.WscriptError('invalid tool_options call %r %r' % (k, kw))
tools = Utils.to_list(k[0])
# TODO waf 1.6 remove the global variable tooldir
path = Utils.to_list(kw.get('tdir', kw.get('tooldir', tooldir)))
for tool in tools:
tool = tool.replace('++', 'xx')
if tool == 'java': tool = 'javaw'
if tool.lower() == 'unittest': tool = 'unittestw'
module = Utils.load_tool(tool, path)
try:
fun = module.set_options
except __HOLE__:
pass
else:
fun(kw.get('option_group', self)) | AttributeError | dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Options.py/Handler.tool_options |
@defer.inlineCallbacks
def _background_reindex_search(self, progress, batch_size):
target_min_stream_id = progress["target_min_stream_id_inclusive"]
max_stream_id = progress["max_stream_id_exclusive"]
rows_inserted = progress.get("rows_inserted", 0)
INSERT_CLUMP_SIZE = 1000
TYPES = ["m.room.name", "m.room.message", "m.room.topic"]
def reindex_search_txn(txn):
sql = (
"SELECT stream_ordering, event_id FROM events"
" WHERE ? <= stream_ordering AND stream_ordering < ?"
" AND (%s)"
" ORDER BY stream_ordering DESC"
" LIMIT ?"
) % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)
txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
rows = txn.fetchall()
if not rows:
return 0
min_stream_id = rows[-1][0]
event_ids = [row[1] for row in rows]
events = self._get_events_txn(txn, event_ids)
event_search_rows = []
for event in events:
try:
event_id = event.event_id
room_id = event.room_id
content = event.content
if event.type == "m.room.message":
key = "content.body"
value = content["body"]
elif event.type == "m.room.topic":
key = "content.topic"
value = content["topic"]
elif event.type == "m.room.name":
key = "content.name"
value = content["name"]
except (__HOLE__, AttributeError):
# If the event is missing a necessary field then
# skip over it.
continue
if not isinstance(value, basestring):
# If the event body, name or topic isn't a string
# then skip over it
continue
event_search_rows.append((event_id, room_id, key, value))
if isinstance(self.database_engine, PostgresEngine):
sql = (
"INSERT INTO event_search (event_id, room_id, key, vector)"
" VALUES (?,?,?,to_tsvector('english', ?))"
)
elif isinstance(self.database_engine, Sqlite3Engine):
sql = (
"INSERT INTO event_search (event_id, room_id, key, value)"
" VALUES (?,?,?,?)"
)
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
for index in range(0, len(event_search_rows), INSERT_CLUMP_SIZE):
clump = event_search_rows[index:index + INSERT_CLUMP_SIZE]
txn.executemany(sql, clump)
progress = {
"target_min_stream_id_inclusive": target_min_stream_id,
"max_stream_id_exclusive": min_stream_id,
"rows_inserted": rows_inserted + len(event_search_rows)
}
self._background_update_progress_txn(
txn, self.EVENT_SEARCH_UPDATE_NAME, progress
)
return len(event_search_rows)
result = yield self.runInteraction(
self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn
)
if not result:
yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME)
defer.returnValue(result) | KeyError | dataset/ETHPy150Open matrix-org/synapse/synapse/storage/search.py/SearchStore._background_reindex_search |
def lsb_release():
"""
Get the linux distribution information and return in an attribute dict
The following attributes should be available:
base, distributor_id, description, release, codename
For example Ubuntu Lucid would return
base = debian
distributor_id = Ubuntu
description = Ubuntu 10.04.x LTS
release = 10.04
codename = lucid
"""
output = run('lsb_release -a').split('\n')
release = _AttributeDict({})
for line in output:
try:
key, value = line.split(':')
except __HOLE__:
continue
release[key.strip().replace(' ','_').lower()]=value.strip()
if exists('/etc/debian_version'): release.base = 'debian'
elif exists('/etc/redhat-release'): release.base = 'redhat'
else: release.base = 'unknown'
return release | ValueError | dataset/ETHPy150Open bretth/woven/woven/linux.py/lsb_release |
def port_is_open():
"""
Determine if the default port and user is open for business.
"""
with settings(hide('aborts'), warn_only=True ):
try:
if env.verbosity:
print "Testing node for previous installation on port %s:"% env.port
distribution = lsb_release()
except __HOLE__:
if env.verbosity:
print >> sys.stderr, "\nStopped."
sys.exit(1)
except: #No way to catch the failing connection without catchall?
return False
if distribution.distributor_id <> 'Ubuntu':
print env.host, 'WARNING: Woven has only been tested on Ubuntu >= 10.04. It may not work as expected on',distribution.description
return True | KeyboardInterrupt | dataset/ETHPy150Open bretth/woven/woven/linux.py/port_is_open |
def setUp(self):
_test_c.COUNT = 0
try:
shutil.rmtree(_test_c.TEST_DIR)
except (__HOLE__, IOError, ), e:
if e.errno != errno.ENOENT:
raise e
os.mkdir(_test_c.TEST_DIR, 00750)
os.mkdir(_test_c.ROOT1, 00750)
os.mkdir(_test_c.ROOT2, 00750)
_c.FSQ_ROOT = _test_c.ROOT1
normalize() | OSError | dataset/ETHPy150Open axialmarket/fsq/fsq/tests/FSQTestCase.py/FSQTestCase.setUp |
def tearDown(self):
_c.FSQ_ROOT = _test_c.ORIG_ROOT
try:
shutil.rmtree(_test_c.TEST_DIR)
except(__HOLE__, IOError, ), e:
if e.errno != errno.ENOENT:
raise e
sys.stderr.write('Total Tests Run: {0} ... '.format(_test_c.COUNT))
_test_c.TOTAL_COUNT += _test_c.COUNT
normalize() | OSError | dataset/ETHPy150Open axialmarket/fsq/fsq/tests/FSQTestCase.py/FSQTestCase.tearDown |
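Both fixtures tolerate an already-missing test directory by ignoring ENOENT. The same idea as one small helper (on Python 3, `shutil.rmtree(path, ignore_errors=True)` is a blunter alternative):

```python
import errno
import shutil

def rmtree_if_exists(path):
    try:
        shutil.rmtree(path)
    except (OSError, IOError) as e:
        if e.errno != errno.ENOENT:   # re-raise anything but "no such file"
            raise
```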
def __init__(self, path, data, command_line):
"""
It copies the content of self.path from the initialization routine of
the :class:`Data <data.Data>` class, and defines a handful of useful
methods, that every likelihood might need.
If the nuisance parameters required to compute this likelihood are not
defined (either fixed or varying), the code will stop.
Parameters
----------
data : class
Initialized instance of :class:`Data <data.Data>`
command_line : NameSpace
NameSpace containing the command line arguments
"""
self.name = self.__class__.__name__
self.folder = os.path.abspath(os.path.join(
data.path['MontePython'], 'likelihoods', self.name))
if not data.log_flag:
path = os.path.join(command_line.folder, 'log.param')
# Define some default fields
self.data_directory = ''
# Store all the default fields stored, for the method read_file.
self.default_values = ['data_directory']
# Recover the values potentially read in the input.param file.
if hasattr(data, self.name):
exec("attributes = [e for e in dir(data.%s) if e.find('__') == -1]" % self.name)
for elem in attributes:
exec("setattr(self, elem, getattr(data.%s, elem))" % self.name)
# Read values from the data file
self.read_from_file(path, data, command_line)
# Default state
self.need_update = True
# Check if the nuisance parameters are defined
error_flag = False
try:
for nuisance in self.use_nuisance:
if nuisance not in data.get_mcmc_parameters(['nuisance']):
error_flag = True
warnings.warn(
nuisance + " must be defined, either fixed or " +
"varying, for %s likelihood" % self.name)
self.nuisance = self.use_nuisance
except __HOLE__:
self.use_nuisance = []
self.nuisance = []
# If at least one is missing, raise an exception.
if error_flag:
raise io_mp.LikelihoodError(
"Check your nuisance parameter list for your set of"
"experiments")
# Append to the log.param the value used (WARNING: so far no comparison
# is done to ensure that the experiments share the same parameters)
if data.log_flag:
io_mp.log_likelihood_parameters(self, command_line) | AttributeError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood.__init__ |
def read_from_file(self, path, data, command_line):
"""
Extract the information from the log.param concerning this likelihood.
If the log.param is used, check that at least one item for each
likelihood is recovered. Otherwise, it means the log.param does not
contain information on the likelihood. This happens when the first run
fails early, before calling the likelihoods, and the program did not
log the information. This check might not be completely secure, but it
is better than nothing.
.. warning::
This checks relies on the fact that a likelihood should always have
at least **one** line of code written in the likelihood.data file.
This should be always true, but in case a run fails with the error
message described below, think about it.
.. warning::
As of version 2.0.2, you can specify likelihood options in the
parameter file. They have complete priority over the ones specified
in the `likelihood.data` file, and it will be reflected in the
`log.param` file.
"""
# Counting how many lines are read.
counter = 0
self.path = path
self.dictionary = {}
if os.path.isfile(path):
data_file = open(path, 'r')
for line in data_file:
if line.find('#') == -1:
if line.find(self.name+'.') != -1:
# Recover the name and value from the .data file
regexp = re.match(
"%s.(.*)\s*=\s*(.*)" % self.name, line)
name, value = (
elem.strip() for elem in regexp.groups())
# If this name was already defined in the parameter
# file, be sure to take this value instead. Beware,
# there are a few parameters which are always
# predefined, such as data_directory, which should be
# ignored in this check.
is_ignored = False
if name not in self.default_values:
try:
value = getattr(self, name)
is_ignored = True
except __HOLE__:
pass
if not is_ignored:
exec('self.'+name+' = '+value)
value = getattr(self, name)
counter += 1
self.dictionary[name] = value
data_file.seek(0)
data_file.close()
# Checking that at least one line was read, exiting otherwise
if counter == 0:
raise io_mp.ConfigurationError(
"No information on %s likelihood " % self.name +
"was found in the %s file.\n" % path +
"This can result from a failed initialization of a previous " +
"run. To solve this, you can do a \n " +
"]$ rm -rf %s \n " % command_line.folder +
"Be sure there is noting in it before doing this !") | AttributeError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood.read_from_file |
def need_cosmo_arguments(self, data, dictionary):
"""
Ensure that the arguments of dictionary are defined to the correct
value in the cosmological code
.. warning::
So far there is no way to enforce a parameter where `smaller is
better`. A bigger value will always override any smaller one
(`cl_max`, etc...)
Parameters
----------
data : dict
Initialized instance of :class:`data`
dictionary : dict
Desired precision for some cosmological parameters
"""
array_flag = False
for key, value in dictionary.iteritems():
try:
data.cosmo_arguments[key]
try:
float(data.cosmo_arguments[key])
num_flag = True
except __HOLE__:
num_flag = False
except TypeError:
num_flag = True
array_flag = True
except KeyError:
try:
float(value)
num_flag = True
data.cosmo_arguments[key] = 0
except ValueError:
num_flag = False
data.cosmo_arguments[key] = ''
except TypeError:
num_flag = True
array_flag = True
if num_flag is False:
if data.cosmo_arguments[key].find(value) == -1:
data.cosmo_arguments[key] += ' '+value+' '
else:
if array_flag is False:
if float(data.cosmo_arguments[key]) < value:
data.cosmo_arguments[key] = value
else:
data.cosmo_arguments[key] = '%.2g' % value[0]
for i in range(1, len(value)):
data.cosmo_arguments[key] += ',%.2g' % (value[i]) | ValueError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood.need_cosmo_arguments |
def read_contamination_spectra(self, data):
for nuisance in self.use_nuisance:
# read spectrum contamination (so far, assumes only temperature
# contamination; will be trivial to generalize to polarization when
# such templates will become relevant)
setattr(self, "%s_contamination" % nuisance,
np.zeros(self.l_max+1, 'float64'))
try:
File = open(os.path.join(
self.data_directory, getattr(self, "%s_file" % nuisance)),
'r')
for line in File:
l = int(float(line.split()[0]))
if ((l >= 2) and (l <= self.l_max)):
exec "self.%s_contamination[l]=float(line.split()[1])/(l*(l+1.)/2./math.pi)" % nuisance
except:
print 'Warning: you did not pass a file name containing '
print 'a contamination spectrum regulated by the nuisance '
print 'parameter '+nuisance
# read renormalization factor
# if it is not there, assume it is one, i.e. do not renormalize
try:
# do the following operation:
# self.nuisance_contamination *= float(self.nuisance_scale)
setattr(self, "%s_contamination" % nuisance,
getattr(self, "%s_contamination" % nuisance) *
float(getattr(self, "%s_scale" % nuisance)))
except AttributeError:
pass
# read central value of nuisance parameter
# if it is not there, assume one by default
try:
getattr(self, "%s_prior_center" % nuisance)
except __HOLE__:
setattr(self, "%s_prior_center" % nuisance, 1.)
# read variance of nuisance parameter
# if it is not there, assume flat prior (encoded through
# variance=0)
try:
getattr(self, "%s_prior_variance" % nuisance)
except:
setattr(self, "%s_prior_variance" % nuisance, 0.) | AttributeError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood.read_contamination_spectra |
def __init__(self, path, data, command_line):
Likelihood.__init__(self, path, data, command_line)
self.need_cosmo_arguments(
data, {'lensing': 'yes', 'output': 'tCl lCl pCl'})
try:
import clik
except __HOLE__:
raise io_mp.MissingLibraryError(
"You must first activate the binaries from the Clik " +
"distribution. Please run : \n " +
"]$ source /path/to/clik/bin/clik_profile.sh \n " +
"and try again.")
# for lensing, some routines change. Intializing a flag for easier
# testing of this condition
#if self.name == 'Planck_lensing':
if 'lensing' in self.name and 'Planck' in self.name:
self.lensing = True
else:
self.lensing = False
try:
if self.lensing:
self.clik = clik.clik_lensing(self.path_clik)
try:
self.l_max = max(self.clik.get_lmax())
# following 2 lines for compatibility with lensing likelihoods of 2013 and before
# (then, clik.get_lmax() just returns an integer for lensing likelihoods;
# this behavior was for clik versions < 10)
except:
self.l_max = self.clik.get_lmax()
else:
self.clik = clik.clik(self.path_clik)
self.l_max = max(self.clik.get_lmax())
except clik.lkl.CError:
raise io_mp.LikelihoodError(
"The path to the .clik file for the likelihood "
"%s was not found where indicated." % self.name +
" Note that the default path to search for it is"
" one directory above the path['clik'] field. You"
" can change this behaviour in all the "
"Planck_something.data, to reflect your local configuration, "
"or alternatively, move your .clik files to this place.")
except KeyError:
raise io_mp.LikelihoodError(
"In the %s.data file, the field 'clik' of the " % self.name +
"path dictionary is expected to be defined. Please make sure"
" it is the case in you configuration file")
self.need_cosmo_arguments(
data, {'l_max_scalars': self.l_max})
self.nuisance = list(self.clik.extra_parameter_names)
# line added to deal with a bug in planck likelihood release: A_planck called A_Planck in plik_lite
if (self.name == 'Planck_highl_lite'):
for i in range(len(self.nuisance)):
if (self.nuisance[i] == 'A_Planck'):
self.nuisance[i] = 'A_planck'
print "In %s, MontePython corrected nuisance parameter name A_Planck to A_planck" % self.name
# testing if the nuisance parameters are defined. If there is at least
# one non defined, raise an exception.
exit_flag = False
nuisance_parameter_names = data.get_mcmc_parameters(['nuisance'])
for nuisance in self.nuisance:
if nuisance not in nuisance_parameter_names:
exit_flag = True
print '%20s\tmust be a fixed or varying nuisance parameter' % nuisance
if exit_flag:
raise io_mp.LikelihoodError(
"The likelihood %s " % self.name +
"expected some nuisance parameters that were not provided")
# deal with nuisance parameters
try:
self.use_nuisance
except:
self.use_nuisance = []
# Add in use_nuisance all the parameters that have non-flat prior
for nuisance in self.nuisance:
if hasattr(self, '%s_prior_center' % nuisance):
self.use_nuisance.append(nuisance) | ImportError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood_clik.__init__ |
def __init__(self, path, data, command_line):
Likelihood.__init__(self, path, data, command_line)
# try and import pandas
try:
import pandas
except __HOLE__:
raise io_mp.MissingLibraryError(
"This likelihood has a lot of IO manipulation. You have "
"to install the 'pandas' library to use it. Please type:\n"
"`(sudo) pip install pandas --user`")
# check that every conflicting experiments is not present in the list
# of tested experiments, in which case, complain
if hasattr(self, 'conflicting_experiments'):
for conflict in self.conflicting_experiments:
if conflict in data.experiments:
raise io_mp.LikelihoodError(
'conflicting %s measurements, you can ' % conflict +
' have either %s or %s ' % (self.name, conflict) +
'as an experiment, not both')
# Read the configuration file, supposed to be called self.settings.
# Note that we unfortunately can not
# immediatly execute the file, as it is not formatted as strings.
assert hasattr(self, 'settings') is True, (
"You need to provide a settings file")
self.read_configuration_file() | ImportError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood_sn.__init__ |
def read_configuration_file(self):
"""
Extract Python variables from the configuration file
This routine performs the equivalent to the program "inih" used in the
original c++ library.
"""
settings_path = os.path.join(self.data_directory, self.settings)
with open(settings_path, 'r') as config:
for line in config:
# Dismiss empty lines and commented lines
if line and line.find('#') == -1:
lhs, rhs = [elem.strip() for elem in line.split('=')]
# lhs will always be a string, so set the attribute to this
# likelihood. The right hand side requires more work.
# First case, if set to T or F for True or False
if str(rhs) in ['T', 'F']:
rhs = True if str(rhs) == 'T' else False
# It can also be a path, starting with 'data/'. We remove
# this leading folder path
elif str(rhs).find('data/') != -1:
rhs = rhs.replace('data/', '')
else:
# Try to convert it to a float
try:
rhs = float(rhs)
# If it fails, it is a string
except __HOLE__:
rhs = str(rhs)
# Set finally rhs to be a parameter of the class
setattr(self, lhs, rhs) | ValueError | dataset/ETHPy150Open baudren/montepython_public/montepython/likelihood_class.py/Likelihood_sn.read_configuration_file |
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except __HOLE__ as ie:
sys.stderr.write("%s\n" % str(ie))
return None
except Exception:
return None | ImportError | dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/config/generator.py/_import_module |
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (__HOLE__, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
try:
if opt_default is None:
print '#%s=<None>' % opt_name
elif opt_type == STROPT:
assert(isinstance(opt_default, basestring))
print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print '#%s=%s' % (opt_name, str(opt_default).lower())
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print '#%s=%s' % (opt_name, opt_default)
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print '#%s=%s' % (opt_name, opt_default)
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print '#%s=%s' % (opt_name, ','.join(opt_default))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print '#%s=%s' % (opt_name, default)
print
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1) | ValueError | dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/config/generator.py/_print_opt |
def load_source(srcfile):
try:
return ''.join(open(srcfile).readlines()) + '\n\n'
except __HOLE__:
print('Could not open', srcfile)
return '' | IOError | dataset/ETHPy150Open milkbikis/powerline-shell/install.py/load_source |
def fetch_url(url, useragent, referer=None, retries=1, dimension=False):
cur_try = 0
nothing = None if dimension else (None, None)
url = clean_url(url)
if not url.startswith(('http://', 'https://')):
return nothing
response = None
while True:
try:
response = requests.get(url, stream=True, timeout=5, headers={
'User-Agent': useragent,
'Referer': referer,
})
# if we only need the dimension of the image, we may not
# need to download the entire thing
if dimension:
content = response.raw.read(chunk_size)
else:
content = response.raw.read()
content_type = response.headers.get('Content-Type')
if not content_type:
return nothing
if 'image' in content_type:
p = ImageFile.Parser()
new_data = content
while not p.image and new_data:
try:
p.feed(new_data)
except __HOLE__:
traceback.print_exc()
p = None
break
except ValueError:
traceback.print_exc()
p = None
break
except Exception as e:
# For some favicon.ico images, the image is so small
# that our PIL feed() method fails a length test.
is_favicon = (urls.url_to_filetype(url) == 'ico')
if is_favicon:
pass
else:
raise e
p = None
break
new_data = response.raw.read(chunk_size)
content += new_data
if p is None:
return nothing
# return the size, or return the data
if dimension and p.image:
return p.image.size
elif dimension:
return nothing
elif dimension:
# expected an image, but didn't get one
return nothing
return content_type, content
except requests.exceptions.RequestException as e:
cur_try += 1
if cur_try >= retries:
log.debug('error while fetching: %s refer: %s' %
(url, referer))
return nothing
finally:
if response is not None:
response.raw.close()
if response.raw._connection:
response.raw._connection.close() | IOError | dataset/ETHPy150Open codelucas/newspaper/newspaper/images.py/fetch_url |
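The dimension-only path above avoids downloading whole images by feeding chunks to PIL's incremental parser until the header reveals the size. A condensed, self-contained sketch of that trick (names and limits are illustrative):

```python
import requests
from PIL import ImageFile

def image_size(url, chunk_size=1024, max_bytes=64 * 1024):
    """Return (width, height) after reading as few bytes as possible, else None."""
    parser = ImageFile.Parser()
    read = 0
    with requests.get(url, stream=True, timeout=5) as resp:
        for chunk in resp.iter_content(chunk_size):
            parser.feed(chunk)
            read += len(chunk)
            if parser.image:              # header parsed: size is now known
                return parser.image.size
            if read >= max_bytes:         # give up rather than fetch everything
                break
    return None
```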
def thumbnail(self):
"""Identifies top image, trims out a thumbnail and also has a url
"""
image_url = self.largest_image_url()
if image_url:
content_type, image_str = fetch_url(image_url, referer=self.url)
if image_str:
image = str_to_image(image_str)
try:
image = prepare_image(image)
except __HOLE__ as e:
if 'interlaced' in e.message:
return None
return image, image_url
return None, None | IOError | dataset/ETHPy150Open codelucas/newspaper/newspaper/images.py/Scraper.thumbnail |
def from_dtype(dtype):
"""
Return a Numba Type instance corresponding to the given Numpy *dtype*.
NotImplementedError is raised on unsupported Numpy dtypes.
"""
if dtype.fields is None:
try:
return FROM_DTYPE[dtype]
except __HOLE__:
if dtype.char in 'SU':
return _from_str_dtype(dtype)
if dtype.char in 'mM':
return _from_datetime_dtype(dtype)
if dtype.char in 'V':
subtype = from_dtype(dtype.subdtype[0])
return types.NestedArray(subtype, dtype.shape)
raise NotImplementedError(dtype)
else:
return from_struct_dtype(dtype) | KeyError | dataset/ETHPy150Open numba/numba/numba/numpy_support.py/from_dtype |
def map_arrayscalar_type(val):
if isinstance(val, numpy.generic):
# We can't blindly call numpy.dtype() as it loses information
# on some types, e.g. datetime64 and timedelta64.
dtype = val.dtype
else:
try:
dtype = numpy.dtype(type(val))
except __HOLE__:
raise NotImplementedError("no corresponding numpy dtype for %r" % type(val))
return from_dtype(dtype) | TypeError | dataset/ETHPy150Open numba/numba/numba/numpy_support.py/map_arrayscalar_type |
def supported_ufunc_loop(ufunc, loop):
"""Return whether the *loop* for the *ufunc* is supported -in nopython-.
*loop* should be a UFuncLoopSpec instance, and *ufunc* a numpy ufunc.
For ufuncs implemented using the ufunc_db, it is supported if the ufunc_db
contains a lowering definition for 'loop' in the 'ufunc' entry.
For other ufuncs, it is type based. The loop will be considered valid if it
only contains the following letter types: '?bBhHiIlLqQfd'. Note this is
legacy and when implementing new ufuncs the ufunc_db should be preferred,
as it allows for a more fine-grained incremental support.
"""
from .targets import ufunc_db
loop_sig = loop.ufunc_sig
try:
# check if the loop has a codegen description in the
# ufunc_db. If so, we can proceed.
# note that as of now not all ufuncs have an entry in the
# ufunc_db
supported_loop = loop_sig in ufunc_db.get_ufunc_info(ufunc)
except __HOLE__:
# for ufuncs not in ufunc_db, base the decision of whether the
# loop is supported on its types
loop_types = [x.char for x in loop.numpy_inputs + loop.numpy_outputs]
supported_types = '?bBhHiIlLqQfd'
# check if all the types involved in the ufunc loop are
# supported in this mode
supported_loop = all(t in supported_types for t in loop_types)
return supported_loop | KeyError | dataset/ETHPy150Open numba/numba/numba/numpy_support.py/supported_ufunc_loop |
def ufunc_find_matching_loop(ufunc, arg_types):
"""Find the appropriate loop to be used for a ufunc based on the types
of the operands
ufunc - The ufunc we want to check
arg_types - The tuple of arguments to the ufunc, including any
explicit output(s).
return value - A UFuncLoopSpec identifying the loop, or None
if no matching loop is found.
"""
# Separate logical input from explicit output arguments
input_types = arg_types[:ufunc.nin]
output_types = arg_types[ufunc.nin:]
assert(len(input_types) == ufunc.nin)
try:
np_input_types = [as_dtype(x) for x in input_types]
except __HOLE__:
return None
try:
np_output_types = [as_dtype(x) for x in output_types]
except NotImplementedError:
return None
def choose_types(numba_types, ufunc_letters):
"""
Return a list of Numba types representing *ufunc_letters*,
except when the letter designates a datetime64 or timedelta64,
in which case the type is taken from *numba_types*.
"""
assert len(ufunc_letters) >= len(numba_types)
types = [tp if letter in 'mM' else from_dtype(numpy.dtype(letter))
for tp, letter in zip(numba_types, ufunc_letters)]
# Add missing types (presumably implicit outputs)
types += [from_dtype(numpy.dtype(letter))
for letter in ufunc_letters[len(numba_types):]]
return types
# In NumPy, the loops are evaluated from first to last. The first one
# that is viable is the one used. One loop is viable if it is possible
# to cast every input operand to the one expected by the ufunc.
# Also under NumPy 1.10+ the output must be able to be cast back
# to a close enough type ("same_kind").
for candidate in ufunc.types:
ufunc_inputs = candidate[:ufunc.nin]
ufunc_outputs = candidate[-ufunc.nout:]
if 'O' in ufunc_inputs:
# Skip object arrays
continue
found = True
# Skip if any input or output argument is mismatching
for outer, inner in zip(np_input_types, ufunc_inputs):
# (outer is a dtype instance, inner is a type char)
if outer.char in 'mM' or inner in 'mM':
# For datetime64 and timedelta64, we want to retain
# precise typing (i.e. the units); therefore we look for
# an exact match.
if outer.char != inner:
found = False
break
elif not numpy.can_cast(outer.char, inner, 'safe'):
found = False
break
if found and strict_ufunc_typing:
# Can we cast the inner result to the outer result type?
for outer, inner in zip(np_output_types, ufunc_outputs):
if (outer.char not in 'mM' and not
numpy.can_cast(inner, outer.char, 'same_kind')):
found = False
break
if found:
# Found: determine the Numba types for the loop's inputs and
# outputs.
inputs = choose_types(input_types, ufunc_inputs)
outputs = choose_types(output_types, ufunc_outputs)
return UFuncLoopSpec(inputs, outputs, candidate)
return None | NotImplementedError | dataset/ETHPy150Open numba/numba/numba/numpy_support.py/ufunc_find_matching_loop |
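# A minimal, self-contained sketch (not the numba implementation above) of the
# same loop-selection idea, using only NumPy: walk ufunc.types in order and
# return the first signature whose input type characters every operand dtype
# can be safely cast to. The helper name is made up for illustration.
import numpy as np

def find_matching_loop(ufunc, operand_dtypes):
    for candidate in ufunc.types:              # e.g. 'dd->d'
        inputs, _ = candidate.split('->')
        if 'O' in inputs:                      # skip object loops
            continue
        if all(np.can_cast(dt, ch, 'safe')
               for dt, ch in zip(operand_dtypes, inputs)):
            return candidate
    return None

# int32 and float32 cannot both be held safely by float32, so the first
# viable loop is likely the float64 one ('dd->d'):
print(find_matching_loop(np.add, [np.dtype('int32'), np.dtype('float32')]))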
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by
the last name in the path. Raise ImproperlyConfigured if something goes
wrong.
Backported from Django 1.6.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except __HOLE__:
raise ImproperlyConfigured("{0}{1} doesn't look like "
"a module path".format(error_prefix,
dotted_path))
try:
module = import_module(module_path)
except ImportError as err:
msg = '{0}Error importing module {1}: "{2}"'.format(error_prefix,
module_path,
err)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
try:
attr = getattr(module, class_name)
except AttributeError:
raise ImproperlyConfigured('{0}Module "{1}" does not define a '
'"{2}" attribute/class'.format(error_prefix,
module_path,
class_name))
return attr | ValueError | dataset/ETHPy150Open jazzband/django-configurations/configurations/utils.py/import_by_path |
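# A minimal, standard-library-only sketch of the same idea as import_by_path
# above; the richer ImproperlyConfigured error handling is omitted here.
from importlib import import_module

def load_attr(dotted_path):
    module_path, attr_name = dotted_path.rsplit('.', 1)
    return getattr(import_module(module_path), attr_name)

OrderedDict = load_attr('collections.OrderedDict')
print(OrderedDict([('a', 1), ('b', 2)]))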
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
if type(func) is partial:
orig_func = func.func
argspec = getargspec(orig_func)
args = list(argspec[0])
defaults = list(argspec[3] or ())
kwoargs = list(argspec[4])
kwodefs = dict(argspec[5] or {})
if func.args:
args = args[len(func.args):]
for arg in func.keywords or ():
try:
i = args.index(arg) - len(args)
del args[i]
try:
del defaults[i]
except IndexError:
pass
except __HOLE__: # must be a kwonly arg
i = kwoargs.index(arg)
del kwoargs[i]
del kwodefs[arg]
return inspect.FullArgSpec(args, argspec[1], argspec[2],
tuple(defaults), kwoargs,
kwodefs, argspec[6])
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
return inspect.getfullargspec(func) | ValueError | dataset/ETHPy150Open jazzband/django-configurations/configurations/utils.py/getargspec |
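# Why the wrappers above must special-case functools.partial: pre-bound
# positional and keyword arguments disappear from the callable's effective
# signature, as this small illustration shows.
import inspect
from functools import partial

def f(a, b, c=3):
    return a + b + c

p = partial(f, 1)                  # 'a' is already bound
print(inspect.signature(f))        # (a, b, c=3)
print(inspect.signature(p))        # (b, c=3)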
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.im_func
parts = 0, ()
if type(func) is partial:
keywords = func.keywords
if keywords is None:
keywords = {}
parts = len(func.args), keywords.keys()
func = func.func
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
args, varargs, varkw = inspect.getargs(func.func_code)
func_defaults = func.func_defaults
if func_defaults is None:
func_defaults = []
else:
func_defaults = list(func_defaults)
if parts[0]:
args = args[parts[0]:]
if parts[1]:
for arg in parts[1]:
i = args.index(arg) - len(args)
del args[i]
try:
del func_defaults[i]
except __HOLE__:
pass
return inspect.ArgSpec(args, varargs, varkw, func_defaults) | IndexError | dataset/ETHPy150Open jazzband/django-configurations/configurations/utils.py/getargspec |
def removeStream(self, stream):
'''Unregister *stream* from this circuit.
Remove the stream from this circuit's stream map and send a
RelayEndCell. If the number of streams on this circuit drops to
zero, check with the circuit manager to see if this circuit should
be destroyed. If so, tear down the circuit.
:param oppy.stream.stream.Stream stream: stream to unregister
'''
try:
del self._streams[stream.stream_id]
cell = RelayEndCell.make(self.circuit_id, stream.stream_id)
self._encryptAndSendCell(cell)
except __HOLE__:
msg = ("Circuit {} notified that stream {} was closed, but "
"the circuit has no reference to this stream."
.format(self.circuit_id, stream.stream_id))
logging.debug(msg)
return
if len(self._streams) == 0:
if self._circuit_manager.shouldDestroyCircuit(self) is True:
self._sendDestroyCell()
self._closeCircuit()
msg = "Destroyed unused circuit {}.".format(self.circuit_id)
logging.debug(msg) | KeyError | dataset/ETHPy150Open nskinkel/oppy/oppy/circuit/circuit.py/Circuit.removeStream |
def _processRelayDataCell(self, cell, origin):
'''Called when this circuit receives an incoming RelayData cell.
Take the following actions:
1. Pass the relay payload in this cell to the stream with the
stream_id contained in this RelayData cell. Drop the cell
if we have no reference to the stream_id contained in the
cell.
2. Decrement this circuit's delivery window (which will
automatically send a RelaySendMeCell if this circuit's
               delivery window is low enough).
        :param oppy.cell.relay.RelayDataCell cell: relay data cell received
from the network
:param int origin: which node on the circuit's path this cell
came from
'''
sid = cell.rheader.stream_id
try:
self._streams[sid].recv(cell.rpayload)
self._decDeliverWindow()
except __HOLE__:
msg = ("Circuit {} received a RelayDataCell for nonexistent "
"stream {}. Dropping cell.".format(self.circuit_id, sid))
logging.debug(msg) | KeyError | dataset/ETHPy150Open nskinkel/oppy/oppy/circuit/circuit.py/Circuit._processRelayDataCell |
def _processRelayEndCell(self, cell, origin):
'''Called when this circuit receives a RelayEndCell.
Tear down the stream associated with the stream in the RelayEndCell
if this circuit has a reference to it. Drop the cell if we have
no reference to this stream.
        :param oppy.cell.relay.RelayEndCell cell: relay end cell received
from the network
:param int origin: which node on the circuit's path this cell
came from
'''
sid = cell.rheader.stream_id
try:
self._streams[sid].closeFromCircuit()
# TODO: handle REASON_EXITPOLICY
if cell.reason != REASON_DONE:
msg = ("Circuit {} received a RelayEndCell for stream {}, "
"and reason was not REASON_DONE. Reason: {}."
.format(self.circuit_id, sid, cell.reason))
logging.debug(msg)
except __HOLE__:
msg = ("Circuit {} received a RelayEndCell for nonexistent "
"stream {}. Dropping cell.".format(self.circuit_id, sid))
logging.debug(msg) | KeyError | dataset/ETHPy150Open nskinkel/oppy/oppy/circuit/circuit.py/Circuit._processRelayEndCell |
def _processRelayConnectedCell(self, cell, origin):
'''Called when this circuit receives a RelayConnectedCell.
Notify the stream associated with this cell's stream id that it's
now connected. Drop the cell if we have no reference to this
stream id.
:param oppy.cell.relay.RelayConnectedCell cell: relay connected cell
            received from the network
:param int origin: which node on the circuit's path this cell
came from
'''
sid = cell.rheader.stream_id
try:
self._streams[sid].streamConnected()
except __HOLE__:
msg = ("Circuit {} received a RelayConnectedCell for nonexistent "
"stream {}. Dropping cell.".format(self.circuit_id, sid))
logging.debug(msg)
return
msg = ("Circuit {} received a RelayConnectedCell for stream {}"
.format(self.circuit_id, sid))
logging.debug(msg) | KeyError | dataset/ETHPy150Open nskinkel/oppy/oppy/circuit/circuit.py/Circuit._processRelayConnectedCell |
def _processRelaySendMeCell(self, cell, origin):
'''Called when this circuit receives a RelaySendMeCell.
If this is a circuit-level sendme cell (i.e. its stream id is zero)
then increment this circuit's packaging window. If this circuit
is currently in state CState.BUFFERING **and** receiving this
sendme cell has incremented its packaging window > 0, then begin
listening for incoming data again (i.e. self._pollWriteQueue).
If this is a stream-level sendme cell, increment the corresponding
stream's packaging window. Drop the cell if we have no reference
to the stream associated with its stream id.
Drop this cell if it's received while we're still building the
circuit.
:param oppy.cell.relay.RelaySendMeCell cell: relay sendme cell
            received from the network
:param int origin: which node on the circuit's path this cell
came from
'''
sid = cell.rheader.stream_id
if sid == 0:
self._incPackageWindow()
else:
try:
self._streams[sid].incPackageWindow()
except __HOLE__:
msg = ("Circuit {} received a RelaySendMe cell on nonexistent"
" stream {}. Dropping cell."
.format(self.circuit_id, sid))
logging.debug(msg) | KeyError | dataset/ETHPy150Open nskinkel/oppy/oppy/circuit/circuit.py/Circuit._processRelaySendMeCell |
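# A toy sketch of the window-based flow control described in the docstrings
# above. The class, constants and method names are illustrative only and are
# not oppy's real API (Tor uses a 1000-cell circuit window with 100-cell
# sendme increments).
class ToyFlowControl(object):
    SENDME_INCREMENT = 100

    def __init__(self, window=1000):
        self._initial_window = window
        self.deliver_window = window    # cells we may still receive
        self.package_window = window    # cells we may still send

    def on_cell_delivered(self, send_sendme):
        self.deliver_window -= 1
        if self.deliver_window <= self._initial_window - self.SENDME_INCREMENT:
            send_sendme()
            self.deliver_window += self.SENDME_INCREMENT

    def on_sendme_received(self):
        self.package_window += self.SENDME_INCREMENT

    def can_package(self):
        return self.package_window > 0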
def import_entry_point(fullname):
"""Given a name import the class and return it."""
(module_name, classname) = partition(fullname)
try:
import_module(module_name)
# This is done to ensure we get the right submodule
module = __import__(module_name)
for submodule in module_name.split('.')[1:]:
module = getattr(module, submodule)
LOG.debug("Importing class: %s", classname)
cls = getattr(module, classname)
# TODO(harlowja) actually verify this is a class??
except (ImportError, __HOLE__, ValueError) as err:
raise RuntimeError('Could not load entry point %s: %s' %
(fullname, err))
return cls | AttributeError | dataset/ETHPy150Open openstack/anvil/anvil/importer.py/import_entry_point |
def import_module(module_name):
try:
LOG.debug("Importing module: %s", module_name)
__import__(module_name)
return sys.modules.get(module_name, None)
except (__HOLE__, ValueError) as err:
raise RuntimeError('Could not load module %s: %s' %
(module_name, err)) | ImportError | dataset/ETHPy150Open openstack/anvil/anvil/importer.py/import_module |
@click.command(help='A command to calculate and return the next steps for a ci build to run')
@pass_config
def next_step(config):
step = 'dummy'
# SolanoCI
if config.build_vendor == 'SolanoCI':
profile = os.environ.get('SOLANO_PROFILE_NAME')
i_current_step = 0
if profile:
if config.build_type == 'feature':
try:
i_current_step = config.steps_feature.index(profile)
if len(config.steps_feature) > i_current_step + 1:
step = config.steps_feature[i_current_step + 1]
except ValueError:
pass
elif config.build_type == 'master':
try:
i_current_step = config.steps_master.index(profile)
if len(config.steps_master) > i_current_step + 1:
step = config.steps_master[i_current_step + 1]
except __HOLE__:
pass
# The first step is manually specified in the plan, so the first dynamic step starts at 2
step_var = 'plan_step_%s' % str(i_current_step + 2)
click.echo('Writing next step %s as %s to solano-plan-variables.json' % (step, step_var))
f = open('solano-plan-variables.json', 'w')
data = {step_var: step}
f.write(json.dumps(data))
f.close()
return
# command: ci beta_deploy | ValueError | dataset/ETHPy150Open SalesforceFoundation/CumulusCI/cli/cumulusci.py/next_step |
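# A dependency-free illustration of the "next step" lookup performed above:
# find the current profile in an ordered list of steps and return the step
# after it, falling back to the same 'dummy' sentinel. The step names below
# are made up.
def next_step_after(steps, current, default='dummy'):
    try:
        i = steps.index(current)
    except ValueError:
        return default
    return steps[i + 1] if i + 1 < len(steps) else default

print(next_step_after(['deploy_beta', 'test_beta', 'release'], 'test_beta'))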
def _set_souma_id(app):
""" Sets the SOUMA_ID of the local souma in the configuration of app.
Loads (or creates and loades) the secret key of the local souma and uses it
to set the id of the local souma.
Args:
app: A flask app
"""
# Load/set secret key
try:
with open(app.config["SECRET_KEY_FILE"], 'rb') as f:
app.config['SECRET_KEY'] = f.read(24)
except __HOLE__:
# Create new secret key
app.logger.debug("Creating new secret key")
app.config['SECRET_KEY'] = os.urandom(24)
with open(app.config["SECRET_KEY_FILE"], 'wb') as f:
os.chmod(app.config["SECRET_KEY_FILE"], 0700)
f.write(app.config['SECRET_KEY'])
# Generate ID used to identify this machine
app.config['SOUMA_ID'] = SHA256.new(app.config['SECRET_KEY'] + str(app.config['LOCAL_PORT'])).hexdigest()[:32] | IOError | dataset/ETHPy150Open ciex/souma/nucleus/helpers.py/_set_souma_id |
def reset_userdata():
"""Reset all userdata files"""
from web_ui import app
for fileid in ["DATABASE", "SECRET_KEY_FILE", "PASSWORD_HASH_FILE"]:
try:
os.remove(app.config[fileid])
except __HOLE__:
app.logger.warning("RESET: {} {} not found".format(fileid, app.config[fileid]))
else:
app.logger.warning("RESET: {} {} deleted".format(fileid, app.config[fileid])) | OSError | dataset/ETHPy150Open ciex/souma/web_ui/helpers.py/reset_userdata |
def watch_layouts(continuous=True):
"""Watch layout file and update layout definitions once they change
Parameters:
continuous (bool): Set False to only load definitions once
Returns:
dict: Layout definitions if `continuous` is False
"""
import json
from web_ui import app
mtime_last = 0
layout_filename = os.path.join(app.config["RUNTIME_DIR"], 'static', 'layouts.json')
cont = True
while cont is True:
mtime_cur = os.path.getmtime(layout_filename)
if mtime_cur != mtime_last:
try:
with open(layout_filename) as f:
app.config['LAYOUT_DEFINITIONS'] = json.load(f)
except __HOLE__:
app.logger.error("Failed loading layout definitions")
app.config['LAYOUT_DEFINITIONS'] = dict()
else:
app.logger.info("Loaded {} layout definitions".format(len(app.config["LAYOUT_DEFINITIONS"])))
mtime_last = mtime_cur
cont = True if continuous is True else False
sleep(1)
return app.config["LAYOUT_DEFINITIONS"] | IOError | dataset/ETHPy150Open ciex/souma/web_ui/helpers.py/watch_layouts |
def top_contributors(request, area):
"""Top contributors list view."""
try:
page = int(request.GET.get('page', 1))
except __HOLE__:
page = 1
page_size = 100
locale = _validate_locale(request.GET.get('locale'))
product = request.GET.get('product')
if product:
product = get_object_or_404(Product, slug=product)
if area == 'army-of-awesome':
results, total = top_contributors_aoa(
locale=locale, count=page_size, page=page)
locales = settings.SUMO_LANGUAGES
elif area == 'questions':
results, total = top_contributors_questions(
locale=locale, product=product, count=page_size, page=page)
locales = QuestionLocale.objects.locales_list()
elif area == 'kb':
results, total = top_contributors_kb(
product=product, count=page_size, page=page)
locales = None
elif area == 'l10n':
results, total = top_contributors_l10n(
locale=locale, product=product, count=page_size, page=page)
locales = settings.SUMO_LANGUAGES
else:
raise Http404
return render(request, 'community/top_contributors.html', {
'results': results,
'total': total,
'area': area,
'locale': locale,
'locales': locales,
'product': product,
'products': Product.objects.filter(visible=True),
'page': page,
'page_size': page_size,
}) | ValueError | dataset/ETHPy150Open mozilla/kitsune/kitsune/community/views.py/top_contributors |
def write(self, distFolder, classFilter=None, callback="apiload", showInternals=False, showPrivates=False, printErrors=True, highlightCode=True):
"""
Writes API data generated from JavaScript into distFolder
:param distFolder: Where to store the API data
:param classFilter: Tuple of classes or method to use for filtering
:param callback: Name of callback to use for loading or None if pure JSON should be used
:param showInternals: Include internal methods inside API data
:param showPrivates: Include private methods inside API data
:param printErrors: Whether errors should be printed to the console
:param highlightCode: Whether to enable code highlighting using Pygments
:type distFolder: str
:type classFilter: tuple or function
:type callback: function
:type showInternals: bool
:type showPrivates: bool
:type printErrors: bool
:type highlightCode: bool
"""
#
# Collecting
#
Console.info("Collecting API Data...")
Console.indent()
apiData = {}
highlightedCode = {}
for project in self.__session.getProjects():
classes = project.getClasses()
Console.info("Loading API of project %s: %s...", Console.colorize(project.getName(), "bold"), Console.colorize("%s classes" % len(classes), "cyan"))
Console.indent()
for className in classes:
if self.__isIncluded(className, classFilter):
data = classes[className].getApi(highlightCode)
if not data.isEmpty:
apiData[className] = data
highlightedCode[className] = classes[className].getHighlightedCode()
else:
Console.info("Skipping %s, class is empty." % className)
Console.outdent()
Console.outdent()
#
# Processing
#
Console.info("Processing API Data...")
Console.indent()
data, index, search = self.__process(apiData, classFilter=classFilter, internals=showInternals, privates=showPrivates, printErrors=printErrors, highlightCode=highlightCode)
Console.outdent()
#
# Writing
#
Console.info("Storing API data...")
Console.indent()
writeCounter = 0
extension = "js" if callback else "json"
compress = True
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
def encode(content, name):
if compress:
jsonContent = json.dumps(content, sort_keys=True, cls=JsonEncoder, separators=(',',':'))
else:
jsonContent = json.dumps(content, sort_keys=True, cls=JsonEncoder, indent=2)
if callback:
return "%s(%s,'%s');" % (callback, jsonContent, name)
else:
return jsonContent
Console.info("Saving class data (%s files)...", len(data))
Console.indent()
for className in data:
try:
classData = data[className]
if type(classData) is dict:
classExport = classData
else:
classExport = classData.export()
File.write(self.__session.expandFileName(os.path.join(distFolder, "%s.%s" % (className, extension))), encode(classExport, className))
except TypeError as writeError:
Console.error("Could not write API data of: %s: %s", className, writeError)
continue
Console.outdent()
if highlightCode:
Console.info("Saving highlighted code (%s files)...", len(highlightedCode))
Console.indent()
for className in highlightedCode:
try:
File.write(self.__session.expandFileName(os.path.join(distFolder, "%s.html" % className)), highlightedCode[className])
except __HOLE__ as writeError:
Console.error("Could not write highlighted code of: %s: %s", className, writeError)
continue
Console.outdent()
Console.info("Writing index...")
Console.indent()
File.write(self.__session.expandFileName(os.path.join(distFolder, "meta-index.%s" % extension)), encode(index, "meta-index"))
File.write(self.__session.expandFileName(os.path.join(distFolder, "meta-search.%s" % extension)), encode(search, "meta-search"))
Console.outdent()
Console.outdent() | TypeError | dataset/ETHPy150Open zynga/jasy/jasy/js/api/Writer.py/ApiWriter.write |
@get('/')
def index(request):
try:
# Should raise an error.
return 'What? Somehow found a remote user: %s' % request.REMOTE_USER
except __HOLE__:
pass
return "Remote Addr: '%s' & GET name: '%s'" % (request.REMOTE_ADDR, request.GET.get('name', 'Not found')) | KeyError | dataset/ETHPy150Open toastdriven/itty/examples/auto_environ_access.py/index |
def test_shell_background_support_setsid(both_debug_modes, setsid_enabled):
"""In setsid mode, dumb-init should suspend itself and its children when it
receives SIGTSTP, SIGTTOU, or SIGTTIN.
"""
proc = Popen(
('dumb-init', sys.executable, '-m', 'tests.lib.print_signals'),
stdout=PIPE,
)
match = re.match(b'^ready \(pid: ([0-9]+)\)\n$', proc.stdout.readline())
pid = match.group(1).decode('ascii')
for signum in SUSPEND_SIGNALS:
# both dumb-init and print_signals should be running or sleeping
assert process_state(pid) in ['running', 'sleeping']
assert process_state(proc.pid) in ['running', 'sleeping']
# both should now suspend
proc.send_signal(signum)
for _ in range(1000):
time.sleep(0.001)
try:
assert process_state(proc.pid) == 'stopped'
assert process_state(pid) == 'stopped'
except __HOLE__:
pass
else:
break
else:
raise RuntimeError('Timed out waiting for processes to stop.')
# and then both wake up again
proc.send_signal(SIGCONT)
assert (
proc.stdout.readline() == '{0}\n'.format(SIGCONT).encode('ascii')
)
assert process_state(pid) in ['running', 'sleeping']
assert process_state(proc.pid) in ['running', 'sleeping']
for pid in pid_tree(proc.pid):
os.kill(pid, SIGKILL) | AssertionError | dataset/ETHPy150Open Yelp/dumb-init/tests/shell_background_test.py/test_shell_background_support_setsid |
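# The test above relies on process_state() and pid_tree() helpers from
# tests.lib that are not shown in this record. A rough, Linux-only sketch of
# what a /proc-based process_state() could look like (purely illustrative,
# not dumb-init's actual helper):
def process_state_sketch(pid):
    with open('/proc/{0}/status'.format(pid)) as f:
        for line in f:
            if line.startswith('State:'):
                # e.g. "State:\tS (sleeping)" -> "sleeping"
                return line.split('(', 1)[1].rstrip().rstrip(')')
    raise RuntimeError('no State line found for pid {0}'.format(pid))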
def feed(request, url, feed_dict=None):
if not feed_dict:
raise Http404, "No feeds are registered."
try:
slug, param = url.split('/', 1)
except ValueError:
slug, param = url, ''
try:
f = feed_dict[slug]
except __HOLE__:
raise Http404, "Slug %r isn't registered." % slug
try:
feedgen = f(slug, request.path).get_feed(param)
except feeds.FeedDoesNotExist:
raise Http404, "Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/syndication/views.py/feed |
def interactive_authorize(self, consumer, app, **kwargs):
from textwrap import fill
# Suppress batchhttp.client's no-log-handler warning.
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
if not isinstance(consumer, oauth.OAuthConsumer):
consumer = oauth.OAuthConsumer(*consumer)
if not isinstance(app, typepad.Application):
app = typepad.Application.get_by_id(app)
# Set up an oauth client for our signed requestses.
oauth_client = OAuthClient(consumer, None)
oauth_client.request_token_url = app.oauth_request_token_url
oauth_client.access_token_url = app.oauth_access_token_url
oauth_client.authorization_url = app.oauth_authorization_url
# Get a request token for the viewer to interactively authorize.
request_token = oauth_client.fetch_request_token(None)
log.debug("Got request token %r", request_token)
# Ask the viewer to authorize it.
approve_url = oauth_client.authorize_token(params=kwargs)
log.debug("Asking viewer to authorize token with URL %r", approve_url)
print fill("""To join your application %r, follow this link and click "Allow":"""
% app.name, width=78)
print
print "<%s>" % approve_url
print
try:
verifier = raw_input('Enter the verifier code TypePad gave you: ')
except __HOLE__:
print
return
# Exchange the authorized request token for an access token.
access_token = oauth_client.fetch_access_token(verifier=verifier)
# Re-authorize ourselves using that access token, so we can make authenticated requests with it.
domain = urlparse.urlsplit(self.endpoint)[1]
self.add_credentials(consumer, access_token, domain=domain)
# Make sure the key works.
typepad.client.batch_request()
user = typepad.User.get_self()
typepad.client.complete_batch()
# Yay! Give the access token to the viewer for their reference.
print
print fill("""Yay! This new access token authorizes this typepad.client to act as %s (%s). Here's the token:"""
% (user.display_name, user.url_id), width=78)
print """
Key: %s
Secret: %s
""" % (access_token.key, access_token.secret)
print fill("""Pass this access token to typepad.client.add_credentials() to re-authorize as %s later."""
% user.display_name, width=78)
print
return access_token | KeyboardInterrupt | dataset/ETHPy150Open typepad/python-typepad-api/typepad/tpclient.py/OAuthHttp.interactive_authorize |
def _channel_id_to_PIL(channel_id, color_mode):
if ChannelID.is_known(channel_id):
if channel_id == ChannelID.TRANSPARENCY_MASK:
return 'A'
warnings.warn("Channel %s (%s) is not handled" % (channel_id, ChannelID.name_of(channel_id)))
return None
try:
assert channel_id >= 0
if color_mode == ColorMode.RGB:
return 'RGB'[channel_id]
elif color_mode == ColorMode.CMYK:
return 'CMYK'[channel_id]
elif color_mode == ColorMode.GRAYSCALE:
return 'L'[channel_id]
except __HOLE__:
# spot channel
warnings.warn("Spot channel %s is not handled" % channel_id)
return None | IndexError | dataset/ETHPy150Open psd-tools/psd-tools/src/psd_tools/user_api/pil_support.py/_channel_id_to_PIL |
def __enter__(self):
try:
open(self.workdir)
assert 0
except IOError:
subprocess.call(["mkdir", "-p", '%s/db' % self.workdir])
proc_args = [ "mongod",
"--dbpath=%s/db" % self.workdir,
"--nojournal",
"--noprealloc",
"--port=22334"]
#print "starting mongod", proc_args
self.mongo_proc = subprocess.Popen(
proc_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.workdir, # this prevented mongod assertion fail
)
try:
interval = .125
while interval <= 2:
if interval > .125:
print "Waiting for mongo to come up"
time.sleep(interval)
interval *= 2
if self.db_up():
break
if self.db_up():
return self
else:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except __HOLE__:
pass # if it crashed there is no such process
out, err = self.mongo_proc.communicate()
print >> sys.stderr, out
print >> sys.stderr, err
raise RuntimeError('No database connection', proc_args)
except Exception, e:
try:
os.kill(self.mongo_proc.pid, signal.SIGTERM)
except OSError:
pass # if it crashed there is no such process
raise e | OSError | dataset/ETHPy150Open hyperopt/hyperopt/hyperopt/tests/test_mongoexp.py/TempMongo.__enter__ |
def generate_itemSimOnTypeSet():
prefs = {}
result = {}
try:
with open(os.getcwd() + '//ml-100k' + '/u.item') as item:
for line in item:
typeVector = line.split('|')[5:24]
itemId = line.split('|')[0]
prefs[itemId] = typeVector
result.setdefault(itemId, {})
except __HOLE__ as err:
print('File error: ' + str(err))
#print similarity.sim_itemType(prefs['1677'],prefs['1678'],19)
for key1, value1 in prefs.items():
for key2, value2 in prefs.items():
if key1 != key2:
s = similarity.sim_itemType(value1, value2, 19)
                print key1, key2, s
result[key1][key2] = s
dumpPickle(result, '/itemSimOnType.pkl') | IOError | dataset/ETHPy150Open clasnake/recommender/tool.py/generate_itemSimOnTypeSet |
def test_badcon(self):
self.top.add("driver", FixedPointIterator())
self.top.add("simple", Simple2())
self.top.driver.workflow.add('simple')
self.top.driver.add_constraint('simple.invar - simple.outvar = 0')
self.top.driver.add_parameter('simple.invar')
try:
self.top.run()
except __HOLE__, err:
msg = "driver: Please specify constraints in the form 'A=B'"
msg += ': simple.invar - simple.outvar = 0'
self.assertEqual(str(err), msg)
else:
self.fail('RuntimeError expected')
self.top.driver.clear_constraints()
self.top.driver.add_constraint('simple.invar - simple.outvar = simple.exec_count')
try:
self.top.run()
except RuntimeError, err:
msg = "driver: Please specify constraints in the form 'A=B'"
msg += ': simple.invar - simple.outvar = simple.exec_count'
self.assertEqual(str(err), msg)
else:
self.fail('RuntimeError expected') | RuntimeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/test/test_iterate.py/FixedPointIteratorTestCase.test_badcon |
def test_check_config(self):
self.top.add("driver", FixedPointIterator())
self.top.add("simple", Multi())
self.top.driver.workflow.add('simple')
try:
self.top.run()
except __HOLE__, err:
msg = "driver: FixedPointIterator requires a cyclic workflow, or a " + \
"parameter/constraint pair."
self.assertEqual(str(err), msg)
else:
self.fail('RuntimeError expected')
self.top.driver.add_constraint('simple.out1 - simple.in1 = 0')
try:
self.top.run()
except RuntimeError, err:
msg = "driver: The number of input parameters must equal the " \
"number of output constraint equations in FixedPointIterator."
self.assertEqual(str(err), msg)
else:
self.fail('RuntimeError expected')
self.top.driver.add_parameter('simple.in1')
self.top.driver.add_parameter('simple.in2')
try:
self.top.run()
except RuntimeError, err:
msg = "driver: The number of input parameters must equal the " \
"number of output constraint equations in FixedPointIterator."
self.assertEqual(str(err), msg)
else:
self.fail('RuntimeError expected') | RuntimeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/test/test_iterate.py/FixedPointIteratorTestCase.test_check_config |
def start_pillows(pillows=None):
"""
Actual runner for running pillow processes. Use this to run pillows.
"""
run_pillows = pillows or get_all_pillow_instances()
try:
while True:
jobs = []
print "[pillowtop] Starting pillow processes"
for pillow_class in run_pillows:
p = multiprocessing.Process(target=pillow_class.run)
p.start()
jobs.append(p)
print "[pillowtop] all processes started, pids: %s" % ([x.pid for x in jobs])
for j in jobs:
j.join()
print "[pillowtop] All processes complete, restarting"
except __HOLE__:
sys.exit() | KeyboardInterrupt | dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/pillowtop/run_pillowtop.py/start_pillows |
def upload_object_via_stream(self, iterator, container, object_name,
extra=None):
if isinstance(iterator, file):
iterator = iter(iterator)
data_hash = hashlib.md5()
generator = read_in_chunks(iterator, CHUNK_SIZE, True)
bytes_transferred = 0
try:
chunk = next(generator)
except __HOLE__:
chunk = ''
path = self._namespace_path(container.name + '/' + object_name)
method = 'PUT'
if extra is not None:
content_type = extra.get('content_type', None)
else:
content_type = None
if not content_type:
content_type, _ = guess_file_mime_type(object_name)
if not content_type:
raise AttributeError(
'File content-type could not be guessed and' +
' no content_type value provided')
try:
self.connection.request(path + '?metadata/system')
except AtmosError:
e = sys.exc_info()[1]
if e.code != 1003:
raise
method = 'POST'
while True:
end = bytes_transferred + len(chunk) - 1
data_hash.update(b(chunk))
headers = {
'x-emc-meta': 'md5=' + data_hash.hexdigest(),
'Content-Type': content_type,
}
if len(chunk) > 0 and bytes_transferred > 0:
headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end)
method = 'PUT'
result = self.connection.request(path, method=method, data=chunk,
headers=headers)
bytes_transferred += len(chunk)
try:
chunk = next(generator)
except StopIteration:
break
if len(chunk) == 0:
break
data_hash = data_hash.hexdigest()
if extra is None:
meta_data = {}
else:
meta_data = extra.get('meta_data', {})
meta_data['md5'] = data_hash
user_meta = ', '.join([k + '=' + str(v) for k, v in
list(meta_data.items())])
self.connection.request(path + '?metadata/user', method='POST',
headers={'x-emc-meta': user_meta})
result = self.connection.request(path + '?metadata/system')
meta = self._emc_meta(result)
extra = {
'object_id': meta['objectid'],
'meta_data': meta_data,
}
return Object(object_name, bytes_transferred, data_hash, extra,
meta_data, container, self) | StopIteration | dataset/ETHPy150Open apache/libcloud/libcloud/storage/drivers/atmos.py/AtmosDriver.upload_object_via_stream |
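# A small, self-contained illustration of the chunked upload/hash pattern used
# above. read_in_chunks is a libcloud utility, so this sketch uses a plain
# generator instead; the helper names are made up.
import hashlib
import io

def iter_chunks(fileobj, chunk_size=8192):
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        yield chunk

def md5_of_stream(fileobj):
    digest = hashlib.md5()
    nbytes = 0
    for chunk in iter_chunks(fileobj):
        digest.update(chunk)
        nbytes += len(chunk)
    return digest.hexdigest(), nbytes

print(md5_of_stream(io.BytesIO(b'hello world')))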
def test_set_vif_bandwidth_config_no_extra_specs(self):
# Test whether test_set_vif_bandwidth_config_no_extra_specs fails when
# its second parameter has no 'extra_specs' field.
try:
            # The conf will never be used, so we can use 'None'.
            # An empty dictionary is fine: all that matters is that there is no
# 'extra_specs' field.
designer.set_vif_bandwidth_config(None, {})
except __HOLE__ as e:
self.fail('KeyError: %s' % e) | KeyError | dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/tests/unit/virt/libvirt/test_designer.py/DesignerTestCase.test_set_vif_bandwidth_config_no_extra_specs |
def update(self, dispatch_fn):
touchmap = self.touchmap
while True:
try:
value = self.q.pop()
except __HOLE__:
return
action, fid, x, y = value
y = 1 - y
if fid not in touchmap:
touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))
else:
me = touchmap[fid]
me.move((x, y))
if action == 'fingerdown':
dispatch_fn('begin', me)
elif action == 'fingerup':
me.update_time_end()
dispatch_fn('end', me)
del touchmap[fid]
else:
dispatch_fn('update', me) | IndexError | dataset/ETHPy150Open kivy/kivy/kivy/core/window/window_sdl2.py/SDL2MotionEventProvider.update |
def _mainloop(self):
EventLoop.idle()
        # for android/iOS, we don't want to process any event nor run our
        # main loop while the pause is going on. This loop waits for any event
        # (not handled by the event filter) and removes it from the queue.
        # Nothing happens during the pause on iOS, except gyroscope values sent
        # over the joystick. So it's safe.
while self._pause_loop:
self._win.wait_event()
if not self._pause_loop:
break
self._win.poll()
while True:
event = self._win.poll()
if event is False:
break
if event is None:
continue
action, args = event[0], event[1:]
if action == 'quit':
if self.dispatch('on_request_close'):
continue
EventLoop.quit = True
self.close()
break
elif action in ('fingermotion', 'fingerdown', 'fingerup'):
# for finger, pass the raw event to SDL motion event provider
# XXX this is problematic. On OSX, it generates touches with 0,
# 0 coordinates, at the same times as mouse. But it works.
# We have a conflict of using either the mouse or the finger.
# Right now, we have no mechanism that we could use to know
# which is the preferred one for the application.
if platform in ('ios', 'android'):
SDL2MotionEventProvider.q.appendleft(event)
pass
elif action == 'mousemotion':
x, y = args
x, y = self._fix_mouse_pos(x, y)
self._mouse_x = x
self._mouse_y = y
# don't dispatch motion if no button are pressed
if len(self._mouse_buttons_down) == 0:
continue
self._mouse_meta = self.modifiers
self.dispatch('on_mouse_move', x, y, self.modifiers)
elif action in ('mousebuttondown', 'mousebuttonup'):
x, y, button = args
x, y = self._fix_mouse_pos(x, y)
btn = 'left'
if button == 3:
btn = 'right'
elif button == 2:
btn = 'middle'
eventname = 'on_mouse_down'
self._mouse_buttons_down.add(button)
if action == 'mousebuttonup':
eventname = 'on_mouse_up'
self._mouse_buttons_down.remove(button)
self._mouse_x = x
self._mouse_y = y
self.dispatch(eventname, x, y, btn, self.modifiers)
elif action.startswith('mousewheel'):
self._update_modifiers()
x, y, button = args
btn = 'scrolldown'
if action.endswith('up'):
btn = 'scrollup'
elif action.endswith('right'):
btn = 'scrollright'
elif action.endswith('left'):
btn = 'scrollleft'
self._mouse_meta = self.modifiers
self._mouse_btn = btn
#times = x if y == 0 else y
#times = min(abs(times), 100)
#for k in range(times):
self._mouse_down = True
self.dispatch('on_mouse_down',
self._mouse_x, self._mouse_y, btn, self.modifiers)
self._mouse_down = False
self.dispatch('on_mouse_up',
self._mouse_x, self._mouse_y, btn, self.modifiers)
elif action == 'dropfile':
dropfile = args
self.dispatch('on_dropfile', dropfile[0])
# video resize
elif action == 'windowresized':
self._size = self._win.window_size
# don't use trigger here, we want to delay the resize event
cb = self._do_resize
Clock.unschedule(cb)
Clock.schedule_once(cb, .1)
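            # NOTE: the next condition duplicates the 'windowresized' branch
            # above, so that elif block is unreachable as written.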
elif action == 'windowresized':
self.canvas.ask_update()
elif action == 'windowrestored':
self.dispatch('on_restore')
self.canvas.ask_update()
elif action == 'windowexposed':
self.canvas.ask_update()
elif action == 'windowminimized':
self.dispatch('on_minimize')
if Config.getboolean('kivy', 'pause_on_minimize'):
self.do_pause()
elif action == 'windowmaximized':
self.dispatch('on_maximize')
elif action == 'windowhidden':
self.dispatch('on_hide')
elif action == 'windowshown':
self.dispatch('on_show')
elif action == 'windowfocusgained':
self._focus = True
elif action == 'windowfocuslost':
self._focus = False
elif action == 'windowenter':
self.dispatch('on_cursor_enter')
elif action == 'windowleave':
self.dispatch('on_cursor_leave')
elif action == 'joyaxismotion':
stickid, axisid, value = args
self.dispatch('on_joy_axis', stickid, axisid, value)
elif action == 'joyhatmotion':
stickid, hatid, value = args
self.dispatch('on_joy_hat', stickid, hatid, value)
elif action == 'joyballmotion':
stickid, ballid, xrel, yrel = args
self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)
elif action == 'joybuttondown':
stickid, buttonid = args
self.dispatch('on_joy_button_down', stickid, buttonid)
elif action == 'joybuttonup':
stickid, buttonid = args
self.dispatch('on_joy_button_up', stickid, buttonid)
elif action in ('keydown', 'keyup'):
mod, key, scancode, kstr = args
try:
key = self.key_map[key]
except KeyError:
pass
if action == 'keydown':
self._update_modifiers(mod, key)
else:
self._update_modifiers(mod) # ignore the key, it
# has been released
# if mod in self._meta_keys:
if (key not in self._modifiers and
key not in self.command_keys.keys()):
try:
kstr = unichr(key)
except __HOLE__:
pass
#if 'shift' in self._modifiers and key\
# not in self.command_keys.keys():
# return
if action == 'keyup':
self.dispatch('on_key_up', key, scancode)
continue
# don't dispatch more key if down event is accepted
if self.dispatch('on_key_down', key,
scancode, kstr,
self.modifiers):
continue
self.dispatch('on_keyboard', key,
scancode, kstr,
self.modifiers)
elif action == 'textinput':
text = args[0]
self.dispatch('on_textinput', text)
# unhandled event !
else:
Logger.trace('WindowSDL: Unhandled event %s' % str(event)) | ValueError | dataset/ETHPy150Open kivy/kivy/kivy/core/window/window_sdl2.py/WindowSDL._mainloop |
def __getattr__(self, name):
try:
return self[name.replace('_', '-')]
except __HOLE__ as error:
raise AttributeError(str(error)) | KeyError | dataset/ETHPy150Open letsencrypt/letsencrypt/acme/acme/messages.py/Directory.__getattr__ |
def __getitem__(self, name):
try:
return self._jobj[self._canon_key(name)]
except __HOLE__:
raise KeyError('Directory field not found') | KeyError | dataset/ETHPy150Open letsencrypt/letsencrypt/acme/acme/messages.py/Directory.__getitem__ |
@classmethod
def from_json(cls, jobj):
jobj['meta'] = cls.Meta.from_json(jobj.pop('meta', {}))
try:
return cls(jobj)
except __HOLE__ as error:
raise jose.DeserializationError(str(error)) | ValueError | dataset/ETHPy150Open letsencrypt/letsencrypt/acme/acme/messages.py/Directory.from_json |
def command(options=None, usage=None, name=None, shortlist=False, hide=False):
'''Decorator to mark function to be used for command line processing.
All arguments are optional:
- ``options``: options in format described in docs. If not supplied,
will be determined from function.
- ``usage``: usage string for function, replaces ``%name`` with name
of program or subcommand. In case if it's subcommand and ``%name``
is not present, usage is prepended by ``name``
- ``name``: used for multiple subcommands. Defaults to wrapped
function name
- ``shortlist``: if command should be included in shortlist. Used
only with multiple subcommands
- ``hide``: if command should be hidden from help listing. Used only
with multiple subcommands, overrides ``shortlist``
'''
def wrapper(func):
try:
options_ = list(guess_options(func))
except TypeError:
options_ = []
try:
options_ = options_ + list(options)
except TypeError:
pass
name_ = name or func.__name__.replace('_', '-')
if usage is None:
usage_ = guess_usage(func, options_)
else:
usage_ = usage
prefix = hide and '~' or (shortlist and '^' or '')
CMDTABLE[prefix + name_] = (func, options_, usage_)
def help_func(name=None):
return help_cmd(func, replace_name(usage_, sysname()), options_)
@wraps(func)
def inner(*args, **opts):
# look if we need to add 'help' option
try:
(True for option in reversed(options_)
if option[1] == 'help').next()
except __HOLE__:
options_.append(('h', 'help', False, 'show help'))
argv = opts.pop('argv', sys.argv[1:])
if opts.pop('help', False):
return help_func()
if args or opts:
# no catcher here because this is call from Python
return call_cmd_regular(func, options_)(*args, **opts)
try:
opts, args = catcher(lambda: parse(argv, options_), help_func)
except Abort:
return -1
try:
if opts.pop('help', False):
return help_func()
return catcher(lambda: call_cmd(name_, func)(*args, **opts),
help_func)
except Abort:
return -1
return inner
return wrapper | StopIteration | dataset/ETHPy150Open kennethreitz-archive/argue/argue/core.py/command |
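# Hypothetical usage of the decorator above. With no explicit options list it
# is assumed that guess_options() derives option definitions from the keyword
# defaults, as opster-style libraries do; the command name and options below
# are made up.
@command(usage='%name [-n NAME]', shortlist=True)
def greet(name='world', verbose=False):
    '''print a greeting'''
    if verbose:
        print('about to greet')
    print('hello, %s' % name)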
def autocomplete(cmdtable, args):
"""Command and option completion.
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if not os.environ.has_key('ARGUE_AUTO_COMPLETE'):
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword-1]
except __HOLE__:
current = ''
commands = []
for k in cmdtable.keys():
commands += aliases_(k)
# command
if cword == 1:
print ' '.join(filter(lambda x: x.startswith(current), commands))
# command options
elif cwords[0] in commands:
options = []
aliases, (cmd, opts, usage) = findcmd(cwords[0], cmdtable)
for (short, long, default, help) in opts:
options.append('-%s' % short)
options.append('--%s' % long)
options = [o for o in options if o.startswith(current)]
print ' '.join(filter(lambda x: x.startswith(current), options))
sys.exit(1) | IndexError | dataset/ETHPy150Open kennethreitz-archive/argue/argue/core.py/autocomplete |
def read_stdout(self, num_bytes):
# Obtain useful read-some-bytes function
if self.using_pty:
# Need to handle spurious OSErrors on some Linux platforms.
try:
data = os.read(self.parent_fd, num_bytes)
except __HOLE__ as e:
# Only eat this specific OSError so we don't hide others
if "Input/output error" not in str(e):
raise
# The bad OSErrors happen after all expected output has
# appeared, so we return a falsey value, which triggers the
# "end of output" logic in code using reader functions.
data = None
else:
data = os.read(self.process.stdout.fileno(), num_bytes)
return data | OSError | dataset/ETHPy150Open pyinvoke/invoke/invoke/runners.py/Local.read_stdout |
def write_stdin(self, data):
# NOTE: parent_fd from os.fork() is a read/write pipe attached to our
# forked process' stdout/stdin, respectively.
fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
# Try to write, ignoring broken pipes if encountered (implies child
# process exited before the process piping stdin to us finished;
# there's nothing we can do about that!)
try:
return os.write(fd, data)
except __HOLE__ as e:
if 'Broken pipe' not in str(e):
raise | OSError | dataset/ETHPy150Open pyinvoke/invoke/invoke/runners.py/Local.write_stdin |
def __getattr__(self, attr):
try:
return self._links[attr]
except __HOLE__:
raise AttributeError(attr) | KeyError | dataset/ETHPy150Open jacobian/valor/valor/resource.py/Resource.__getattr__ |
def _is_numeric(self, value):
try:
int(value)
except __HOLE__:
return False
return True | ValueError | dataset/ETHPy150Open wooyek/flask-social-blueprint/example/gae/auth/models.py/AppEngineUserDatastore._is_numeric |
def open(self):
"""Open a pre-existing on-disk database.
@raise anydbm.error: If there's a problem opening the database.
@raise ValueError: If the database is not of the right type.
"""
if not self.filename:
raise ValueError("Can only open on-disk databases")
self.db = dbm.open(self.filename, "w") #raises anydbm.error
try:
if self.db["--Reserved--type"] != self.type:
raise ValueError("Not a %s database" % self.type)
except __HOLE__:
raise ValueError("Not a recognized database") | KeyError | dataset/ETHPy150Open kuri65536/python-for-android/python3-alpha/python-libs/gdata/tlslite/BaseDB.py/BaseDB.open |
def get_current_log_level(self):
"""
        Fetches the current log level; the level is set to INFO
        by default if an unknown level is given in the config
Args:
None
Return:
Log level
Raise:
None
"""
try:
if (self.log_config['log_level'] and
self.log_config['log_level'] in self.log_levels):
return self.log_levels.index(self.log_config['log_level'])
else:
print ("[WARNING]: Unknown log_level defined in the log_config"
"configuration: {0}. Overriding it to the default"
"level: INFO".format(self.log_config['log_level']))
self.log_config['log_level'] = "INFO"
return self.log_levels.index(self.log_config['log_level'])
except __HOLE__:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> log_level option. Set it to false if you "
"want to, disable verbose logging")
sys.exit(1) | KeyError | dataset/ETHPy150Open linkedin/simoorg/src/simoorg/Logger.py/Logger.get_current_log_level |
def is_verbose_logging_enabled(self):
"""
Check if verbose flag is set
Args:
None
Return:
True if verbose flag is set else False
Raise:
None
"""
try:
if self.log_config['verbose']:
return True
else:
return False
except __HOLE__:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> verbose option. Set it to false if you want"
" to disable verbose logging")
sys.exit(1) | KeyError | dataset/ETHPy150Open linkedin/simoorg/src/simoorg/Logger.py/Logger.is_verbose_logging_enabled |
def is_console_logging_enabled(self):
"""
Check if console logging is enabled
Args:
None
Return:
True if console logging is enabled else False
Raise:
None
"""
try:
if self.log_config['console']:
return True
else:
return False
except __HOLE__:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> console option. Set it to false if you "
"want to disable logging into console")
sys.exit(1) | KeyError | dataset/ETHPy150Open linkedin/simoorg/src/simoorg/Logger.py/Logger.is_console_logging_enabled |
def is_file_logging_enabled(self):
"""
Check if file logging is enabled
Args:
None
Return:
True if console logging is enabled else False
Raise:
None
"""
try:
if self.log_config['path']:
return True
else:
return False
except __HOLE__:
print ("[FATAL]: Logger config is incomplete. Please define "
"log_config-> path option. Set it to false if you want to "
"disable it.")
sys.exit(1) | KeyError | dataset/ETHPy150Open linkedin/simoorg/src/simoorg/Logger.py/Logger.is_file_logging_enabled |
def get_logger_file_path(self):
"""
Fetch the logger file path
Args:
None
Return:
Path to the log file
Raise:
None
"""
try:
if self.log_config['path']:
return self.log_config['path']
else:
return None
except __HOLE__:
print "[FATAL]: Logger configuration is not defined"
sys.exit(1) | KeyError | dataset/ETHPy150Open linkedin/simoorg/src/simoorg/Logger.py/Logger.get_logger_file_path |
def init_log(log_level=WARNING, log_path=True, log_truncate=False,
log_size=10000000, log_numbackups=1, log_color=True):
formatter = Formatter(
"%(asctime)s %(levelname)s %(name)s %(filename)s:%(funcName)s():L%(lineno)d %(message)s"
)
# We'll always use a stream handler
stream_handler = StreamHandler(sys.stdout)
if log_color:
color_formatter = ColorFormatter(
'$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s %(filename)s:%(funcName)s():L%(lineno)d $RESET %(message)s'
)
stream_handler.setFormatter(color_formatter)
else:
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
# If we have a log path, we'll also setup a log file
if log_path:
if not isinstance(log_path, str):
data_directory = os.path.expandvars('$XDG_DATA_HOME')
if data_directory == '$XDG_DATA_HOME':
# if variable wasn't set
data_directory = os.path.expanduser("~/.local/share")
data_directory = os.path.join(data_directory, 'qtile')
if not os.path.exists(data_directory):
os.makedirs(data_directory)
log_path = os.path.join(data_directory, '%s.log')
try:
log_path %= 'qtile'
except __HOLE__: # Happens if log_path doesn't contain formatters.
pass
log_path = os.path.expanduser(log_path)
if log_truncate:
with open(log_path, "w"):
pass
file_handler = RotatingFileHandler(
log_path,
maxBytes=log_size,
backupCount=log_numbackups
)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(log_level)
# Capture everything from the warnings module.
captureWarnings(True)
warnings.simplefilter("always")
logger.warning('Starting logging for Qtile')
return logger | TypeError | dataset/ETHPy150Open qtile/qtile/libqtile/log_utils.py/init_log |
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
# OPENSLIDES: We do not use the django autoreload command
# autoreload.raise_last_exception()
# OPENSLIDES: This line is not needed by tornado
# threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
self.check_migrations()
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write(now)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
try:
handler = self.get_handler(*args, **options)
run_tornado(
self.addr,
int(self.port),
handler,
ipv6=self.use_ipv6)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except __HOLE__:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0) | KeyError | dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/core/management/commands/runserver.py/Command.inner_run |
def is_dir(path):
try:
return (040000 & (os.stat(path).st_mode)) > 0
except __HOLE__:
return False | OSError | dataset/ETHPy150Open richo/groundstation/groundstation/utils/__init__.py/is_dir |
def truncateletters(value, arg):
"""
Truncates a string after a certain number of letters
Argument: Number of letters to truncate after
"""
from django_extensions.utils.text import truncate_letters
try:
length = int(arg)
except __HOLE__: # invalid literal for int()
return value # Fail silently
return truncate_letters(value, length) | ValueError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/django-extensions-1.5.0/django_extensions/templatetags/truncate_letters.py/truncateletters |
@classmethod
def setUpClass(cls):
cls.static_files = Files('static', css='styles.css')
settings.STATICFILES_DIRS = [cls.static_files.directory]
settings.WHITENOISE_USE_FINDERS = True
settings.WHITENOISE_AUTOREFRESH = True
# Clear cache to pick up new settings
try:
finders.get_finder.cache_clear()
except __HOLE__:
finders._finders.clear()
# Initialize test application
cls.application = cls.init_application()
cls.server = TestServer(cls.application)
super(UseFindersTest, cls).setUpClass() | AttributeError | dataset/ETHPy150Open evansd/whitenoise/tests/test_django_whitenoise.py/UseFindersTest.setUpClass |
def _parse_ip(string, families=(socket.AF_INET, socket.AF_INET6)):
for family in families:
try:
return socket.inet_ntop(family, socket.inet_pton(family, string))
except (__HOLE__, socket.error):
pass
return None | ValueError | dataset/ETHPy150Open abusesa/abusehelper/abusehelper/core/cymruwhois.py/_parse_ip |
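# Quick illustration of the normalisation performed by _parse_ip() above
# (assuming the function is in scope): valid addresses round-trip through
# inet_pton/inet_ntop, IPv6 comes back in compressed form, and anything
# unparsable maps to None.
print(_parse_ip('192.0.2.1'))              # '192.0.2.1'
print(_parse_ip('2001:0db8:0000::0001'))   # '2001:db8::1'
print(_parse_ip('not-an-ip'))              # None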
@idiokit.stream
def lookup(self, asn):
try:
asn = int(asn)
except __HOLE__:
idiokit.stop(())
results = self._cache.get(asn, None)
if results is not None:
idiokit.stop(results)
try:
txt_results = yield dns.txt(
"AS{0}.asn.cymru.com".format(asn),
resolver=self._resolver)
except dns.DNSError:
idiokit.stop(())
results = _split(txt_results, self._keys)
self._cache.set(asn, results)
idiokit.stop(results) | ValueError | dataset/ETHPy150Open abusesa/abusehelper/abusehelper/core/cymruwhois.py/ASNameLookup.lookup |
def action_redis_server_disconnect(config):
"""
Disconnect one or more users from server
"""
log.warning(" - Trying to connect with redis server...")
# Connection with redis
con = redis.StrictRedis(host=config.target, port=config.port, db=config.db)
clients = {x['addr']: x['addr'] for x in con.client_list()}
# Disconnect all clients?
if config.disconnect_all:
for c in clients:
try:
con.client_kill(c)
log.error(" - Client '%s' was disconnected" % c)
except redis.exceptions.ResponseError:
log.error(" - Client '%s' is not connected" % c)
# Disconnect only one user
else:
# Check client format
if config.client is None or ":" not in config.client:
log.error(" <!> Invalid client format. Client must be format: IP:PORT, i.e: 10.211.55.2:61864")
return
try:
_c = clients[config.client]
try:
con.client_kill(_c)
log.error(" - Client '%s' was disconnected" % _c)
except redis.exceptions.ResponseError:
log.error(" - Client '%s' is not connected" % _c)
except __HOLE__:
log.error(" <!> Client '%s' doesn't appear to be connected to server" % config.client) | KeyError | dataset/ETHPy150Open cr0hn/enteletaor/enteletaor_lib/modules/redis/redis_disconnect.py/action_redis_server_disconnect |
def lookup_failure(self, artifact_name):
"""Looks up a failure for the given artifact name."""
fn = self.get_filename(artifact_name)
try:
with open(fn, 'rb') as f:
return BuildFailure(json.load(f))
except __HOLE__ as e:
if e.errno != errno.ENOENT:
raise | IOError | dataset/ETHPy150Open lektor/lektor-archive/lektor/buildfailures.py/FailureController.lookup_failure |
def clear_failure(self, artifact_name):
"""Clears a stored failure."""
try:
os.unlink(self.get_filename(artifact_name))
except __HOLE__ as e:
if e.errno != errno.ENOENT:
raise | OSError | dataset/ETHPy150Open lektor/lektor-archive/lektor/buildfailures.py/FailureController.clear_failure |
def store_failure(self, artifact_name, exc_info):
"""Stores a failure from an exception info tuple."""
fn = self.get_filename(artifact_name)
try:
os.makedirs(os.path.dirname(fn))
except __HOLE__:
pass
with open(fn, 'wb') as f:
json.dump(BuildFailure.from_exc_info(
artifact_name, exc_info).to_json(), f)
f.write('\n') | OSError | dataset/ETHPy150Open lektor/lektor-archive/lektor/buildfailures.py/FailureController.store_failure |
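# Minimal illustration of the store-on-failure pattern above: capture
# sys.exc_info() inside an except block and turn it into a JSON-friendly
# record. The field names here are made up and are not lektor's BuildFailure
# schema.
import sys
import traceback

def failure_record(artifact_name, exc_info):
    exc_type, exc_value, tb = exc_info
    return {
        'artifact': artifact_name,
        'exception': exc_type.__name__,
        'message': str(exc_value),
        'traceback': ''.join(traceback.format_tb(tb)),
    }

try:
    1 / 0
except Exception:
    print(failure_record('example/artifact', sys.exc_info())['exception'])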
def _get_package_path(name):
"""Returns the path to a package or cwd if that cannot be found."""
try:
return os.path.abspath(os.path.dirname(sys.modules[name].__file__))
except (KeyError, __HOLE__):
return os.getcwd() | AttributeError | dataset/ETHPy150Open hhstore/flask-annotated/flask-0.5/flask/helpers.py/_get_package_path |
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
response = self.oauth_request(
access_token, 'http://api.openstreetmap.org/api/0.6/user/details'
)
try:
dom = minidom.parseString(response.content)
except __HOLE__:
return None
user = dom.getElementsByTagName('user')[0]
try:
avatar = dom.getElementsByTagName('img')[0].getAttribute('href')
except IndexError:
avatar = None
return {
'id': user.getAttribute('id'),
'username': user.getAttribute('display_name'),
'account_created': user.getAttribute('account_created'),
'avatar': avatar
} | ValueError | dataset/ETHPy150Open omab/python-social-auth/social/backends/openstreetmap.py/OpenStreetMapOAuth.user_data |
@no_auto_transaction
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_logs(auth, node, **kwargs):
"""
"""
try:
page = int(request.args.get('page', 0))
except __HOLE__:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
if not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
if 'count' in request.args:
count = int(request.args['count'])
elif 'count' in kwargs:
count = kwargs['count']
elif request.json and 'count' in request.json.keys():
count = request.json['count']
else:
count = 10
# Serialize up to `count` logs in reverse chronological order; skip
# logs that the current user / API key cannot access
logs, total, pages = _get_logs(node, count, auth, page)
return {'logs': logs, 'total': total, 'pages': pages, 'page': page} | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/log.py/get_logs |
def _what(self, instance, level, depth):
if level <= 1:
if depth == 1:
return Page.objects.toplevel_navigation()
else:
return Page.objects.in_navigation().filter(level__lt=depth)
# mptt starts counting at 0, NavigationNode at 1; if we need the submenu
# of the current page, we have to add 2 to the mptt level
if instance.__class__.__name__ != 'PagePretender':
if instance.level + 2 == level:
pass
elif instance.level + 2 < level:
try:
queryset = instance.get_descendants().filter(level=level - 2, in_navigation=True)
instance = PageManager.apply_active_filters(queryset)[0]
except __HOLE__:
return []
else:
instance = instance.get_ancestors()[level - 2]
# special case for the navigation extension
if getattr(instance, 'navigation_extension', None):
return instance.extended_navigation(depth=depth,
request=self.render_context.get('request', None))
else:
if depth == 1:
try:
if instance.__class__.__name__ == 'PagePretender':
return instance.children
else:
return instance.children.in_navigation()
except:
return []
else:
queryset = instance.get_descendants().filter(level__lte=instance.level + depth, in_navigation=True)
return PageManager.apply_active_filters(queryset) | IndexError | dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/nav/templatetags/webcms_nav_tags.py/NavigationNode._what |