Unnamed: 0
int64 0
10k
| function
stringlengths 79
138k
| label
stringclasses 20
values | info
stringlengths 42
261
|
---|---|---|---|
4,600 | def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except __HOLE__:
# Couldn't parse the rule
LOG.exception(_LE("Failed to understand rule %s") % rule)
# Fail closed
return FalseCheck() | ValueError | dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/openstack/common/policy.py/_parse_text_rule |
4,601 | def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except __HOLE__:
# We don't have any matching rule; fail closed
return False | KeyError | dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/openstack/common/policy.py/RuleCheck.__call__ |
4,602 | def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
try:
match = self.match % target
except __HOLE__:
# While doing GenericCheck if key not
# present in Target return false
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
kind_parts = self.kind.split('.')
leftval = creds
for kind_part in kind_parts:
leftval = leftval[kind_part]
except KeyError:
return False
return match == six.text_type(leftval) | KeyError | dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/openstack/common/policy.py/GenericCheck.__call__ |
4,603 | @classmethod
def _CreateSchemaWithoutProperties(cls, api, name, def_dict, wire_name,
parent):
if parent:
# code objects have __getitem__(), but not .get()
try:
pname = parent['id']
except __HOLE__:
pname = '<unknown>'
name_to_log = '%s.%s' % (pname, name)
else:
name_to_log = name
logging.warning('object without properties %s: %s',
name_to_log, def_dict)
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
return schema | KeyError | dataset/ETHPy150Open google/apis-client-generator/src/googleapis/codegen/schema.py/Schema._CreateSchemaWithoutProperties |
4,604 | def __init__(self, api, schema, name, def_dict, key_for_variants=None):
"""Construct a Property.
A Property requires several elements in its template value dictionary which
are set here:
wireName: the string which labels this Property in the JSON serialization.
dataType: the DataType of this property.
Args:
api: (Api) The Api which owns this Property
schema: (Schema) the schema this Property is part of
name: (string) the name for this Property
def_dict: (dict) the JSON schema dictionary
key_for_variants: (dict) if given, maps discriminator values to
variant schemas.
Raises:
ApiException: If we have an array type without object definitions.
"""
super(Property, self).__init__(def_dict, api, wire_name=name)
self.ValidateName(name)
self.schema = schema
self._key_for_variants = key_for_variants
# TODO(user): find a better way to mark a schema as an array type
# so we can display schemas like BlogList in method responses
try:
if self.values['wireName'] == 'items' and self.values['type'] == 'array':
self.schema.values['isList'] = True
except __HOLE__:
pass
# If the schema value for this property defines a new object directly,
# rather than refering to another schema, we will have to create a class
# name for it. We create a unique name by prepending the schema we are
# in to the object name.
tentative_class_name = api.NestedClassNameForProperty(name, schema)
self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name,
parent=schema, wire_name=name) | KeyError | dataset/ETHPy150Open google/apis-client-generator/src/googleapis/codegen/schema.py/Property.__init__ |
4,605 | def ParseFieldDescs():
global fields_to_derive
global fields
f = open('.bookworm/metadata/field_descriptions.json', 'r')
try:
fields = json.loads(f.read())
except __HOLE__:
raise ValueError("Error parsing JSON: Check to make sure that your field_descriptions.json file is valid?")
f.close()
derivedFile = open('.bookworm/metadata/field_descriptions_derived.json', 'w')
output = []
for field in fields:
if field["datatype"] == "time":
if "derived" in field:
fields_to_derive.append(field)
else:
output.append(field)
else:
output.append(field)
for field in fields_to_derive:
for derive in field["derived"]:
if "aggregate" in derive:
tmp = dict(datatype="time", type="integer", unique=True)
tmp["field"] = '_'.join([field["field"], derive["resolution"],
derive["aggregate"]])
output.append(tmp)
else:
tmp = dict(datatype="time", type="integer", unique=True)
tmp["field"] = '_'.join([field["field"], derive["resolution"]])
output.append(tmp)
derivedFile.write(json.dumps(output))
derivedFile.close() | ValueError | dataset/ETHPy150Open Bookworm-project/BookwormDB/bookwormDB/MetaParser.py/ParseFieldDescs |
4,606 | def ParseJSONCatalog(target="default",source = "default"):
global fields_to_derive
if target=="default":
target=open(".bookworm/metadata/jsoncatalog_derived.txt", "w")
if source=="default":
source = open(".bookworm/metadata/jsoncatalog.txt", "r")
f = target
for data in source:
for char in ['\t', '\n']:
data = data.replace(char, '')
try:
line = json.loads(data)
except:
sys.stderr.write('JSON Parsing Failed:\n%s\n' % data)
continue
for field in fields:
# Smash together misidentified lists
try:
if field['unique'] and isinstance(line[field["field"]],list):
line[field["field"]] = "--".join(line[field["field"]])
except KeyError:
pass
for field in fields_to_derive:
"""
Using fields_to_derive as a shorthand for dates--this may break if we get more ambitious about derived fields,
but this whole metadata-parsing code needs to be refactored anyway.
Note: this code is inefficient--it parses the same date multiple times. We should be parsing the date once and pulling
derived fields out of that one parsing.
"""
try:
if line[field["field"]]=="":
# Use blankness as a proxy for unknown
continue
time = dateutil.parser.parse(line[field["field"]],default = defaultDate)
intent = [time.year,time.month,time.day]
content = [str(item) for item in intent]
pass
except:
"""
Fall back to parsing as strings
"""
try:
datem = line[field["field"]].split("T")[0]
content = datem.split('-')
intent = [int(item) for item in content]
except __HOLE__:
#It's OK not to have an entry for a time field
continue
except ValueError:
# Thrown if fields are empty on taking the int value: treat as junk
continue
except AttributeError:
"""
Happens if it's an integer, which is a forgiveable way
to enter a year:
"""
content = [str(line[field['field']])]
intent = [line[field['field']]]
if not content:
continue
else:
for derive in field["derived"]:
try:
if "aggregate" in derive:
if derive["resolution"] == 'day' and \
derive["aggregate"] == "year":
k = "%s_day_year" % field["field"]
dt = date(intent[0], intent[1], intent[2])
line[k] = dt.timetuple().tm_yday
elif derive["resolution"] == 'day' and \
derive["aggregate"] == "month":
k = "%s_day_month" % field["field"]
line[k] = intent[2]
elif derive["resolution"] == 'day' and \
derive["aggregate"] == "week":
k = "%s_day_month" % field["field"]
dt = date(intent[0], intent[1], intent[2])
#Python and javascript handle weekdays differently:
#Like JS, we want to begin on Sunday with zero
line[k] = dt.weekday() + 1
if (line[k])==7: line[k] = 0
elif derive["resolution"] == 'month' and \
derive["aggregate"] == "year":
k = "%s_month_year" % field["field"]
dt = date(1,intent[1],1)
line[k] = dt.timetuple().tm_yday
elif derive["resolution"] == 'week' and \
derive["aggregate"] == "year":
dt = date(intent[0], intent[1], intent[2])
k = "%s_week_year" % field["field"]
line[k] = int(dt.timetuple().tm_yday/7)*7
elif derive["resolution"] == 'hour' and \
derive["aggregate"] == "day":
k = "%s_hour_day" % field["field"]
line[k] = time.hour
elif derive["resolution"] == 'minute' and \
derive["aggregate"] == "day":
k = "%s_hour_day" % field["field"]
line[k] = time.hour*60 + time.minute
else:
logging.warning('Problem with aggregate resolution.')
continue
else:
if derive["resolution"] == 'year':
line["%s_year" % field["field"]] = intent[0]
elif derive["resolution"] == 'month':
try:
k = "%s_month" % field["field"]
dt = date(intent[0], intent[1], 1)
line[k] = DaysSinceZero(dt)
except:
logging.warning("Problem with date fields\n")
pass
elif derive['resolution'] == 'week':
k = "%s_week" % field['field']
dt = date(intent[0], intent[1], intent[2])
inttime = DaysSinceZero(dt)
time = int(inttime/7)*7
#Not starting on Sunday or anything funky like that. Actually, I don't know what we're starting on. Adding an integer here would fix that.
line[k] = time
elif derive['resolution'] == 'day':
k = "%s_day" % field['field']
dt = date(intent[0], intent[1], intent[2])
inttime = DaysSinceZero(dt)
line[k] = inttime
else:
logging.warning('Resolution %s currently not supported.' %(derive['resolution']))
continue
except ValueError:
# One of out a million Times articles threw this with
# a year of like 111,203. It's not clear how best to
# handle this.
logging.warning( "ERROR: %s " % line[field["field"]] + \
"did not convert to proper date. Moving on...")
#raise
pass
except Exception, e:
logging.warning( '*'*50)
logging.warning('ERROR: %s\nINFO: %s\n' % (str(e), e.__doc__))
logging.warning( '*'*50)
line.pop(field["field"])
f.write('%s\n' % json.dumps(line))
f.flush()
f.close() | KeyError | dataset/ETHPy150Open Bookworm-project/BookwormDB/bookwormDB/MetaParser.py/ParseJSONCatalog |
4,607 | def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return os.environ[setting]
except __HOLE__:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
# Your project root | KeyError | dataset/ETHPy150Open xenith/django-base-template/project_name/settings/base.py/get_env_setting |
4,608 | def getChild(self, name, request):
if name == '':
return self
td = '.twistd'
if name[-len(td):] == td:
username = name[:-len(td)]
sub = 1
else:
username = name
sub = 0
try:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell \
= self._pwd.getpwnam(username)
except __HOLE__:
return resource.NoResource()
if sub:
twistdsock = os.path.join(pw_dir, self.userSocketName)
rs = ResourceSubscription('unix',twistdsock)
self.putChild(name, rs)
return rs
else:
path = os.path.join(pw_dir, self.userDirName)
if not os.path.exists(path):
return resource.NoResource()
return static.File(path) | KeyError | dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/web/distrib.py/UserDirectory.getChild |
4,609 | def test_failure():
# urllib tries 5 more times before it gives up
server.accept(5)
try:
authfetch('bing','wrong')
assert False, "this should raise an exception"
except __HOLE__ as e:
assert e.code == 401 | HTTPError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/tests/test_auth/test_auth_digest.py/test_failure |
4,610 | def test_strslice(self):
try:
str(self.annot)
assert 0, "should not get here"
except __HOLE__:
pass | ValueError | dataset/ETHPy150Open cjlee112/pygr/tests/annotation_test.py/AnnotationSeq_Test.test_strslice |
4,611 | def test_setitem(self):
try:
self.db['foo'] = 'bar' # use 'add_annotation' instead
assert 0, "should not reach this point"
except __HOLE__:
pass | KeyError | dataset/ETHPy150Open cjlee112/pygr/tests/annotation_test.py/AnnotationDB_Test.test_setitem |
4,612 | def test_readonly(self):
"AnnotationDB readonly"
try:
self.db.copy() # what should 'copy' do on AD?
assert 0, 'this method should raise NotImplementedError'
except NotImplementedError:
pass
try: # what should 'setdefault' do on AD?
self.db.setdefault('foo')
assert 0, 'this method should raise NotImplementedError'
except NotImplementedError:
pass
try: # what should 'update' do on AD?
self.db.update({})
assert 0, 'this method should raise NotImplementedError'
except NotImplementedError:
pass
try:
self.db.clear()
assert 0, 'this method should raise NotImplementedError'
except __HOLE__:
pass
try:
self.db.pop()
assert 0, 'this method should raise NotImplementedError'
except NotImplementedError:
pass
try:
self.db.popitem()
assert 0, 'this method should raise NotImplementedError'
except NotImplementedError:
pass | NotImplementedError | dataset/ETHPy150Open cjlee112/pygr/tests/annotation_test.py/AnnotationDB_Test.test_readonly |
4,613 | def test_bad_seqdict(self):
"AnnotationDB bad seqdict"
class Annotation(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
slicedb = dict(annot1=Annotation(id='seq', start=0, stop=10),
annot2=Annotation(id='seq', start=5, stop=9))
foo_dict = dict(foo=Sequence('ATGGGGCCGATTG', 'foo'))
try:
db = AnnotationDB(slicedb, foo_dict)
assert 0, "incorrect seqdb; key error should be raised"
except __HOLE__:
pass | KeyError | dataset/ETHPy150Open cjlee112/pygr/tests/annotation_test.py/AnnotationDB_Test.test_bad_seqdict |
4,614 | def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
USE_I18N=True,
ROOT_URLCONF="tests.urls",
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.sqlite3",
}
},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"addendum",
],
# TODO conditionally include south
MIDDLEWARE_CLASSES=(), # Silence Django 1.7 warnings
SITE_ID=1,
FIXTURE_DIRS=['tests/fixtures'],
LANGUAGE_CODE='es',
)
try:
django.setup()
except __HOLE__:
pass | AttributeError | dataset/ETHPy150Open bennylope/django-addendum/conftest.py/pytest_configure |
4,615 | def import_teams(fileToImport):
try:
sh = xlrd.open_workbook(filename=None, file_contents=fileToImport.read()).sheet_by_index(0)
except:
return ['ERROR: Please upload an .xlsx file. This filetype is not compatible']
num_teams = 0
found_end = False
team_errors = []
while found_end == False:
try:
sh.cell(num_teams, 0).value
num_teams +=1
except IndexError:
found_end = True
#Verify sheet has required number of columns
try:
sh.cell(0, 8).value
except:
team_errors.append('ERROR: Insufficient Columns in Sheet. No Data Read')
return team_errors
for i in range(1, num_teams):
team_name = sh.cell(i, 0).value
if team_name == '':
team_errors.append('Row ' + str(i) + ': Empty Team Name')
continue
try:
Team.objects.get(name=team_name)
team_errors.append(team_name + ': Duplicate Team Name')
continue
except:
pass
school_name = sh.cell(i, 1).value.strip()
try:
team_school = School.objects.get(name__iexact=school_name)
except:
#Create school through SchoolForm because for some reason they don't save otherwise
form = SchoolForm(data={'name': school_name})
if form.is_valid():
form.save()
else:
team_errors.append(team_name + ": Invalid School")
continue
team_school = School.objects.get(name__iexact=school_name)
#TODO: Verify there are not multiple free seeds from the same school
team_seed = sh.cell(i,2).value.strip().lower()
if team_seed == 'full seed' or team_seed == 'full':
team_seed = 3
elif team_seed == 'half seed' or team_seed == 'half':
team_seed = 2
elif team_seed == 'free seed' or team_seed == 'free':
team_seed = 1
elif team_seed == 'unseeded' or team_seed == 'un' or team_seed == 'none' or team_seed == '':
team_seed = 0
else:
team_errors.append(team_name + ': Invalid Seed Value')
continue
deb1_name = sh.cell(i,3).value
if deb1_name == '':
team_errors.append(team_name + ': Empty Debater-1 Name')
continue
try:
Debater.objects.get(name=deb1_name)
team_errors.append(team_name + ': Duplicate Debater-1 Name')
continue
except:
pass
deb1_status = sh.cell(i,4).value.lower()
if deb1_status == 'novice' or deb1_status == 'nov' or deb1_status == 'n':
deb1_status = 1
else:
deb1_status = 0
deb1_phone = sh.cell(i,5).value
deb1_provider = sh.cell(i,6).value
iron_man = False
deb2_name = sh.cell(i,7).value
if deb2_name == '':
iron_man = True
if (not iron_man):
try:
Debater.objects.get(name=deb2_name)
team_errors.append(team_name + ': Duplicate Debater-2 Name')
continue
except:
pass
deb2_status = sh.cell(i,8).value.lower()
if deb2_status == 'novice' or deb2_status == 'nov' or deb2_status == 'n':
deb2_status = 1
else:
deb2_status = 0
#Since this is not required data and at the end of the sheet, be ready for index errors
try:
deb2_phone = sh.cell(i,9).value
except IndexError:
deb2_phone = ''
try:
deb2_provider = sh.cell(i,10).value
except __HOLE__:
deb2_provider = ''
#Save Everything
try:
deb1 = Debater(name = deb1_name, novice_status = deb1_status, phone = deb1_phone, provider = deb1_provider)
deb1.save()
except:
team_errors.append(team_name + ': Unkown Error Saving Debater 1')
continue
if (not iron_man):
try:
deb2 = Debater(name = deb2_name, novice_status = deb2_status, phone = deb2_phone, provider = deb2_provider)
deb2.save()
except:
team_errors.append(team_name + ': Unkown Error Saving Debater 2')
team_errors.append(' WARNING: Debaters on this team may be added to database. ' +
'Please Check this Manually')
continue
team = Team(name=team_name, school=team_school, seed=team_seed)
try:
team.save()
team.debaters.add(deb1)
if (not iron_man):
team.debaters.add(deb2)
else:
team_errors.append(team_name + ": Detected to be Iron Man - Still added successfully")
team.save()
except:
team_errors.append(team_name + ': Unknown Error Saving Team')
team_errors.append(' WARNING: Debaters on this team may be added to database. ' +
'Please Check this Manually')
return team_errors | IndexError | dataset/ETHPy150Open jolynch/mit-tab/mittab/libs/data_import/import_teams.py/import_teams |
4,616 | def copy_template(template_name, copy_to, tag_library_name):
"""copies the specified template directory to the copy_to location"""
import django_extensions
import shutil
template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
# walks the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir) + 1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(copy_to, relative_dir, f.replace('sample', tag_library_name))
if os.path.exists(path_new):
path_new = os.path.join(copy_to, relative_dir, f)
if os.path.exists(path_new):
continue
path_new = path_new.rstrip(".tmpl")
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read())
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except __HOLE__:
sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new) | OSError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/create_template_tags.py/copy_template |
4,617 | def rst2tex(in_path, out_path):
dir_util.copy_tree(in_path, out_path)
base_dir = os.path.dirname(__file__)
scipy_status = os.path.join(base_dir, '_static/status.sty')
shutil.copy(scipy_status, out_path)
scipy_style = os.path.join(base_dir, '_static/scipy.sty')
shutil.copy(scipy_style, out_path)
preamble = r'''\usepackage{scipy}'''
# Add the LaTeX commands required by Pygments to do syntax highlighting
pygments = None
try:
import pygments
except __HOLE__:
import warnings
warnings.warn(RuntimeWarning('Could not import Pygments. '
'Syntax highlighting will fail.'))
if pygments:
from pygments.formatters import LatexFormatter
from writer.sphinx_highlight import SphinxStyle
preamble += LatexFormatter(style=SphinxStyle).get_style_defs()
settings = {'documentclass': 'IEEEtran',
'use_verbatim_when_possible': True,
'use_latex_citations': True,
'latex_preamble': preamble,
'documentoptions': 'letterpaper,compsoc,twoside',
'halt_level': 3, # 2: warn; 3: error; 4: severe
}
try:
rst, = glob.glob(os.path.join(in_path, '*.rst'))
except ValueError:
raise RuntimeError("Found more than one input .rst--not sure which "
"one to use.")
content = header + open(rst, 'r').read()
tex = dc.publish_string(source=content, writer=writer,
settings_overrides=settings)
stats_file = os.path.join(out_path, 'paper_stats.json')
d = options.cfg2dict(stats_file)
try:
d.update(writer.document.stats)
options.dict2cfg(d, stats_file)
except AttributeError:
print("Error: no paper configuration found")
tex_file = os.path.join(out_path, 'paper.tex')
with open(tex_file, 'w') as f:
f.write(tex) | ImportError | dataset/ETHPy150Open scipy-conference/scipy_proceedings/publisher/build_paper.py/rst2tex |
4,618 | def has_module(module):
try:
__import__(module)
except __HOLE__:
warn(module + ' module not available for testing, '
'consider installing')
return False
return True | ImportError | dataset/ETHPy150Open jsonpickle/jsonpickle/tests/backend_test.py/has_module |
4,619 | def get_data_files():
# generate man pages using rst2man
try:
subprocess.call(["rst2man", "nipapd.man.rst", "nipapd.8"])
subprocess.call(["rst2man", "nipap-passwd.man.rst", "nipap-passwd.1"])
except __HOLE__ as exc:
print >> sys.stderr, "rst2man failed to run:", str(exc)
sys.exit(1)
files = [
('/etc/nipap/', ['nipap.conf.dist']),
('/usr/sbin/', ['nipapd', 'nipap-passwd']),
('/usr/share/nipap/sql/', [
'sql/upgrade-1-2.plsql',
'sql/upgrade-2-3.plsql',
'sql/upgrade-3-4.plsql',
'sql/upgrade-4-5.plsql',
'sql/upgrade-5-6.plsql',
'sql/functions.plsql',
'sql/triggers.plsql',
'sql/ip_net.plsql'
]),
('/usr/share/man/man8/', ['nipapd.8']),
('/usr/share/man/man1/', ['nipap-passwd.1'])
]
return files | OSError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap/setup.py/get_data_files |
4,620 | def main():
manager.connect()
try:
manager.loop.run_forever()
except __HOLE__:
manager.loop.close() | KeyboardInterrupt | dataset/ETHPy150Open gawel/panoramisk/examples/event_listener.py/main |
4,621 | def parse_html(self, r):
alerts = []
if has_lxml:
try:
root = lxml.html.fromstring(r.response.content)
content = ''.join([x for x in root.xpath("//text()") if
x.getparent().tag != "script"])
except __HOLE__:
alerts.append(stealthy("reponse.content-type says html but " \
"unable to parse"))
return alerts
except lxml.etree.ParserError:
return alerts
else:
content = r.response.content
for e in self.html_patterns:
if e.search(content):
if has_lxml: # We are more confident of what we've found
alerts.append(error("response.content matches " + e.pattern))
else:
alerts.append(warning("response.content matches " + e.pattern))
return alerts | UnicodeDecodeError | dataset/ETHPy150Open securusglobal/abrupt/abrupt/alert.py/GenericAlerter.parse_html |
4,622 | def tearDown(self):
try:
os.environ = self.env
except __HOLE__:
pass | AttributeError | dataset/ETHPy150Open openelections/openelections-core/openelex/tests/test_config.py/TestSettings.tearDown |
4,623 | def plot_loss(self):
try:
from matplotlib import pyplot
except __HOLE__:
"Can not plot loss, matplotlib required"
pyplot.plot(self.loss[1:])
pyplot.xlabel("Iteration")
pyplot.ylabel("Loss")
pyplot.show() | ImportError | dataset/ETHPy150Open sisl/Chimp/chimp/agents/dqn_agent.py/DQNAgent.plot_loss |
4,624 | def plot_eval_reward(self):
try:
from matplotlib import pyplot
except __HOLE__:
"Can not plot loss, matplotlib required"
pyplot.plot(self.eval_every * np.arange(len(self.r_eval)), self.r_eval)
pyplot.xlabel("Reward")
pyplot.ylabel("Loss")
pyplot.show() | ImportError | dataset/ETHPy150Open sisl/Chimp/chimp/agents/dqn_agent.py/DQNAgent.plot_eval_reward |
4,625 | def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except __HOLE__:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value | TypeError | dataset/ETHPy150Open django/django/django/contrib/postgres/forms/ranges.py/BaseRangeField.compress |
4,626 | def get_input(
message = None,
validator = None,
suggestions = None,
is_path = False,
):
"""
Gets input from the user
Returns a valid value
Keyword arguments:
message -- printed first
validator -- a function that returns True for a valid input
suggestions -- a list of Suggestions
is_path -- if True, tab autocomplete will be turned on
"""
if suggestions is None:
suggestions = []
if message is not None:
print message
print
# print list of suggestions
max_width = 0
for s in suggestions:
if s.desc is not None and len(s.desc) > max_width:
max_width = len(s.desc)
if max_width > 0:
print '\tSuggested values:'
format_str = '\t%%-4s %%-%ds %%s' % (max_width+2,)
default_found = False
for s in suggestions:
c = s.char
if s.default and not default_found:
default_found = True
c += '*'
desc = ''
if s.desc is not None:
desc = '[%s]' % s.desc
print format_str % (('(%s)' % c), desc, value_to_str(s.value))
if is_path:
# turn on filename autocompletion
delims = readline.get_completer_delims()
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind('TAB: complete')
user_input = None
value = None
valid = False
while not valid:
try:
# Get user input
user_input = raw_input('>> ').strip()
except (__HOLE__, EOFError):
print
sys.exit(0)
if user_input == '':
for s in suggestions:
if s.default:
print 'Using "%s"' % s.value
if s.value is not None and validator is not None:
try:
value = validator(s.value)
valid = True
break
except config_option.BadValue as e:
print 'ERROR:', e
else:
value = s.value
valid = True
break
else:
if len(user_input) == 1:
for s in suggestions:
if s.char.lower() == user_input.lower():
print 'Using "%s"' % s.value
if s.value is not None and validator is not None:
try:
value = validator(s.value)
valid = True
break
except config_option.BadValue as e:
print 'ERROR:', e
else:
value = s.value
valid = True
break
if not valid and validator is not None:
if is_path:
user_input = os.path.expanduser(user_input)
try:
value = validator(user_input)
valid = True
print 'Using "%s"' % value
except config_option.BadValue as e:
print 'ERROR:', e
if not valid:
print 'Invalid input'
if is_path:
# back to normal
readline.set_completer_delims(delims)
readline.parse_and_bind('TAB: ')
return value | KeyboardInterrupt | dataset/ETHPy150Open NVIDIA/DIGITS/digits/config/prompt.py/get_input |
4,627 | def handle(self, *args, **kwargs):
env = os.environ.copy()
classpath = env.get('CLASSPATH', '').split(os.pathsep)
jar = spark.conf.LIVY_ASSEMBLY_JAR.get()
classpath.insert(0, jar)
# Add the hadoop classpath if it's available.
try:
p = subprocess.Popen(['hadoop', 'classpath'], stdout=subprocess.PIPE)
except __HOLE__:
pass
else:
hadoop_classpath = p.communicate()[0]
if p.wait() == 0:
classpath.append(hadoop_classpath)
args = [
"java",
]
args.extend(("-cp", os.pathsep.join(classpath)))
server_host = spark.conf.LIVY_SERVER_HOST.get()
args.append("-Dlivy.server.host=" + server_host)
server_port = spark.conf.LIVY_SERVER_PORT.get()
args.append("-Dlivy.server.port=" + server_port)
session_factory = spark.conf.LIVY_SERVER_SESSION_KIND.get()
args.append("-Dlivy.server.session.factory=" + session_factory)
livy_yarn_jar = spark.conf.LIVY_YARN_JAR.get()
if livy_yarn_jar:
args.append("-Dlivy.yarn.jar=" + livy_yarn_jar)
if spark.conf.LIVY_IMPERSONATION_ENABLED.get():
args.append("-Dlivy.impersonation.enabled=true")
else:
args.append("-Dlivy.impersonation.enabled=false")
args.append("com.cloudera.hue.livy.server.Main")
LOG.info("Executing %r (%r) (%r)" % (args[0], args, env))
# Use exec, so that this takes only one process.
os.execvpe(args[0], args, env) | OSError | dataset/ETHPy150Open cloudera/hue/apps/spark/src/spark/management/commands/livy_server.py/Command.handle |
4,628 | def tearDown(self):
try:
self.conchFactory.proto.done = 1
except __HOLE__:
pass
else:
self.conchFactory.proto.transport.loseConnection()
return defer.gatherResults([
defer.maybeDeferred(self.conchServer.stopListening),
defer.maybeDeferred(self.echoServer.stopListening)]) | AttributeError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/conch/test/test_conch.py/ConchServerSetupMixin.tearDown |
4,629 | def _ext_service(self, entity_id, typ, service, binding):
try:
srvs = self[entity_id][typ]
except __HOLE__:
return None
if not srvs:
return srvs
res = []
for srv in srvs:
if "extensions" in srv:
for elem in srv["extensions"]["extension_elements"]:
if elem["__class__"] == service:
if elem["binding"] == binding:
res.append(elem)
return res | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/mongo_store.py/MetadataMDB._ext_service |
4,630 | def get(self, key, default=None):
try:
return getattr(self.stats, key)
except __HOLE__:
return getattr(self, key, default) | AttributeError | dataset/ETHPy150Open ionelmc/pytest-benchmark/src/pytest_benchmark/stats.py/BenchmarkStats.get |
4,631 | def __getitem__(self, key):
try:
return getattr(self.stats, key)
except __HOLE__:
return getattr(self, key) | AttributeError | dataset/ETHPy150Open ionelmc/pytest-benchmark/src/pytest_benchmark/stats.py/BenchmarkStats.__getitem__ |
4,632 | def metric_filter(self, metrics, filter=None):
"""from a list of metrics ie ['cpuStats', 'CPUs', 'usr'] it constructs
a dictionary that can be sent to the metrics endpoint for consumption"""
metrics = list(metrics)
if not filter:
filter = {}
filter[metrics.pop()] = 'all'
return self.metric_filter(metrics, filter)
else:
try:
metric = metrics.pop()
dic = {metric: filter}
return self.metric_filter(metrics, dic)
except __HOLE__:
return metrics, filter | IndexError | dataset/ETHPy150Open serverdensity/sdbot/limbo/plugins/common/basewrapper.py/BaseWrapper.metric_filter |
4,633 | def __init__(self, problems, stop,
arrival_time=None, departure_time=None,
stop_headsign=None, pickup_type=None, drop_off_type=None,
shape_dist_traveled=None, arrival_secs=None,
departure_secs=None, stop_time=None, stop_sequence=None,
timepoint=None):
# Implementation note from Andre, July 22, 2010:
# The checks performed here should be in their own Validate* methods to
# keep consistency. Unfortunately the performance degradation is too great,
# so the validation was left in __init__.
# Performance is also the reason why we don't use the GtfsFactory, but
# have StopTime._STOP_CLASS instead. If a Stop class that does not inherit
# from transitfeed.Stop is used, the extension should also provide a
# StopTime class that updates _STOP_CLASS accordingly.
#
# For more details see the discussion at
# http://codereview.appspot.com/1713041
if stop_time != None:
arrival_time = departure_time = stop_time
if arrival_secs != None:
self.arrival_secs = arrival_secs
elif arrival_time in (None, ""):
self.arrival_secs = None # Untimed
arrival_time = None
else:
try:
self.arrival_secs = util.TimeToSecondsSinceMidnight(arrival_time)
except problems_module.Error:
problems.InvalidValue('arrival_time', arrival_time)
self.arrival_secs = None
if departure_secs != None:
self.departure_secs = departure_secs
elif departure_time in (None, ""):
self.departure_secs = None
departure_time = None
else:
try:
self.departure_secs = util.TimeToSecondsSinceMidnight(departure_time)
except problems_module.Error:
problems.InvalidValue('departure_time', departure_time)
self.departure_secs = None
if not isinstance(stop, self._STOP_CLASS):
# Not quite correct, but better than letting the problem propagate
problems.InvalidValue('stop', stop)
self.stop = stop
self.stop_headsign = stop_headsign
self.timepoint = util.ValidateAndReturnIntValue(
timepoint, [0, 1], None, True, 'timepoint', problems)
self.pickup_type = util.ValidateAndReturnIntValue(
pickup_type, [0, 1, 2, 3], None, True, 'pickup_type', problems)
self.drop_off_type = util.ValidateAndReturnIntValue(
drop_off_type, [0, 1, 2, 3], None, True, 'drop_off_type', problems)
if (self.pickup_type == 1 and self.drop_off_type == 1 and
self.arrival_secs == None and self.departure_secs == None):
problems.OtherProblem('This stop time has a pickup_type and '
'drop_off_type of 1, indicating that riders '
'can\'t get on or off here. Since it doesn\'t '
'define a timepoint either, this entry serves no '
'purpose and should be excluded from the trip.',
type=problems_module.TYPE_WARNING)
if ((self.arrival_secs != None) and (self.departure_secs != None) and
(self.departure_secs < self.arrival_secs)):
problems.InvalidValue('departure_time', departure_time,
'The departure time at this stop (%s) is before '
'the arrival time (%s). This is often caused by '
'problems in the feed exporter\'s time conversion'
% (departure_time, arrival_time))
# If the caller passed a valid arrival time but didn't attempt to pass a
# departure time complain
if (self.arrival_secs != None and
self.departure_secs == None and departure_time == None):
# self.departure_secs might be None because departure_time was invalid,
# so we need to check both
problems.MissingValue('departure_time',
'arrival_time and departure_time should either '
'both be provided or both be left blank. '
'It\'s OK to set them both to the same value.')
# If the caller passed a valid departure time but didn't attempt to pass a
# arrival time complain
if (self.departure_secs != None and
self.arrival_secs == None and arrival_time == None):
problems.MissingValue('arrival_time',
'arrival_time and departure_time should either '
'both be provided or both be left blank. '
'It\'s OK to set them both to the same value.')
if shape_dist_traveled in (None, ""):
self.shape_dist_traveled = None
else:
try:
self.shape_dist_traveled = float(shape_dist_traveled)
except __HOLE__:
problems.InvalidValue('shape_dist_traveled', shape_dist_traveled)
if stop_sequence is not None:
self.stop_sequence = stop_sequence | ValueError | dataset/ETHPy150Open google/transitfeed/transitfeed/stoptime.py/StopTime.__init__ |
4,634 | def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes off the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
try:
setattr(wrapper, attr, getattr(wrapped, attr))
except __HOLE__: # Python 2.3 doesn't allow assigning to __name__.
pass
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr))
# Return the wrapper so this can be used as a decorator via curry()
return wrapper | TypeError | dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/utils/functional.py/update_wrapper |
4,635 | def get_pid_by_line_number(self, lineno):
try:
# Account for header
return self.__command_window_line_map_order[lineno - 4]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open joonty/vim-do/autoload/python/rendering.py/ProcessRenderer.get_pid_by_line_number |
4,636 | def import_service(module_name):
parts = module_name.split(":", 1)
if len(parts) == 1:
module_name, obj = module_name, None
else:
module_name, obj = parts[0], parts[1]
try:
__import__(module_name)
except __HOLE__ as exc:
if module_name.endswith(".py") and os.path.exists(module_name):
raise CommandError(
"Failed to find service, did you mean '{}'?".format(
module_name[:-3].replace('/', '.')
)
)
missing_module_re = MISSING_MODULE_TEMPLATE.format(module_name)
# is there a better way to do this?
if re.match(missing_module_re, str(exc)):
raise CommandError(exc)
# found module, but importing it raised an import error elsewhere
# let this bubble (resulting in a full stacktrace being printed)
raise
module = sys.modules[module_name]
if obj is None:
found_services = []
# find top-level objects with entrypoints
for _, potential_service in inspect.getmembers(module, is_type):
if inspect.getmembers(potential_service, is_entrypoint):
found_services.append(potential_service)
if not found_services:
raise CommandError(
"Failed to find anything that looks like a service in module "
"{!r}".format(module_name)
)
else:
try:
service_cls = getattr(module, obj)
except AttributeError:
raise CommandError(
"Failed to find service class {!r} in module {!r}".format(
obj, module_name)
)
if not isinstance(service_cls, type):
raise CommandError("Service must be a class.")
found_services = [service_cls]
return found_services | ImportError | dataset/ETHPy150Open onefinestay/nameko/nameko/cli/run.py/import_service |
4,637 | def run(services, config, backdoor_port=None):
service_runner = ServiceRunner(config)
for service_cls in services:
service_runner.add_service(service_cls)
def shutdown(signum, frame):
# signal handlers are run by the MAINLOOP and cannot use eventlet
# primitives, so we have to call `stop` in a greenlet
eventlet.spawn_n(service_runner.stop)
signal.signal(signal.SIGTERM, shutdown)
if backdoor_port is not None:
setup_backdoor(service_runner, backdoor_port)
service_runner.start()
# if the signal handler fires while eventlet is waiting on a socket,
# the __main__ greenlet gets an OSError(4) "Interrupted system call".
# This is a side-effect of the eventlet hub mechanism. To protect nameko
# from seeing the exception, we wrap the runner.wait call in a greenlet
# spawned here, so that we can catch (and silence) the exception.
runnlet = eventlet.spawn(service_runner.wait)
while True:
try:
runnlet.wait()
except OSError as exc:
if exc.errno == errno.EINTR:
# this is the OSError(4) caused by the signalhandler.
# ignore and go back to waiting on the runner
continue
raise
except KeyboardInterrupt:
print() # looks nicer with the ^C e.g. bash prints in the terminal
try:
service_runner.stop()
except __HOLE__:
print() # as above
service_runner.kill()
else:
# runner.wait completed
break | KeyboardInterrupt | dataset/ETHPy150Open onefinestay/nameko/nameko/cli/run.py/run |
4,638 | @requires_system_grains
def test_groups_includes_primary(self, grains=None):
# Let's create a user, which usually creates the group matching the
# name
uname = self.__random_string()
if self.run_function('user.add', [uname]) is not True:
# Skip because creating is not what we're testing here
self.run_function('user.delete', [uname, True, True])
self.skipTest('Failed to create user')
try:
uinfo = self.run_function('user.info', [uname])
if grains['os_family'] in ('Suse',):
self.assertIn('users', uinfo['groups'])
else:
self.assertIn(uname, uinfo['groups'])
# This uid is available, store it
uid = uinfo['uid']
self.run_function('user.delete', [uname, True, True])
# Now, a weird group id
gname = self.__random_string()
if self.run_function('group.add', [gname]) is not True:
self.run_function('group.delete', [gname, True, True])
self.skipTest('Failed to create group')
ginfo = self.run_function('group.info', [gname])
# And create the user with that gid
if self.run_function('user.add', [uname, uid, ginfo['gid']]) is False:
# Skip because creating is not what we're testing here
self.run_function('user.delete', [uname, True, True])
self.skipTest('Failed to create user')
uinfo = self.run_function('user.info', [uname])
self.assertIn(gname, uinfo['groups'])
except __HOLE__:
self.run_function('user.delete', [uname, True, True])
raise | AssertionError | dataset/ETHPy150Open saltstack/salt/tests/integration/modules/useradd.py/UseraddModuleTest.test_groups_includes_primary |
4,639 | def get_best_run(self):
try:
return BestRun.objects.filter(map=self)[0]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open chaosk/teerace/teerace/race/models.py/Map.get_best_run |
4,640 | def test_get_pricing_invalid_file_path(self):
try:
libcloud.pricing.get_pricing(driver_type='compute', driver_name='bar',
pricing_file_path='inexistent.json')
except __HOLE__:
pass
else:
self.fail('Invalid pricing file path provided, but an exception was not'
' thrown') | IOError | dataset/ETHPy150Open apache/libcloud/libcloud/test/test_pricing.py/PricingTestCase.test_get_pricing_invalid_file_path |
4,641 | def test_get_pricing_invalid_driver_type(self):
try:
libcloud.pricing.get_pricing(driver_type='invalid_type', driver_name='bar',
pricing_file_path='inexistent.json')
except __HOLE__:
pass
else:
self.fail('Invalid driver_type provided, but an exception was not'
' thrown') | AttributeError | dataset/ETHPy150Open apache/libcloud/libcloud/test/test_pricing.py/PricingTestCase.test_get_pricing_invalid_driver_type |
4,642 | def test_get_pricing_not_in_cache(self):
try:
libcloud.pricing.get_pricing(driver_type='compute', driver_name='inexistent',
pricing_file_path=PRICING_FILE_PATH)
except __HOLE__:
pass
else:
self.fail('Invalid driver provided, but an exception was not'
' thrown') | KeyError | dataset/ETHPy150Open apache/libcloud/libcloud/test/test_pricing.py/PricingTestCase.test_get_pricing_not_in_cache |
4,643 | def __getitem__(self, name):
try:
return super(AutoAttrDict, self).__getitem__(name)
except __HOLE__:
d = AutoAttrDict()
super(AutoAttrDict, self).__setitem__(name, d)
return d | KeyError | dataset/ETHPy150Open kdart/pycopia/core/pycopia/jsonconfig.py/AutoAttrDict.__getitem__ |
4,644 | def test_builder_raises_exception_with_undefined_method(self):
builder = self.get_builder()
try:
builder.do_not_exist()
self.fail('Builder did not raise and AttributeError exception')
except __HOLE__:
self.assertTrue(True) | AttributeError | dataset/ETHPy150Open sdispater/orator/tests/query/test_query_builder.py/QueryBuilderTestCase.test_builder_raises_exception_with_undefined_method |
4,645 | @staticmethod
def _strft(dt_obj, null_on_none=False):
"""Convert datetime.datetime to ISO string.
:param null_on_none: bool Occasionally, we will actually want to send an
empty string where a datetime would typically go. For instance, if a
strategy has an end_date set, but then wants to change to use
campaign end date, the POST will normally omit the end_date field
(because you cannot send it with use_campaign_end).
However, this will cause an error because there was an end_date set
previously. So, we need to send an empty string to indicate that it
should be nulled out. In cases like this, null_on_none should be set
to True in the entity's _push dict using a partial to make it a
single-argument function. See strategy.py
:raise AttributeError: if not provided a datetime
:return: str
"""
try:
return dt_obj.strftime("%Y-%m-%dT%H:%M:%S")
except __HOLE__:
if dt_obj is None and null_on_none:
return ""
raise | AttributeError | dataset/ETHPy150Open MediaMath/t1-python/terminalone/entity.py/Entity._strft |
4,646 | def Modules(directory):
"""Creates modules from a plugin directory.
Note that there can be many, if a plugin has standalone parts that merit their
own helpfiles.
Args:
directory: The plugin directory.
Yields:
Module objects as necessary.
"""
directory = directory.rstrip(os.path.sep)
addon_info = None
# Check for module metadata in addon-info.json (if it exists).
addon_info_path = os.path.join(directory, 'addon-info.json')
if os.path.isfile(addon_info_path):
try:
with open(addon_info_path, 'r') as addon_info_file:
addon_info = json.loads(addon_info_file.read())
except (__HOLE__, ValueError) as e:
warnings.warn(
'Failed to read file {}. Error was: {}'.format(addon_info_path, e),
error.InvalidAddonInfo)
plugin_name = None
# Use plugin name from addon-info.json if available. Fall back to dir name.
addon_info = addon_info or {}
plugin_name = addon_info.get(
'name', os.path.basename(os.path.abspath(directory)))
plugin = VimPlugin(plugin_name)
# Set module metadata from addon-info.json.
if addon_info is not None:
# Valid addon-info.json. Apply addon metadata.
if 'author' in addon_info:
plugin.author = addon_info['author']
if 'description' in addon_info:
plugin.tagline = addon_info['description']
# Crawl plugin dir and collect parsed blocks for each file path.
paths_and_blocks = []
standalone_paths = []
autoloaddir = os.path.join(directory, 'autoload')
for (root, dirs, files) in os.walk(directory):
# Visit files in a stable order, since the ordering of e.g. the Maktaba
# flags below depends upon the order that we visit the files.
dirs.sort()
files.sort()
# Prune non-standard top-level dirs like 'test'.
if root == directory:
dirs[:] = [x for x in dirs if x in DOC_SUBDIRS + ['after']]
if root == os.path.join(directory, 'after'):
dirs[:] = [x for x in dirs if x in DOC_SUBDIRS]
for f in files:
filename = os.path.join(root, f)
if os.path.splitext(filename)[1] == '.vim':
relative_path = os.path.relpath(filename, directory)
with open(filename) as filehandle:
lines = list(filehandle)
blocks = list(parser.ParseBlocks(lines, filename))
# Define implicit maktaba flags for files that call
# maktaba#plugin#Enter. These flags have to be special-cased here
# because there aren't necessarily associated doc comment blocks and
# the name is computed from the file name.
if (not relative_path.startswith('autoload' + os.path.sep)
and relative_path != os.path.join('instant', 'flags.vim')):
if ContainsMaktabaPluginEnterCall(lines):
flagpath = relative_path
if flagpath.startswith('after' + os.path.sep):
flagpath = os.path.relpath(flagpath, 'after')
flagblock = Block(vimdoc.FLAG, is_default=True)
name_parts = os.path.splitext(flagpath)[0].split(os.path.sep)
flagname = name_parts.pop(0)
flagname += ''.join('[' + p + ']' for p in name_parts)
flagblock.Local(name=flagname)
flagblock.AddLine(
'Configures whether {} should be loaded.'.format(
relative_path))
default = 0 if flagname == 'plugin[mappings]' else 1
# Use unbulleted list to make sure it's on its own line. Use
# backtick to avoid helpfile syntax highlighting.
flagblock.AddLine(' - Default: {} `'.format(default))
blocks.append(flagblock)
paths_and_blocks.append((relative_path, blocks))
if filename.startswith(autoloaddir):
if blocks and blocks[0].globals.get('standalone'):
standalone_paths.append(relative_path)
docdir = os.path.join(directory, 'doc')
if not os.path.isdir(docdir):
os.mkdir(docdir)
modules = []
main_module = Module(plugin_name, plugin)
for (path, blocks) in paths_and_blocks:
# Skip standalone paths.
if GetMatchingStandalonePath(path, standalone_paths) is not None:
continue
namespace = None
if path.startswith('autoload' + os.path.sep):
namespace = GetAutoloadNamespace(os.path.relpath(path, 'autoload'))
for block in blocks:
main_module.Merge(block, namespace=namespace)
modules.append(main_module)
# Process standalone modules.
standalone_modules = {}
for (path, blocks) in paths_and_blocks:
standalone_path = GetMatchingStandalonePath(path, standalone_paths)
# Skip all but standalone paths.
if standalone_path is None:
continue
assert path.startswith('autoload' + os.path.sep)
namespace = GetAutoloadNamespace(os.path.relpath(path, 'autoload'))
standalone_module = standalone_modules.get(standalone_path)
# Initialize module if this is the first file processed from it.
if standalone_module is None:
standalone_module = Module(namespace.rstrip('#'), plugin)
standalone_modules[standalone_path] = standalone_module
modules.append(standalone_module)
for block in blocks:
standalone_module.Merge(block, namespace=namespace)
for module in modules:
module.Close()
yield module | IOError | dataset/ETHPy150Open google/vimdoc/vimdoc/module.py/Modules |
4,647 | @permission_required("core.manage_shop")
def manage_property_groups(request):
"""The main view to manage properties.
"""
try:
prop = PropertyGroup.objects.all()[0]
url = reverse("lfs_manage_property_group", kwargs={"id": prop.id})
except __HOLE__:
url = reverse("lfs_manage_no_property_groups")
return HttpResponseRedirect(url) | IndexError | dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/property_groups/views.py/manage_property_groups |
4,648 | def generate(env):
try:
env['BUILDERS']['CopyTo']
env['BUILDERS']['CopyAs']
except __HOLE__, e:
global copyToBuilder
if copyToBuilder is None:
copyToBuilder = SCons.Builder.Builder(
action = copy_action,
target_factory = env.fs.Dir,
source_factory = env.fs.Entry,
multi = 1,
emitter = [ copyto_emitter, ] )
global copyAsBuilder
if copyAsBuilder is None:
copyAsBuilder = SCons.Builder.Builder(
action = copy_action,
target_factory = env.fs.Entry,
source_factory = env.fs.Entry )
env['BUILDERS']['CopyTo'] = copyToBuilder
env['BUILDERS']['CopyAs'] = copyAsBuilder
env['COPYSTR'] = 'Copy file(s): "$SOURCES" to "$TARGETS"' | KeyError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/filesystem.py/generate |
4,649 | def default(self, obj):
if isinstance(obj, Mapping):
try:
return dict(obj)
except:
pass
if isinstance(obj, Sized) and isinstance(obj, Iterable):
try:
return list(obj)
except:
pass
if callable(getattr(obj, 'to_dict', None)):
return obj.to_dict()
if self.dev_mode:
return repr(obj) # TODO: blargh
if isinstance(obj, type) or callable(obj):
return unicode(repr(obj))
try:
return dict([(k, v) for k, v in obj.__dict__.items()
if not k.startswith('__')])
except __HOLE__:
return unicode(repr(obj))
else:
raise TypeError('cannot serialize to JSON: %r' % obj) | AttributeError | dataset/ETHPy150Open mahmoud/clastic/clastic/render/simple.py/ClasticJSONEncoder.default |
4,650 | def __init__(self, **kwargs):
self.qp_name = kwargs.pop('qp_name', 'format')
self.dev_mode = kwargs.pop('dev_mode', True)
self.json_render = kwargs.pop('json_render',
JSONRender(dev_mode=self.dev_mode))
try:
table_type = kwargs.pop('table_type')
except __HOLE__:
table_type = None
default_tabular = TabularRender()
else:
default_tabular = TabularRender(table_type=table_type)
self.tabular_render = kwargs.pop('tabular_render', default_tabular)
if kwargs:
raise TypeError('unexpected keyword arguments: %r' % kwargs) | KeyError | dataset/ETHPy150Open mahmoud/clastic/clastic/render/simple.py/BasicRender.__init__ |
4,651 | def __eq__(self, other):
try:
return self.startEA == other.startEA
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open tmr232/Sark/sark/code/function.py/Function.__eq__ |
4,652 | def rate_limit(limit=100, window=60):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
key = "{0}: {1}".format(request.remote_addr, request.path)
try:
remaining = limit - int(redis.get(key))
except (ValueError, __HOLE__):
remaining = limit
redis.set(key, 0)
expires_in = redis.ttl(key)
if not expires_in:
redis.expire(key, window)
expires_in = window
g.rate_limits = (limit, remaining-1, time()+expires_in)
if remaining > 0:
redis.incr(key, 1)
return func(*args, **kwargs)
return TOO_MANY_REQUESTS
return wrapper
return decorator | TypeError | dataset/ETHPy150Open projectweekend/Flask-PostgreSQL-API-Seed/app/utils/rate_limit.py/rate_limit |
4,653 | @app.after_request
def add_rate_limit_headers(response):
try:
limit, remaining, expires = map(int, g.rate_limits)
except (AttributeError, __HOLE__):
return response
else:
response.headers.add('X-RateLimit-Remaining', remaining)
response.headers.add('X-RateLimit-Limit', limit)
response.headers.add('X-RateLimit-Reset', expires)
return response | ValueError | dataset/ETHPy150Open projectweekend/Flask-PostgreSQL-API-Seed/app/utils/rate_limit.py/add_rate_limit_headers |
4,654 | def check_iterator(self, it, seq, pickle=True):
if pickle:
self.check_pickle(it, seq)
res = []
while 1:
try:
val = next(it)
except __HOLE__:
break
res.append(val)
self.assertEqual(res, seq)
# Helper to check that a for loop generates a given sequence | StopIteration | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.check_iterator |
4,655 | def check_pickle(self, itorg, seq):
d = pickle.dumps(itorg)
it = pickle.loads(d)
# Cannot assert type equality because dict iterators unpickle as list
# iterators.
# self.assertEqual(type(itorg), type(it))
self.assertTrue(isinstance(it, collections.abc.Iterator))
self.assertEqual(list(it), seq)
it = pickle.loads(d)
try:
next(it)
except __HOLE__:
return
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(list(it), seq[1:])
# Test basic use of iter() function | StopIteration | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.check_pickle |
4,656 | def test_exception_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
if i == 10:
raise RuntimeError
return i
res = []
try:
for x in iter(spam, 20):
res.append(x)
except __HOLE__:
self.assertEqual(res, list(range(10)))
else:
self.fail("should have raised RuntimeError")
# Test exception propagation through sequence iterator | RuntimeError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_exception_function |
4,657 | def test_exception_sequence(self):
class MySequenceClass(SequenceClass):
def __getitem__(self, i):
if i == 10:
raise RuntimeError
return SequenceClass.__getitem__(self, i)
res = []
try:
for x in MySequenceClass(20):
res.append(x)
except __HOLE__:
self.assertEqual(res, list(range(10)))
else:
self.fail("should have raised RuntimeError")
# Test for StopIteration from __getitem__ | RuntimeError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_exception_sequence |
4,658 | def test_iter_file(self):
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"], pickle=False)
self.check_for_loop(f, [], pickle=False)
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test list()'s use of iterators. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_iter_file |
4,659 | def test_builtin_list(self):
self.assertEqual(list(SequenceClass(5)), list(range(5)))
self.assertEqual(list(SequenceClass(0)), [])
self.assertEqual(list(()), [])
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(d), list(d.keys()))
self.assertRaises(TypeError, list, list)
self.assertRaises(TypeError, list, 42)
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"])
f.seek(0, 0)
self.assertEqual(list(f),
["0\n", "1\n", "2\n", "3\n", "4\n"])
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test tuples()'s use of iterators. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_builtin_list |
4,660 | def test_builtin_tuple(self):
self.assertEqual(tuple(SequenceClass(5)), (0, 1, 2, 3, 4))
self.assertEqual(tuple(SequenceClass(0)), ())
self.assertEqual(tuple([]), ())
self.assertEqual(tuple(()), ())
self.assertEqual(tuple("abc"), ("a", "b", "c"))
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(tuple(d), tuple(d.keys()))
self.assertRaises(TypeError, tuple, list)
self.assertRaises(TypeError, tuple, 42)
f = open(TESTFN, "w")
try:
for i in range(5):
f.write("%d\n" % i)
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n"))
f.seek(0, 0)
self.assertEqual(tuple(f),
("0\n", "1\n", "2\n", "3\n", "4\n"))
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test filter()'s use of iterators. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_builtin_tuple |
4,661 | def test_builtin_max_min(self):
self.assertEqual(max(SequenceClass(5)), 4)
self.assertEqual(min(SequenceClass(5)), 0)
self.assertEqual(max(8, -1), 8)
self.assertEqual(min(8, -1), -1)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(max(d), "two")
self.assertEqual(min(d), "one")
self.assertEqual(max(d.values()), 3)
self.assertEqual(min(iter(d.values())), 1)
f = open(TESTFN, "w")
try:
f.write("medium line\n")
f.write("xtra large line\n")
f.write("itty-bitty line\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(min(f), "itty-bitty line\n")
f.seek(0, 0)
self.assertEqual(max(f), "xtra large line\n")
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test map()'s use of iterators. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_builtin_max_min |
4,662 | def test_builtin_map(self):
self.assertEqual(list(map(lambda x: x+1, SequenceClass(5))),
list(range(1, 6)))
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(map(lambda k, d=d: (k, d[k]), d)),
list(d.items()))
dkeys = list(d.keys())
expected = [(i < len(d) and dkeys[i] or None,
i,
i < len(d) and dkeys[i] or None)
for i in range(3)]
f = open(TESTFN, "w")
try:
for i in range(10):
f.write("xy" * i + "\n") # line i has len 2*i+1
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(list(map(len, f)), list(range(1, 21, 2)))
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test zip()'s use of iterators. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_builtin_map |
4,663 | def test_builtin_zip(self):
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertEqual(list(zip(*[(1, 2), 'ab'])), [(1, 'a'), (2, 'b')])
self.assertRaises(TypeError, zip, None)
self.assertRaises(TypeError, zip, range(10), 42)
self.assertRaises(TypeError, zip, range(10), zip)
self.assertEqual(list(zip(IteratingSequenceClass(3))),
[(0,), (1,), (2,)])
self.assertEqual(list(zip(SequenceClass(3))),
[(0,), (1,), (2,)])
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(list(d.items()), list(zip(d, d.values())))
# Generate all ints starting at constructor arg.
class IntsFrom:
def __init__(self, start):
self.i = start
def __iter__(self):
return self
def __next__(self):
i = self.i
self.i = i+1
return i
f = open(TESTFN, "w")
try:
f.write("a\n" "bbb\n" "cc\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
self.assertEqual(list(zip(IntsFrom(0), f, IntsFrom(-100))),
[(0, "a\n", -100),
(1, "bbb\n", -99),
(2, "cc\n", -98)])
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
self.assertEqual(list(zip(range(5))), [(i,) for i in range(5)])
# Classes that lie about their lengths.
class NoGuessLen5:
def __getitem__(self, i):
if i >= 5:
raise IndexError
return i
class Guess3Len5(NoGuessLen5):
def __len__(self):
return 3
class Guess30Len5(NoGuessLen5):
def __len__(self):
return 30
def lzip(*args):
return list(zip(*args))
self.assertEqual(len(Guess3Len5()), 3)
self.assertEqual(len(Guess30Len5()), 30)
self.assertEqual(lzip(NoGuessLen5()), lzip(range(5)))
self.assertEqual(lzip(Guess3Len5()), lzip(range(5)))
self.assertEqual(lzip(Guess30Len5()), lzip(range(5)))
expected = [(i, i) for i in range(5)]
for x in NoGuessLen5(), Guess3Len5(), Guess30Len5():
for y in NoGuessLen5(), Guess3Len5(), Guess30Len5():
self.assertEqual(lzip(x, y), expected) | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_builtin_zip |
4,664 | def test_unicode_join_endcase(self):
# This class inserts a Unicode object into its argument's natural
# iteration, in the 3rd position.
class OhPhooey:
def __init__(self, seq):
self.it = iter(seq)
self.i = 0
def __iter__(self):
return self
def __next__(self):
i = self.i
self.i = i+1
if i == 2:
return "fooled you!"
return next(self.it)
f = open(TESTFN, "w")
try:
f.write("a\n" + "b\n" + "c\n")
finally:
f.close()
f = open(TESTFN, "r")
# Nasty: string.join(s) can't know whether unicode.join() is needed
# until it's seen all of s's elements. But in this case, f's
# iterator cannot be restarted. So what we're testing here is
# whether string.join() can manage to remember everything it's seen
# and pass that on to unicode.join().
try:
got = " - ".join(OhPhooey(f))
self.assertEqual(got, "a\n - b\n - fooled you! - c\n")
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test iterators with 'x in y' and 'x not in y'. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_unicode_join_endcase |
4,665 | def test_in_and_not_in(self):
for sc5 in IteratingSequenceClass(5), SequenceClass(5):
for i in range(5):
self.assertIn(i, sc5)
for i in "abc", -1, 5, 42.42, (3, 4), [], {1: 1}, 3-12j, sc5:
self.assertNotIn(i, sc5)
self.assertRaises(TypeError, lambda: 3 in 12)
self.assertRaises(TypeError, lambda: 3 not in map)
d = {"one": 1, "two": 2, "three": 3, 1j: 2j}
for k in d:
self.assertIn(k, d)
self.assertNotIn(k, d.values())
for v in d.values():
self.assertIn(v, d.values())
self.assertNotIn(v, d)
for k, v in d.items():
self.assertIn((k, v), d.items())
self.assertNotIn((v, k), d.items())
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
for chunk in "abc":
f.seek(0, 0)
self.assertNotIn(chunk, f)
f.seek(0, 0)
self.assertIn((chunk + "\n"), f)
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test iterators with operator.countOf (PySequence_Count). | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_in_and_not_in |
4,666 | def test_countOf(self):
from operator import countOf
self.assertEqual(countOf([1,2,2,3,2,5], 2), 3)
self.assertEqual(countOf((1,2,2,3,2,5), 2), 3)
self.assertEqual(countOf("122325", "2"), 3)
self.assertEqual(countOf("122325", "6"), 0)
self.assertRaises(TypeError, countOf, 42, 1)
self.assertRaises(TypeError, countOf, countOf, countOf)
d = {"one": 3, "two": 3, "three": 3, 1j: 2j}
for k in d:
self.assertEqual(countOf(d, k), 1)
self.assertEqual(countOf(d.values(), 3), 3)
self.assertEqual(countOf(d.values(), 2j), 1)
self.assertEqual(countOf(d.values(), 1j), 0)
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n" "b\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
for letter, count in ("a", 1), ("b", 2), ("c", 1), ("d", 0):
f.seek(0, 0)
self.assertEqual(countOf(f, letter + "\n"), count)
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test iterators with operator.indexOf (PySequence_Index). | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_countOf |
4,667 | def test_indexOf(self):
from operator import indexOf
self.assertEqual(indexOf([1,2,2,3,2,5], 1), 0)
self.assertEqual(indexOf((1,2,2,3,2,5), 2), 1)
self.assertEqual(indexOf((1,2,2,3,2,5), 3), 3)
self.assertEqual(indexOf((1,2,2,3,2,5), 5), 5)
self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 0)
self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 6)
self.assertEqual(indexOf("122325", "2"), 1)
self.assertEqual(indexOf("122325", "5"), 5)
self.assertRaises(ValueError, indexOf, "122325", "6")
self.assertRaises(TypeError, indexOf, 42, 1)
self.assertRaises(TypeError, indexOf, indexOf, indexOf)
f = open(TESTFN, "w")
try:
f.write("a\n" "b\n" "c\n" "d\n" "e\n")
finally:
f.close()
f = open(TESTFN, "r")
try:
fiter = iter(f)
self.assertEqual(indexOf(fiter, "b\n"), 1)
self.assertEqual(indexOf(fiter, "d\n"), 1)
self.assertEqual(indexOf(fiter, "e\n"), 0)
self.assertRaises(ValueError, indexOf, fiter, "a\n")
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
iclass = IteratingSequenceClass(3)
for i in range(3):
self.assertEqual(indexOf(iclass, i), i)
self.assertRaises(ValueError, indexOf, iclass, -1)
# Test iterators with file.writelines(). | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_indexOf |
4,668 | def test_writelines(self):
f = open(TESTFN, "w")
try:
self.assertRaises(TypeError, f.writelines, None)
self.assertRaises(TypeError, f.writelines, 42)
f.writelines(["1\n", "2\n"])
f.writelines(("3\n", "4\n"))
f.writelines({'5\n': None})
f.writelines({})
# Try a big chunk too.
class Iterator:
def __init__(self, start, finish):
self.start = start
self.finish = finish
self.i = self.start
def __next__(self):
if self.i >= self.finish:
raise StopIteration
result = str(self.i) + '\n'
self.i += 1
return result
def __iter__(self):
return self
class Whatever:
def __init__(self, start, finish):
self.start = start
self.finish = finish
def __iter__(self):
return Iterator(self.start, self.finish)
f.writelines(Whatever(6, 6+2000))
f.close()
f = open(TESTFN)
expected = [str(i) + "\n" for i in range(1, 2006)]
self.assertEqual(list(f), expected)
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
# Test iterators on RHS of unpacking assignments. | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_writelines |
4,669 | def test_unpack_iter(self):
a, b = 1, 2
self.assertEqual((a, b), (1, 2))
a, b, c = IteratingSequenceClass(3)
self.assertEqual((a, b, c), (0, 1, 2))
try: # too many values
a, b = IteratingSequenceClass(3)
except ValueError:
pass
else:
self.fail("should have raised ValueError")
try: # not enough values
a, b, c = IteratingSequenceClass(2)
except ValueError:
pass
else:
self.fail("should have raised ValueError")
try: # not iterable
a, b, c = len
except TypeError:
pass
else:
self.fail("should have raised TypeError")
a, b, c = {1: 42, 2: 42, 3: 42}.values()
self.assertEqual((a, b, c), (42, 42, 42))
f = open(TESTFN, "w")
lines = ("a\n", "bb\n", "ccc\n")
try:
for line in lines:
f.write(line)
finally:
f.close()
f = open(TESTFN, "r")
try:
a, b, c = f
self.assertEqual((a, b, c), lines)
finally:
f.close()
try:
unlink(TESTFN)
except __HOLE__:
pass
(a, b), (c,) = IteratingSequenceClass(2), {42: 24}
self.assertEqual((a, b, c), (0, 1, 42)) | OSError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_unpack_iter |
4,670 | @cpython_only
def test_ref_counting_behavior(self):
class C(object):
count = 0
def __new__(cls):
cls.count += 1
return object.__new__(cls)
def __del__(self):
cls = self.__class__
assert cls.count > 0
cls.count -= 1
x = C()
self.assertEqual(C.count, 1)
del x
self.assertEqual(C.count, 0)
l = [C(), C(), C()]
self.assertEqual(C.count, 3)
try:
a, b = iter(l)
except __HOLE__:
pass
del l
self.assertEqual(C.count, 0)
# Make sure StopIteration is a "sink state".
# This tests various things that weren't sink states in Python 2.2.1,
# plus various things that always were fine. | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_ref_counting_behavior |
4,671 | def test_3720(self):
# Avoid a crash, when an iterator deletes its next() method.
class BadIterator(object):
def __iter__(self):
return self
def __next__(self):
del BadIterator.__next__
return 1
try:
for i in BadIterator() :
pass
except __HOLE__:
pass | TypeError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_iter.py/TestCase.test_3720 |
4,672 | def _dispatch_channel_events(self):
"""Invoke the `_dispatch_events` method on open channels that requested
it
"""
if not self._channels_pending_dispatch:
return
with self._acquire_event_dispatch() as dispatch_acquired:
if not dispatch_acquired:
# Nested dispatch or dispatch blocked higher in call stack
return
candidates = list(self._channels_pending_dispatch)
self._channels_pending_dispatch.clear()
for channel_number in candidates:
if channel_number < 0:
# This was meant to terminate process_data_events
continue
try:
impl_channel = self._impl._channels[channel_number]
except __HOLE__:
continue
if impl_channel.is_open:
impl_channel._get_cookie()._dispatch_events() | KeyError | dataset/ETHPy150Open pika/pika/pika/adapters/blocking_connection.py/BlockingConnection._dispatch_channel_events |
4,673 | def _dispatch_connection_events(self):
"""Dispatch ready connection events"""
if not self._ready_events:
return
with self._acquire_event_dispatch() as dispatch_acquired:
if not dispatch_acquired:
# Nested dispatch or dispatch blocked higher in call stack
return
# Limit dispatch to the number of currently ready events to avoid
# getting stuck in this loop
for _ in compat.xrange(len(self._ready_events)):
try:
evt = self._ready_events.popleft()
except __HOLE__:
# Some events (e.g., timers) must have been cancelled
break
evt.dispatch() | IndexError | dataset/ETHPy150Open pika/pika/pika/adapters/blocking_connection.py/BlockingConnection._dispatch_connection_events |
4,674 | def basic_cancel(self, consumer_tag):
"""This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send any more
messages for that consumer. The client may receive an arbitrary number
of messages in between sending the cancel method and receiving the
cancel-ok reply.
NOTE: When cancelling a no_ack=False consumer, this implementation
automatically Nacks and suppresses any incoming messages that have not
yet been dispatched to the consumer's callback. However, when cancelling
a no_ack=True consumer, this method will return any pending messages
that arrived before broker confirmed the cancellation.
:param str consumer_tag: Identifier for the consumer; the result of
passing a consumer_tag that was created on another channel is
undefined (bad things will happen)
:returns: (NEW IN pika 0.10.0) empty sequence for a no_ack=False
consumer; for a no_ack=True consumer, returns a (possibly empty)
sequence of pending messages that arrived before broker confirmed
the cancellation (this is done instead of via consumer's callback in
order to prevent reentrancy/recursion. Each message is four-tuple:
(channel, method, properties, body)
channel: BlockingChannel
method: spec.Basic.Deliver
properties: spec.BasicProperties
body: str or unicode
"""
try:
consumer_info = self._consumer_infos[consumer_tag]
except __HOLE__:
LOGGER.warn("User is attempting to cancel an unknown consumer=%s; "
"already cancelled by user or broker?", consumer_tag)
return []
try:
# Assertion failure here is most likely due to reentrance
assert consumer_info.active or consumer_info.cancelled_by_broker, (
consumer_info.state)
# Assertion failure here signals disconnect between consumer state
# in BlockingChannel and Channel
assert (consumer_info.cancelled_by_broker or
consumer_tag in self._impl._consumers), consumer_tag
no_ack = consumer_info.no_ack
consumer_info.state = _ConsumerInfo.TEARING_DOWN
with _CallbackResult() as cancel_ok_result:
# Nack pending messages for no_ack=False consumer
if not no_ack:
pending_messages = self._remove_pending_deliveries(
consumer_tag)
if pending_messages:
# NOTE: we use impl's basic_reject to avoid the
# possibility of redelivery before basic_cancel takes
# control of nacking.
# NOTE: we can't use basic_nack with the multiple option
# to avoid nacking messages already held by our client.
for message in pending_messages:
self._impl.basic_reject(message.method.delivery_tag,
requeue=True)
# Cancel the consumer; impl takes care of rejecting any
# additional deliveries that arrive for a no_ack=False
# consumer
self._impl.basic_cancel(
callback=cancel_ok_result.signal_once,
consumer_tag=consumer_tag,
nowait=False)
# Flush output and wait for Basic.Cancel-ok or
# broker-initiated Basic.Cancel
self._flush_output(
cancel_ok_result.is_ready,
lambda: consumer_tag not in self._impl._consumers)
if no_ack:
# Return pending messages for no_ack=True consumer
return [
(evt.method, evt.properties, evt.body)
for evt in self._remove_pending_deliveries(consumer_tag)]
else:
# impl takes care of rejecting any incoming deliveries during
# cancellation
messages = self._remove_pending_deliveries(consumer_tag)
assert not messages, messages
return []
finally:
# NOTE: The entry could be purged if channel or connection closes
if consumer_tag in self._consumer_infos:
del self._consumer_infos[consumer_tag] | KeyError | dataset/ETHPy150Open pika/pika/pika/adapters/blocking_connection.py/BlockingChannel.basic_cancel |
4,675 | def get_dock_json(self):
""" return dock json from existing build json """
env_json = self.build_json['spec']['strategy']['customStrategy']['env']
try:
p = [env for env in env_json if env["name"] == "ATOMIC_REACTOR_PLUGINS"]
except __HOLE__:
raise RuntimeError("\"env\" is not iterable")
if len(p) <= 0:
raise RuntimeError("\"env\" misses key ATOMIC_REACTOR_PLUGINS")
dock_json_str = p[0]['value']
dock_json = json.loads(dock_json_str)
return dock_json | TypeError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/manipulate.py/DockJsonManipulator.get_dock_json |
4,676 | def dock_json_has_plugin_conf(self, plugin_type, plugin_name):
"""
Check whether a plugin is configured.
"""
try:
self.dock_json_get_plugin_conf(plugin_type, plugin_name)
return True
except (__HOLE__, IndexError):
return False | KeyError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/manipulate.py/DockJsonManipulator.dock_json_has_plugin_conf |
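The row above answers an "is this plugin configured?" question by attempting the lookup and mapping a failed lookup to False. A self-contained sketch of the same EAFP idiom on hypothetical data (names are illustrative, not the osbs-client API):

    def has_plugin(dock_json, plugin_type, plugin_name):
        # Succeed only if the lookup itself succeeds; a missing section or
        # a missing plugin entry both simply mean "not configured".
        try:
            plugins = dock_json[plugin_type]
            next(p for p in plugins if p["name"] == plugin_name)
            return True
        except (KeyError, StopIteration):
            return False

    conf = {"prebuild_plugins": [{"name": "add_yum_repo"}]}
    print(has_plugin(conf, "prebuild_plugins", "add_yum_repo"))  # True
    print(has_plugin(conf, "postbuild_plugins", "tag_image"))    # False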
4,677 | def _dock_json_get_plugin_conf_or_fail(self, plugin_type, plugin_name):
try:
conf = self.dock_json_get_plugin_conf(plugin_type, plugin_name)
except __HOLE__:
raise RuntimeError("Invalid dock json: plugin type '%s' misses" % plugin_type)
except IndexError:
raise RuntimeError("no such plugin in dock json: \"%s\"" % plugin_name)
return conf | KeyError | dataset/ETHPy150Open projectatomic/osbs-client/osbs/build/manipulate.py/DockJsonManipulator._dock_json_get_plugin_conf_or_fail |
4,678 | def test_intconversion(self):
# Test __int__()
class ClassicMissingMethods:
pass
if is_jython:
self.assertRaises(TypeError, int, ClassicMissingMethods())
else:
self.assertRaises(AttributeError, int, ClassicMissingMethods())
class MissingMethods(object):
pass
self.assertRaises(TypeError, int, MissingMethods())
class Foo0:
def __int__(self):
return 42
class Foo1(object):
def __int__(self):
return 42
class Foo2(int):
def __int__(self):
return 42
class Foo3(int):
def __int__(self):
return self
class Foo4(int):
def __int__(self):
return 42L
class Foo5(int):
def __int__(self):
return 42.
self.assertEqual(int(Foo0()), 42)
self.assertEqual(int(Foo1()), 42)
self.assertEqual(int(Foo2()), 42)
self.assertEqual(int(Foo3()), 0)
self.assertEqual(int(Foo4()), 42L)
self.assertRaises(TypeError, int, Foo5())
class Classic:
pass
for base in (object, Classic):
class IntOverridesTrunc(base):
def __int__(self):
return 42
def __trunc__(self):
return -12
self.assertEqual(int(IntOverridesTrunc()), 42)
class JustTrunc(base):
def __trunc__(self):
return 42
self.assertEqual(int(JustTrunc()), 42)
for trunc_result_base in (object, Classic):
class Integral(trunc_result_base):
def __int__(self):
return 42
class TruncReturnsNonInt(base):
def __trunc__(self):
return Integral()
self.assertEqual(int(TruncReturnsNonInt()), 42)
class NonIntegral(trunc_result_base):
def __trunc__(self):
# Check that we avoid infinite recursion.
return NonIntegral()
class TruncReturnsNonIntegral(base):
def __trunc__(self):
return NonIntegral()
try:
int(TruncReturnsNonIntegral())
except __HOLE__ as e:
if not is_jython:
self.assertEqual(str(e),
"__trunc__ returned non-Integral"
" (type NonIntegral)")
else:
self.fail("Failed to raise TypeError with %s" %
((base, trunc_result_base),)) | TypeError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_int.py/IntTestCases.test_intconversion |
4,679 | def _get_from_read_queue(self):
"""Fetch a frame from the read queue and return it, otherwise return
None
:rtype: pamqp.specification.Frame
"""
try:
frame_value = self._read_queue.get(False)
except Queue.Empty:
return None
try:
self._read_queue.task_done()
except __HOLE__:
pass
return frame_value | ValueError | dataset/ETHPy150Open gmr/rabbitpy/rabbitpy/channel.py/Channel._get_from_read_queue |
4,680 | @property
def title(self):
titles = [m.value for m in self.meta if m.type == 'title']
try:
return titles[0][0]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open hypothesis/h/h/api/models/document.py/Document.title |
4,681 | def mkdir_p(path):
try:
os.makedirs(path)
except __HOLE__:
if not os.path.isdir(path):
raise | OSError | dataset/ETHPy150Open nitely/Spirit/spirit/core/utils/__init__.py/mkdir_p |
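mkdir_p above tolerates a concurrent creator and only re-raises when the path still is not a directory afterwards. On Python 3 the standard library expresses the same intent directly; a rough equivalent sketch (not part of the Spirit codebase):

    import os

    def mkdir_p(path):
        # exist_ok=True swallows the "directory already exists" case but
        # still raises if the existing path is a file, mirroring the
        # isdir() re-check in the original helper.
        os.makedirs(path, exist_ok=True)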
4,682 | def _json_to_flat_metrics(self, prefix, data):
for key, value in data.items():
if isinstance(value, dict):
for k, v in self._json_to_flat_metrics(
"%s.%s" % (prefix, key), value):
yield k, v
else:
try:
int(value)
except __HOLE__:
value = None
finally:
yield ("%s.%s" % (prefix, key), value) | ValueError | dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/httpjson/httpjson.py/HTTPJSONCollector._json_to_flat_metrics |
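The generator in the row above walks a nested JSON payload and yields dotted metric names, replacing non-numeric values with None. A standalone sketch of the flattening step on hypothetical input (function and variable names are illustrative, not Diamond's API):

    def flatten(prefix, data):
        # Recursively turn {"a": {"b": 1}} into ("a.b", 1) pairs.
        for key, value in data.items():
            path = "%s.%s" % (prefix, key) if prefix else key
            if isinstance(value, dict):
                for pair in flatten(path, value):
                    yield pair
            else:
                yield path, value

    sample = {"gauges": {"queue": {"depth": 12}}, "status": "ok"}
    print(dict(flatten("", sample)))
    # {'gauges.queue.depth': 12, 'status': 'ok'}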
4,683 | def collect(self):
url = self.config['url']
req = urllib2.Request(url)
req.add_header('Content-type', 'application/json')
try:
resp = urllib2.urlopen(req)
except urllib2.URLError as e:
self.log.error("Can't open url %s. %s", url, e)
else:
content = resp.read()
try:
data = json.loads(content)
except __HOLE__ as e:
self.log.error("Can't parse JSON object from %s. %s", url, e)
else:
for metric_name, metric_value in self._json_to_flat_metrics(
"", data):
self.publish(metric_name, metric_value) | ValueError | dataset/ETHPy150Open BrightcoveOS/Diamond/src/collectors/httpjson/httpjson.py/HTTPJSONCollector.collect |
4,684 | def create_capture(source = 0, fallback = presets['chess']):
'''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
'''
source = str(source).strip()
chunks = source.split(':')
# hanlde drive letter ('c:', ...)
if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
chunks[1] = chunks[0] + ':' + chunks[1]
del chunks[0]
source = chunks[0]
try: source = int(source)
except __HOLE__: pass
params = dict( s.split('=') for s in chunks[1:] )
cap = None
if source == 'synth':
Class = classes.get(params.get('class', None), VideoSynthBase)
try: cap = Class(**params)
except: pass
else:
cap = cv2.VideoCapture(source)
if 'size' in params:
w, h = map(int, params['size'].split('x'))
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, h)
if cap is None or not cap.isOpened():
print 'Warning: unable to open video source: ', source
if fallback is not None:
return create_capture(fallback, None)
return cap | ValueError | dataset/ETHPy150Open bluquar/cubr/video.py/create_capture |
4,685 | def _file_exists(self):
"""
Check that the file exists. Also try to check the absolute path. If the
file is found within the absolute path, then update the file path
"""
file_found = True
if not os.path.exists(self.file):
if os.path.exists(os.path.join(Constants.ALTER_DIR, self.file)):
self.file = os.path.join(Constants.ALTER_DIR, self.file)
else:
file_found = False
# populate ref if file found
if file_found:
try:
my_file = open(self.file)
head = list(islice(my_file, 3))
except __HOLE__, ex:
if 'my_file' in locals():
my_file.close()
sys.stderr.write("Error reading file '%s'\n\t=>%s\n" % (self.file, ex.message))
if not MetaDataUtil.parse_direction(head) == 'up':
sys.stderr.write("File can only be an up-alter: '%s'" % self.file)
meta_data = MetaDataUtil.parse_meta(head)
if 'ref' in meta_data:
self.ref = meta_data['ref']
return file_found | OSError | dataset/ETHPy150Open appnexus/schema-tool/schematool/command/resolve.py/ResolveCommand._file_exists |
4,686 | def _relocate_files(self, old_filename, new_filename, new_ref, new_backref, direction):
"""
Move the file on disk. Not really a big task, but it might be nice in the
future to go ahead and do a git mv command for the user.
"""
is_backref_line = re.compile('--\s*backref\s*:\s*(\d+)')
is_ref_line = re.compile('--\s*ref\s*:\s*(\d+)')
found_ref = False
found_backref = False
old_filename_with_dir = os.path.join(Constants.ALTER_DIR, old_filename)
new_filename_with_dir = os.path.join(Constants.ALTER_DIR, new_filename)
# create the new file
try:
new_file = open(new_filename_with_dir, 'w')
old_file = open(old_filename_with_dir, 'r')
lines = old_file.readlines()
for line in lines:
if not found_ref and is_ref_line.match(line) is not None:
new_file.write('-- ref: %s\n' % new_ref)
found_ref = True
elif not found_backref and is_backref_line.match(line) is not None:
new_file.write('-- backref: %s\n' % new_backref)
found_backref = True
else:
new_file.write(line)
new_file.close()
old_file.close()
except OSError, ex:
sys.stderr.write("Error renaming file '%s'\n\t=>%s\n" % (old_filename_with_dir, ex.message))
if 'new_file' in locals():
new_file.close()
if 'old_file' in locals():
old_file.close()
# delete the old file
try:
os.remove(old_filename_with_dir)
except __HOLE__, ex:
sys.stderr.write("Could not delete file '%s'\n\t=>%s\n" % (old_filename_with_dir, ex.message))
# create the new static file
if self.config.get('static_alter_dir'):
old_static_filename = os.path.join(self.config['static_alter_dir'], old_filename)
new_static_filename = os.path.join(self.config['static_alter_dir'], new_filename)
content = open(new_filename_with_dir).read()
if direction == 'up':
rev_query = self.db.get_append_commit_query(new_ref)
else:
assert direction == 'down'
rev_query = self.db.get_remove_commit_query(new_ref)
content += '\n\n-- start rev query\n%s;\n-- end rev query\n' % rev_query.encode('utf-8')
f = open(new_static_filename, 'w')
f.write(content)
f.close()
# delete the old static file, and add the new static file.
static_file_commands = [
['git', 'rm', '--ignore-unmatch', old_static_filename],
['git', 'add', new_static_filename],
]
else:
static_file_commands = []
# perform Git updates (add and rm -> rename in essence)
commands = [
['git', 'rm', '%s' % old_filename_with_dir],
['git', 'add', '%s' % new_filename_with_dir],
] + static_file_commands
try:
for cmd in commands:
proc = subprocess.Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
_, stderr = proc.communicate()
if not proc.returncode == 0 and stderr is not None:
sys.stderr.write("Error")
sys.stderr.write("\n----------------------\n")
sys.stderr.write(proc.stderr)
sys.stderr.write("\n----------------------\n")
sys.stderr.write("\n")
except Exception, ex:
sys.stderr.write("Error performing git operations\n\t=>%s\n" % ex.message) | OSError | dataset/ETHPy150Open appnexus/schema-tool/schematool/command/resolve.py/ResolveCommand._relocate_files |
4,687 | def clean_built(self, storage):
"""
Clear any static files that aren't from the apps.
"""
build_dirs, built_files = self.find_all(storage)
found_files = set()
for finder in finders.get_finders():
for path, s in finder.list([]):
# Prefix the relative path if the source storage contains it
if getattr(s, 'prefix', None):
prefixed_path = os.path.join(s.prefix, path)
else:
prefixed_path = path
found_files.add(prefixed_path)
stale_files = built_files - found_files
for fpath in stale_files:
self.log(u"Deleting '%s'" % smart_unicode(fpath), level=1)
storage.delete(fpath)
found_dirs = set()
for f in found_files:
path = f
while True:
path = os.path.dirname(path)
found_dirs.add(path)
if not path:
break
stale_dirs = set(build_dirs) - found_dirs
for fpath in stale_dirs:
try:
storage.delete(fpath)
except __HOLE__:
self.log(u"Couldn't remove empty directory '%s'" % smart_unicode(fpath), level=1)
else:
self.log(u"Deleted empty directory '%s'" % smart_unicode(fpath), level=1) | OSError | dataset/ETHPy150Open hzdg/django-staticbuilder/staticbuilder/management/commands/collectforbuild.py/Command.clean_built |
4,688 | def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}
if self.data or self.files:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial:
try:
defaults['initial'] = self.initial[i]
except __HOLE__:
pass
# Allow extra forms to be empty.
if i >= self._initial_form_count:
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form | IndexError | dataset/ETHPy150Open dcramer/django-compositepks/django/forms/formsets.py/BaseFormSet._construct_form |
4,689 | def full_clean(self):
"""
Cleans all of self.data and populates self._errors.
"""
self._errors = []
if not self.is_bound: # Stop further processing.
return
for i in range(0, self._total_form_count):
form = self.forms[i]
self._errors.append(form.errors)
# Give self.clean() a chance to do cross-form validation.
try:
self.clean()
except __HOLE__, e:
self._non_form_errors = e.messages | ValidationError | dataset/ETHPy150Open dcramer/django-compositepks/django/forms/formsets.py/BaseFormSet.full_clean |
4,690 | def _extract_retry_after_timeout(response):
'''Returns the time in seconds that the server is asking us to
wait. The information is deduced from the server http response.'''
try:
seconds_to_wait = int(response.headers.get('retry-after', DEFAULT_RETRY_AFTER_503_INTERVAL))
except __HOLE__:
# retry-after could be formatted as absolute time
# instead of seconds to wait. We don't know how to
# parse that, but the apiserver doesn't generate
# such responses anyway.
seconds_to_wait = DEFAULT_RETRY_AFTER_503_INTERVAL
return max(1, seconds_to_wait)
# Truncate the message, if the error injection flag is on, and other
# conditions hold. This causes a BadRequest 400 HTTP code, which is
# subsequentally retried.
#
# Note: the minimal upload size for S3 is 5MB. In theory, you are
# supposed to get an "EntityTooSmall" error from S3, which has a 400
# code. However, I have not observed such responses in practice.
# http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html | ValueError | dataset/ETHPy150Open dnanexus/dx-toolkit/src/python/dxpy/__init__.py/_extract_retry_after_timeout |
4,691 | def DXHTTPRequest(resource, data, method='POST', headers=None, auth=True,
timeout=DEFAULT_TIMEOUT,
use_compression=None, jsonify_data=True, want_full_response=False,
decode_response_body=True, prepend_srv=True, session_handler=None,
max_retries=DEFAULT_RETRIES, always_retry=False,
**kwargs):
'''
:param resource: API server route, e.g. "/record/new". If *prepend_srv* is False, a fully qualified URL is expected. If this argument is a callable, it will be called just before each request attempt, and expected to return a tuple (URL, headers). Headers returned by the callback are updated with *headers* (including headers set by this method).
:type resource: string
:param data: Content of the request body
:type data: list or dict, if *jsonify_data* is True; or string or file-like object, otherwise
:param headers: Names and values of HTTP headers to submit with the request (in addition to those needed for authentication, compression, or other options specified with the call).
:type headers: dict
:param auth:
Controls the ``Authentication`` header or other means of authentication supplied with the request. If ``True``
(default), a token is obtained from the ``DX_SECURITY_CONTEXT``. If the value evaluates to false, no action is
taken to prepare authentication for the request. Otherwise, the value is assumed to be callable, and called with
three arguments (method, url, headers) and expected to prepare the authentication headers by reference.
:type auth: tuple, object, True (default), or None
:param timeout: HTTP request timeout, in seconds
:type timeout: float
:param config: *config* value to pass through to :meth:`requests.request`
:type config: dict
:param use_compression: Deprecated
:type use_compression: string or None
:param jsonify_data: If True, *data* is converted from a Python list or dict to a JSON string
:type jsonify_data: boolean
:param want_full_response: If True, the full :class:`requests.Response` object is returned (otherwise, only the content of the response body is returned)
:type want_full_response: boolean
:param decode_response_body: If True (and *want_full_response* is False), the response body is decoded and, if it is a JSON string, deserialized. Otherwise, the response body is uncompressed if transport compression is on, and returned raw.
:type decode_response_body: boolean
:param prepend_srv: If True, prepends the API server location to the URL
:type prepend_srv: boolean
:param session_handler: Deprecated.
:param max_retries: Maximum number of retries to perform for a request. A "failed" request is retried if any of the following is true:
- A response is received from the server, and the content length received does not match the "Content-Length" header.
- A response is received from the server, and the response has an HTTP status code in 5xx range.
- A response is received from the server, the "Content-Length" header is not set, and the response JSON cannot be parsed.
- No response is received from the server, and either *always_retry* is True or the request *method* is "GET".
:type max_retries: int
:param always_retry: If True, indicates that it is safe to retry a request on failure
- Note: It is not guaranteed that the request will *always* be retried on failure; rather, this is an indication to the function that it would be safe to do so.
:type always_retry: boolean
:returns: Response from API server in the format indicated by *want_full_response* and *decode_response_body*.
:raises: :exc:`exceptions.DXAPIError` or a subclass if the server returned a non-200 status code; :exc:`requests.exceptions.HTTPError` if an invalid response was received from the server; or :exc:`requests.exceptions.ConnectionError` if a connection cannot be established.
Wrapper around :meth:`requests.request()` that makes an HTTP
request, inserting authentication headers and (by default)
converting *data* to JSON.
.. note:: Bindings methods that make API calls make the underlying
HTTP request(s) using :func:`DXHTTPRequest`, and most of them
will pass any unrecognized keyword arguments you have supplied
through to :func:`DXHTTPRequest`.
'''
if headers is None:
headers = {}
global _UPGRADE_NOTIFY
url = APISERVER + resource if prepend_srv else resource
method = method.upper() # Convert method name to uppercase, to ease string comparisons later
if auth is True:
auth = AUTH_HELPER
if auth:
auth(_RequestForAuth(method, url, headers))
pool_args = {arg: kwargs.pop(arg, None) for arg in ("verify", "cert_file", "key_file")}
if _DEBUG >= 2:
if isinstance(data, basestring) or isinstance(data, mmap.mmap):
if len(data) == 0:
formatted_data = '""'
else:
formatted_data = "<file data>"
else:
try:
if _DEBUG >= 3:
formatted_data = json.dumps(data, indent=2)
else:
formatted_data = json.dumps(data)
except (__HOLE__, TypeError):
formatted_data = "<binary data>"
if jsonify_data:
data = json.dumps(data)
if 'Content-Type' not in headers and method == 'POST':
headers['Content-Type'] = 'application/json'
# If the input is a buffer, its data gets consumed by
# requests.request (moving the read position). Record the initial
# buffer position so that we can return to it if the request fails
# and needs to be retried.
rewind_input_buffer_offset = None
if hasattr(data, 'seek') and hasattr(data, 'tell'):
rewind_input_buffer_offset = data.tell()
try_index = 0
while True:
success, time_started = True, None
response = None
try:
if _DEBUG > 0:
time_started = time.time()
_method, _url, _headers = _process_method_url_headers(method, url, headers)
if _DEBUG >= 2:
maybe_headers = ''
if 'Range' in _headers:
maybe_headers = " " + json.dumps({"Range": _headers["Range"]})
print("%s %s%s => %s\n" % (method, _url, maybe_headers, formatted_data), file=sys.stderr, end="")
elif _DEBUG > 0:
from repr import Repr
print("%s %s => %s\n" % (method, _url, Repr().repr(data)), file=sys.stderr, end="")
body = _maybe_trucate_request(_url, try_index, data)
# throws BadStatusLine if the server returns nothing
response = _get_pool_manager(**pool_args).request(_method, _url, headers=_headers, body=body,
timeout=timeout, retries=False, **kwargs)
req_id = response.headers.get("x-request-id", "unavailable")
if _UPGRADE_NOTIFY and response.headers.get('x-upgrade-info', '').startswith('A recommended update is available') and '_ARGCOMPLETE' not in os.environ:
logger.info(response.headers['x-upgrade-info'])
try:
with file(_UPGRADE_NOTIFY, 'a'):
os.utime(_UPGRADE_NOTIFY, None)
except:
pass
_UPGRADE_NOTIFY = False
# If an HTTP code that is not in the 200 series is received and the content is JSON, parse it and throw the
# appropriate error. Otherwise, raise the usual exception.
if response.status // 100 != 2:
# response.headers key lookup is case-insensitive
if response.headers.get('content-type', '').startswith('application/json'):
content = response.data.decode('utf-8')
try:
content = json.loads(content)
except ValueError:
# The JSON is not parsable, but we should be able to retry.
raise exceptions.BadJSONInReply("Invalid JSON received from server", response.status)
try:
error_class = getattr(exceptions, content["error"]["type"], exceptions.DXAPIError)
except (KeyError, AttributeError, TypeError):
error_class = exceptions.HTTPError
raise error_class(content, response.status)
else:
content = response.data.decode('utf-8')
raise exceptions.HTTPError("{} {} [RequestID={}]\n{}".format(response.status,
response.reason,
req_id,
content))
if want_full_response:
return response
else:
if 'content-length' in response.headers:
if int(response.headers['content-length']) != len(response.data):
range_str = (' (%s)' % (headers['Range'],)) if 'Range' in headers else ''
raise exceptions.ContentLengthError(
"Received response with content-length header set to %s but content length is %d%s. " +
"[RequestID=%s]" %
(response.headers['content-length'], len(response.data), range_str, req_id)
)
content = response.data
if decode_response_body:
content = content.decode('utf-8')
if response.headers.get('content-type', '').startswith('application/json'):
try:
content = json.loads(content)
except ValueError:
# The JSON is not parsable, but we should be able to retry.
raise exceptions.BadJSONInReply("Invalid JSON received from server", response.status)
if _DEBUG > 0:
t = int((time.time() - time_started) * 1000)
req_id = response.headers.get('x-request-id')
if _DEBUG >= 3:
print(method, req_id, url, "<=", response.status, "(%dms)" % t,
"\n" + json.dumps(content, indent=2), file=sys.stderr)
elif _DEBUG == 2:
print(method, req_id, url, "<=", response.status, "(%dms)" % t, json.dumps(content),
file=sys.stderr)
elif _DEBUG > 0:
print(method, req_id, url, "<=", response.status, "(%dms)" % t, Repr().repr(content),
file=sys.stderr)
return content
raise AssertionError('Should never reach this line: expected a result to have been returned by now')
except Exception as e:
# Avoid reusing connections in the pool, since they may be
# in an inconsistent state (observed as "ResponseNotReady"
# errors).
_get_pool_manager(**pool_args).clear()
success = False
exception_msg = _extract_msg_from_last_exception()
if isinstance(e, _expected_exceptions):
if response is not None and response.status == 503:
seconds_to_wait = _extract_retry_after_timeout(response)
logger.warn("%s %s: %s. Waiting %d seconds due to server unavailability...",
method, url, exception_msg, seconds_to_wait)
time.sleep(seconds_to_wait)
# Note, we escape the "except" block here without
# incrementing try_index because 503 responses with
# Retry-After should not count against the number of
# permitted retries.
continue
# Total number of allowed tries is the initial try + up to
# (max_retries) subsequent retries.
total_allowed_tries = max_retries + 1
ok_to_retry = False
is_retryable = always_retry or (method == 'GET') or _is_retryable_exception(e)
# Because try_index is not incremented until we escape this
# iteration of the loop, try_index is equal to the number of
# tries that have failed so far, minus one. Test whether we
# have exhausted all retries.
#
# BadStatusLine --- server did not return anything
# BadJSONInReply --- server returned JSON that didn't parse properly
if try_index + 1 < total_allowed_tries:
if response is None or \
isinstance(e, (exceptions.ContentLengthError, BadStatusLine, exceptions.BadJSONInReply, \
urllib3.exceptions.ProtocolError)):
ok_to_retry = is_retryable
else:
ok_to_retry = 500 <= response.status < 600
# The server has closed the connection prematurely
if response is not None and \
response.status == 400 and is_retryable and method == 'PUT' and \
isinstance(e, requests.exceptions.HTTPError):
if '<Code>RequestTimeout</Code>' in exception_msg:
logger.info("Retrying 400 HTTP error, due to slow data transfer")
else:
logger.info("400 HTTP error, of unknown origin, exception_msg=[%s]", exception_msg)
ok_to_retry = True
if ok_to_retry:
if rewind_input_buffer_offset is not None:
data.seek(rewind_input_buffer_offset)
delay = min(2 ** try_index, DEFAULT_TIMEOUT)
range_str = (' (range=%s)' % (headers['Range'],)) if 'Range' in headers else ''
logger.warn("%s %s: %s. Waiting %d seconds before retry %d of %d... %s",
method, url, exception_msg, delay, try_index + 1, max_retries, range_str)
time.sleep(delay)
try_index += 1
continue
# All retries have been exhausted OR the error is deemed not
# retryable. Print the latest error and propagate it back to the caller.
if not isinstance(e, exceptions.DXAPIError):
logger.error("%s %s: %s", method, url, exception_msg)
# Retries have been exhausted, and we are unable to get a full
# buffer from the data source. Raise a special exception.
if isinstance(e, urllib3.exceptions.ProtocolError) and \
'Connection broken: IncompleteRead' in exception_msg:
raise exceptions.DXIncompleteReadsError(exception_msg)
raise
finally:
if success and try_index > 0:
logger.info("%s %s: Recovered after %d retries", method, url, try_index)
raise AssertionError('Should never reach this line: should have attempted a retry or reraised by now')
raise AssertionError('Should never reach this line: should never break out of loop') | UnicodeDecodeError | dataset/ETHPy150Open dnanexus/dx-toolkit/src/python/dxpy/__init__.py/DXHTTPRequest |
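The retry loop in DXHTTPRequest above sleeps min(2 ** try_index, cap) seconds between attempts and only retries failures it considers safe. A minimal sketch of that capped exponential backoff in isolation, with a hypothetical send() callable standing in for the real HTTP call:

    import time

    def with_retries(send, max_retries=5, cap=60):
        # One initial try plus up to max_retries retries, sleeping
        # 1, 2, 4, ... seconds between attempts, never more than cap.
        for attempt in range(max_retries + 1):
            try:
                return send()
            except IOError:
                if attempt == max_retries:
                    raise
                time.sleep(min(2 ** attempt, cap))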
4,692 | def setOptions(self, options):
"""
Creates a config object from the options object.
"""
from bd2k.util.humanize import human2bytes #This import is used to convert
#from human readable quantites to integers
def setOption(varName, parsingFn=None, checkFn=None):
#If options object has the option "varName" specified
#then set the "varName" attrib to this value in the config object
x = getattr(options, varName, None)
if x is not None:
if parsingFn is not None:
x = parsingFn(x)
if checkFn is not None:
try:
checkFn(x)
except __HOLE__:
raise RuntimeError("The %s option has an invalid value: %s"
% (varName, x))
setattr(self, varName, x)
# Function to parse integer from string expressed in different formats
h2b = lambda x : human2bytes(str(x))
def iC(minValue, maxValue=sys.maxint):
# Returns function that checks if a given int is in the given half-open interval
assert isinstance(minValue, int) and isinstance(maxValue, int)
return lambda x: minValue <= x < maxValue
def fC(minValue, maxValue=None):
# Returns function that checks if a given float is in the given half-open interval
assert isinstance(minValue, float)
if maxValue is None:
return lambda x: minValue <= x
else:
assert isinstance(maxValue, float)
return lambda x: minValue <= x < maxValue
#Core options
setOption("jobStore",
parsingFn=lambda x: os.path.abspath(x) if options.jobStore.startswith('.') else x)
#TODO: LOG LEVEL STRING
setOption("workDir")
setOption("stats")
setOption("cleanWorkDir")
setOption("clean")
if self.stats:
if self.clean != "never" and self.clean is not None:
raise RuntimeError("Contradicting options passed: Clean flag is set to %s "
"despite the stats flag requiring "
"the jobStore to be intact at the end of the run. "
"Set clean to \'never\'" % self.clean)
self.clean = "never"
elif self.clean is None:
self.clean = "onSuccess"
#Restarting the workflow options
setOption("restart")
#Batch system options
setOption("batchSystem")
setOption("scale", float, fC(0.0))
setOption("mesosMasterAddress")
setOption("parasolCommand")
setOption("parasolMaxBatches", int, iC(1))
setOption("environment", parseSetEnv)
#Autoscaling options
setOption("provisioner")
setOption("preemptableNodeOptions")
setOption("minPreemptableNodes", int)
setOption("maxPreemptableNodes", int)
setOption("nodeOptions")
setOption("minNodes", int)
setOption("maxNodes", int)
setOption("alphaPacking", float)
setOption("betaInertia", float)
setOption("scaleInterval", float)
#Resource requirements
setOption("defaultMemory", h2b, iC(1))
setOption("defaultCores", float, fC(1.0))
setOption("defaultDisk", h2b, iC(1))
setOption("defaultCache", h2b, iC(0))
setOption("maxCores", int, iC(1))
setOption("maxMemory", h2b, iC(1))
setOption("maxDisk", h2b, iC(1))
setOption("defaultPreemptable")
#Retrying/rescuing jobs
setOption("retryCount", int, iC(0))
setOption("maxJobDuration", int, iC(1))
setOption("rescueJobsFrequency", int, iC(1))
#Misc
setOption("maxLogFileSize", h2b, iC(1))
def checkSse(sseKey):
with open(sseKey) as f:
assert(len(f.readline().rstrip()) == 32)
setOption("sseKey", checkFn=checkSse)
setOption("cseKey", checkFn=checkSse)
setOption("servicePollingInterval", float, fC(0.0))
#Debug options
setOption("badWorker", float, fC(0.0, 1.0))
setOption("badWorkerFailInterval", float, fC(0.0)) | AssertionError | dataset/ETHPy150Open BD2KGenomics/toil/src/toil/common.py/Config.setOptions |
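setOption above pairs an optional parser (for example human2bytes) with an optional range check and reports a failed check as a readable RuntimeError. A stripped-down sketch of that parse-then-validate step, using a boolean predicate instead of the assertion-based checker (names are illustrative):

    def set_option(config, options, name, parse=None, check=None):
        value = options.get(name)
        if value is None:
            return
        if parse is not None:
            value = parse(value)
        if check is not None and not check(value):
            raise RuntimeError("The %s option has an invalid value: %s" % (name, value))
        config[name] = value

    cfg = {}
    set_option(cfg, {"maxCores": "8"}, "maxCores", parse=int, check=lambda x: x >= 1)
    print(cfg)  # {'maxCores': 8}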
4,693 | @staticmethod
def loadOrCreateJobStore(jobStoreString, config=None):
"""
Loads an existing jobStore if it already exists. Otherwise a new instance of a jobStore is
created and returned.
:param str jobStoreString: see exception message below
:param toil.common.Config config: see AbstractJobStore.__init__
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: jobStores.abstractJobStore.AbstractJobStore
"""
if jobStoreString[0] in '/.':
jobStoreString = 'file:' + jobStoreString
try:
jobStoreName, jobStoreArgs = jobStoreString.split(':', 1)
except __HOLE__:
raise RuntimeError(
'Job store string must either be a path starting in . or / or a contain at least one '
'colon separating the name of the job store implementation from an initialization '
'string specific to that job store. If a path starting in . or / is passed, the file '
'job store will be used for backwards compatibility.' )
if jobStoreName == 'file':
from toil.jobStores.fileJobStore import FileJobStore
return FileJobStore(jobStoreArgs, config=config)
elif jobStoreName == 'aws':
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore.loadOrCreateJobStore(jobStoreArgs, config=config)
elif jobStoreName == 'azure':
from toil.jobStores.azureJobStore import AzureJobStore
account, namePrefix = jobStoreArgs.split(':', 1)
return AzureJobStore(account, namePrefix, config=config)
else:
raise RuntimeError("Unknown job store implementation '%s'" % jobStoreName) | ValueError | dataset/ETHPy150Open BD2KGenomics/toil/src/toil/common.py/Toil.loadOrCreateJobStore |
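loadOrCreateJobStore above treats locators beginning with '.' or '/' as file stores and otherwise splits on the first colon to pick an implementation. A compact sketch of just that parsing step (the returned scheme would then select a store class; names here are stand-ins, not Toil's constructors):

    def parse_locator(locator):
        # "./jobstore" and "/tmp/js" become file locators;
        # "aws:us-east-1:my-store" splits into ("aws", "us-east-1:my-store").
        if locator[0] in '/.':
            locator = 'file:' + locator
        try:
            scheme, rest = locator.split(':', 1)
        except ValueError:
            raise ValueError('locator needs a "scheme:..." prefix or a path')
        return scheme, rest

    print(parse_locator('./jobstore'))           # ('file', './jobstore')
    print(parse_locator('aws:us-east-1:store'))  # ('aws', 'us-east-1:store')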
4,694 | @staticmethod
def getWorkflowDir(workflowID, configWorkDir=None):
"""
Returns a path to the directory where worker directories and the cache will be located for this
workflow.
:param str workflowID: Unique identifier for the workflow
:param str configWorkDir: Value passed to the program using the --workDir flag
:return: Path to the workflow directory
:rtype: str
"""
workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir()
if not os.path.exists(workDir):
raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not "
"exist." % workDir)
# Create the workflow dir
workflowDir = os.path.join(workDir, 'toil-%s' % workflowID)
try:
# Directory creation is atomic
os.mkdir(workflowDir)
except __HOLE__ as err:
if err.errno != 17:
# The directory exists if a previous worker set it up.
raise
else:
logger.info('Created the workflow directory at %s' % workflowDir)
return workflowDir | OSError | dataset/ETHPy150Open BD2KGenomics/toil/src/toil/common.py/Toil.getWorkflowDir |
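getWorkflowDir above relies on os.mkdir being atomic and treats errno 17 as "another worker created it first". A small sketch of the same idea using the named errno constant rather than the magic number (illustrative, not the Toil source):

    import errno
    import os

    def ensure_dir(path):
        # Atomically create the directory; losing the creation race is
        # fine, any other OSError is a real problem.
        try:
            os.mkdir(path)
            return True           # this process created it
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
            return False          # it already existed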
4,695 | def parseSetEnv(l):
"""
Parses a list of strings of the form "NAME=VALUE" or just "NAME" into a dictionary. Strings
of the latter from will result in dictionary entries whose value is None.
:type l: list[str]
:rtype: dict[str,str]
>>> parseSetEnv([])
{}
>>> parseSetEnv(['a'])
{'a': None}
>>> parseSetEnv(['a='])
{'a': ''}
>>> parseSetEnv(['a=b'])
{'a': 'b'}
>>> parseSetEnv(['a=a', 'a=b'])
{'a': 'b'}
>>> parseSetEnv(['a=b', 'c=d'])
{'a': 'b', 'c': 'd'}
>>> parseSetEnv(['a=b=c'])
{'a': 'b=c'}
>>> parseSetEnv([''])
Traceback (most recent call last):
...
ValueError: Empty name
>>> parseSetEnv(['=1'])
Traceback (most recent call last):
...
ValueError: Empty name
"""
d = dict()
for i in l:
try:
k, v = i.split('=', 1)
except __HOLE__:
k, v = i, None
if not k:
raise ValueError('Empty name')
d[k] = v
return d | ValueError | dataset/ETHPy150Open BD2KGenomics/toil/src/toil/common.py/parseSetEnv |
4,696 | def _run_env(self, name, *args):
try:
self.log.debug('Run function {}'.format(name))
return getattr(self, name)(*args)
except __HOLE__ as e:
self.log.debug('The function {} does not exist'.format(name))
except BaseException as e:
self.log.warn(
'The test is skipped because {} raised an exception'
.format(name)
)
self.log.warn(e) | AttributeError | dataset/ETHPy150Open onitu/onitu/tests/utils/benchmark.py/Benchmark._run_env |
4,697 | def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except __HOLE__:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path)) | KeyError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Scanner/__init__.py/FindPathDirs.__call__ |
4,698 | def __cmp__(self, other):
try:
return cmp(self.__dict__, other.__dict__)
except __HOLE__:
# other probably doesn't have a __dict__
return cmp(self.__dict__, other) | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Scanner/__init__.py/Base.__cmp__ |
4,699 | def select(self, node):
if SCons.Util.is_Dict(self.function):
key = node.scanner_key()
try:
return self.function[key]
except __HOLE__:
return None
else:
return self | KeyError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Scanner/__init__.py/Base.select |