Column schema (name, type, value range):
  Unnamed: 0   int64            0 to 10k
  function     string lengths   79 to 138k
  label        string classes   20 values
  info         string lengths   42 to 261

Each record below lists, in order: the row index, the function (with the target exception type masked as __HOLE__), the label (the exception class that fills the hole), and the info path of the source file in the ETHPy150Open corpus.
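As a rough, hypothetical illustration only (the file name, CSV format, and column handling below are assumptions, not part of this dump), a table with these four columns could be loaded and inspected with pandas like this:

import pandas as pd

# Assumed file name; the dump above does not specify how the table is stored.
df = pd.read_csv("exceptions.csv")

# "label" holds the exception class that fills the __HOLE__ marker in "function".
print(df["label"].value_counts())   # distribution over the 20 exception classes

row = df.iloc[0]
print(row["function"][:200])        # start of the masked code snippet
print(row["label"])                 # e.g. RuntimeError
print(row["info"])                  # source path within the ETHPy150Open corpus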
5,400
def test_wrong_initial_size(self):
    # Give a clear error message if an array variable changes size at
    # runtime compared to its initial value used to size the framework arrays
    t = set_as_top(ArrayAsmb())
    t.source.out = np.zeros(2)
    try:
        t.run()
    except __HOLE__ as err:
        self.assertEqual(str(err),
                         "Array size mis-match in 'source.out'. Initial shape was (2,) but found size (5,) at runtime")
    else:
        self.fail('RuntimeError expected')
RuntimeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_system.py/TestArrayConnectErrors.test_wrong_initial_size
5,401
def TranslationTool( model_inst, lint=False, enable_blackbox=False,
                     verilator_xinit="zeros" ):
  """Translates a PyMTL model into Python-wrapped Verilog.

  model_inst:      an un-elaborated Model instance
  lint:            run verilator linter, warnings are fatal
                   (disables -Wno-lint flag)
  enable_blackbox: also generate a .v file with black boxes
  """

  model_inst.elaborate()

  # Translate the PyMTL module to Verilog, if we've already done
  # translation check if there's been any changes to the source
  model_name      = model_inst.class_name
  verilog_file    = model_name + '.v'
  temp_file       = model_name + '.v.tmp'
  c_wrapper_file  = model_name + '_v.cpp'
  py_wrapper_file = model_name + '_v.py'
  lib_file        = 'lib{}_v.so'.format( model_name )
  obj_dir         = 'obj_dir_' + model_name
  blackbox_file   = model_name + '_blackbox' + '.v'

  vcd_en   = True
  vcd_file = ''
  try:
    vcd_en   = ( model_inst.vcd_file != '' )
    vcd_file = model_inst.vcd_file
  except __HOLE__:
    vcd_en = False

  # Write the output to a temporary file
  with open( temp_file, 'w+' ) as fd:
    verilog.translate( model_inst, fd, verilator_xinit=verilator_xinit )

  # write Verilog with black boxes
  if enable_blackbox:
    with open( blackbox_file, 'w+' ) as fd:
      verilog.translate( model_inst, fd, enable_blackbox=True,
                         verilator_xinit=verilator_xinit )

  # Check if the temporary file matches an existing file (caching)
  cached = False
  if ( exists(verilog_file) and exists(py_wrapper_file) and
       exists(lib_file) and exists(obj_dir) ):
    cached = filecmp.cmp( temp_file, verilog_file )
    # if not cached:
    #   os.system( ' diff %s %s'%( temp_file, verilog_file ))

  # Rename temp to actual output
  os.rename( temp_file, verilog_file )

  # Verilate the module only if we've updated the verilog source
  if not cached:
    #print( "NOT CACHED", verilog_file )
    verilog_to_pymtl( model_inst, verilog_file, c_wrapper_file,
                      lib_file, py_wrapper_file, vcd_en, lint,
                      verilator_xinit )
  #else:
  #  print( "CACHED", verilog_file )

  # Use some trickery to import the verilated version of the model
  sys.path.append( os.getcwd() )
  __import__( py_wrapper_file[:-3] )
  imported_module = sys.modules[ py_wrapper_file[:-3] ]

  # Get the model class from the module, instantiate and elaborate it
  model_class = imported_module.__dict__[ model_name ]
  model_inst  = model_class()

  if vcd_en:
    model_inst.vcd_file = vcd_file

  return model_inst
AttributeError
dataset/ETHPy150Open cornell-brg/pymtl/pymtl/tools/translation/verilator_sim.py/TranslationTool
5,402
def _bootstrap(self):
    from . import util
    global _current_process

    try:
        self._children = set()
        self._counter = itertools.count(1)
        try:
            os.close(sys.stdin.fileno())
        except (__HOLE__, ValueError):
            pass
        _current_process = self
        util._finalizer_registry.clear()
        util._run_after_forkers()
        util.info('child process calling self.run()')
        try:
            self.run()
            exitcode = 0
        finally:
            util._exit_function()
    except SystemExit, e:
        if not e.args:
            exitcode = 1
        elif type(e.args[0]) is int:
            exitcode = e.args[0]
        else:
            sys.stderr.write(e.args[0] + '\n')
            sys.stderr.flush()
            exitcode = 1
    except:
        exitcode = 1
        import traceback
        sys.stderr.write('Process %s:\n' % self.name)
        sys.stderr.flush()
        traceback.print_exc()

    util.info('process exiting with exitcode %d' % exitcode)
    return exitcode

#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
OSError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/multiprocessing/process.py/Process._bootstrap
5,403
def provide_batch(self):
    if self.done:
        return 0, None

    if not self.f:
        try:
            self.f = open(self.spec.replace(BSON_SCHEME, ""))
        except __HOLE__, e:
            return "error: could not open bson: %s; exception: %s" % \
                (self.spec, e), None

    batch = pump.Batch(self)

    batch_max_size = self.opts.extra['batch_max_size']
    batch_max_bytes = self.opts.extra['batch_max_bytes']

    cmd = memcacheConstants.CMD_TAP_MUTATION
    vbucket_id = 0x0000ffff
    cas, exp, flg = 0, 0, 0

    while (self.f and
           batch.size() < batch_max_size and
           batch.bytes < batch_max_bytes):
        doc_size_buf = self.f.read(4)
        if not doc_size_buf:
            self.done = True
            self.f.close()
            self.f = None
            break
        doc_size, = struct.unpack("<i", doc_size_buf)
        doc_buf = self.f.read(doc_size - 4)
        if not doc_buf:
            self.done = True
            self.f.close()
            self.f = None
            break
        doc = bson._elements_to_dict(doc_buf, dict, True)
        key = doc['_id']
        doc_json = json.dumps(doc)
        msg = (cmd, vbucket_id, key, flg, exp, cas, '', doc_json)
        batch.append(msg, len(doc))

    if batch.size() <= 0:
        return 0, None

    return 0, batch
IOError
dataset/ETHPy150Open membase/membase-cli/pump_bson.py/BSONSource.provide_batch
5,404
def __init__(self, extra_vars_func=None, options=None):
    self.get_extra_vars = extra_vars_func
    if options is None:
        options = {}
    self.options = options

    self.default_encoding = options.get('genshi.default_encoding', None)
    auto_reload = options.get('genshi.auto_reload', '1')
    if isinstance(auto_reload, str):
        auto_reload = auto_reload.lower() in ('1', 'on', 'yes', 'true')
    search_path = [p for p in
                   options.get('genshi.search_path', '').split(':') if p]
    self.use_package_naming = not search_path
    try:
        max_cache_size = int(options.get('genshi.max_cache_size', 25))
    except __HOLE__:
        raise ConfigurationError('Invalid value for max_cache_size: "%s"' %
                                 options.get('genshi.max_cache_size'))

    loader_callback = options.get('genshi.loader_callback', None)
    if loader_callback and not hasattr(loader_callback, '__call__'):
        raise ConfigurationError('loader callback must be a function')

    lookup_errors = options.get('genshi.lookup_errors', 'strict')
    if lookup_errors not in ('lenient', 'strict'):
        raise ConfigurationError('Unknown lookup errors mode "%s"' %
                                 lookup_errors)

    try:
        allow_exec = bool(options.get('genshi.allow_exec', True))
    except ValueError:
        raise ConfigurationError('Invalid value for allow_exec "%s"' %
                                 options.get('genshi.allow_exec'))

    self.loader = TemplateLoader([p for p in search_path if p],
                                 auto_reload=auto_reload,
                                 max_cache_size=max_cache_size,
                                 default_class=self.template_class,
                                 variable_lookup=lookup_errors,
                                 allow_exec=allow_exec,
                                 callback=loader_callback)
ValueError
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python3/genshi/template/plugin.py/AbstractTemplateEnginePlugin.__init__
5,405
def Collect(infile, with_headers=False, annotator_format=False, use_annotator_fdr=False, delims="", ignore="", max_pvalue=1.0, max_qvalue=None): """read input table.""" data = [] lines = filter(lambda x: x[0] != "#", infile.readlines()) if len(lines) == 0: return data if with_headers: del lines[0] if annotator_format: lines = [line for line in lines if not line.startswith("Iteration")] annotator_fdr = {} annotator_level = None for line in lines: if len(line) == 1: continue # skip trailing blank lines if line.startswith("--"): if line.startswith("-- False"): annotator_level = float( re.search("-- False Discovery summary for p-value (.+):", line).groups()[0]) annotator_fdr[annotator_level] = {} elif line.startswith("-- Category"): pass else: if re.search("insufficiently", line): continue dd = re.split("\s+", line[4:-1]) d = DataFDR() d.mObserved, d.mAverage, d.mMedian, d.m95 = map( float, dd[1:]) annotator_fdr[annotator_level][dd[0]] = d continue else: if line[0] == "Z": continue # skip header if len(line[:-1].split('\t')) != 9: continue # HACK: accounts for a bug in Annotator output try: (z, percentchange, pvalue, observed, expected, low95, up95, stddev, description) = line[:-1].split('\t')[:9] except __HOLE__: raise ValueError("# parsing error in line: %s" % line[:-1]) d = DataPoint() d.mAnnotation = description d.mPValue = float(pvalue) d.mFoldChange = 1.0 + float(percentchange) / 100.0 data.append(d) else: for line in lines: try: (code, goid, scount, stotal, spercent, bcount, btotal, bpercent, ratio, pover, punder, goid, category, description) = line[:-1].split("\t")[:14] except ValueError: raise ValueError("# parsing error in line: %s" % line[:-1]) if code == "+": p = pover else: p = punder d = DataPoint() d.mAnnotation = description d.mPValue = float(p) d.mFoldChange = float(spercent) / float(bpercent) data.append(d) # apply filters for c in delims: for d in data: d.mAnnotation = d.mAnnotation.split(c)[0] for c in ignore: for d in data: d.mAnnotation = d.mAnnotation.replace(c, '') ninput = len(data) no_fdr = False # apply filters if ninput > 0: if max_qvalue is not None: if use_annotator_fdr: pvalues = annotator_fdr.keys() pvalues.sort() pvalues.reverse() for pvalue in pvalues: try: d = annotator_fdr[pvalue]["Significant"] except KeyError: continue if d.mObserved == 0: E.info("no data remaining after fdr filtering") data = [] break elif d.mAverage / d.mObserved < max_qvalue: E.info("filtering with P-value of %f" % pvalue) data = [x for x in data if x.mPValue < pvalue] break else: E.warn("fdr could not be computed - compute more " "samples (at P = %f, actual fdr=%f)" % (pvalue, d.mAverage / d.mObserved)) no_fdr = True if no_fdr: if use_annotator_fdr: E.info("estimating FDR from observed P-Values") pvalues = [x.mPValue for x in data] vlambda = numpy.arange(0, max(pvalues), 0.05) try: qvalues = Stats.doFDR( pvalues, vlambda=vlambda, fdr_level=max_qvalue) except ValueError, msg: E.warn( "fdr could not be computed - no filtering: %s" % msg) no_fdr = True else: data = [x[0] for x in zip(data, qvalues.mPassed) if x[1]] elif max_pvalue is not None: data = [x for x in data if x.mPValue < max_pvalue] if no_fdr: data = [] nremoved = ninput - len(data) return data, nremoved, no_fdr # some definitions for the layout of the picture
ValueError
dataset/ETHPy150Open CGATOxford/cgat/scripts/go2svg.py/Collect
5,406
def addValue(self, row, col, size, colour_value):
    """add a dot in row/col.
    """

    # decide the size of the box
    pos = bisect.bisect(self.mThresholdsSize, size)

    if self.mRevertSize:
        size = self.mMaxBoxSize * \
            (1.0 - float(pos) / len(self.mThresholdsSize))
    else:
        size = self.mMaxBoxSize * float(pos) / len(self.mThresholdsSize)

    d = (self.mMaxBoxSize - size) / 2
    x = self.mMapCol2Position[col] + d

    try:
        y = self.mMapRow2Position[row] + d
    except __HOLE__:
        return

    # determine the colour of the box
    pos = bisect.bisect(self.mThresholdsColour, colour_value)
    colour = self.mColours[pos]

    e = SVGdraw.rect(x, y, size, size,
                     stroke="black",
                     fill="rgb(%i,%i,%i)" % colour)
    self.mElements.append(e)
KeyError
dataset/ETHPy150Open CGATOxford/cgat/scripts/go2svg.py/GoPlot.addValue
5,407
def main(argv=None): """script main. parses command line options in sys.argv, unless *argv* is given. """ if argv is None: argv = sys.argv parser = E.OptionParser( version="%prog version: $Id") parser.add_option("-e", "--header-names", dest="headers", action="store_true", help="first row is a header [ignored].") parser.add_option("-t", "--title", dest="title", type="string", help="page title.") parser.add_option("-f", "--footer", dest="footer", type="string", help="page footer.") parser.add_option("--maxP", dest="max_pvalue", type="float", help="maximum P-value displayed [default=%default].") parser.add_option("--maxQ", dest="max_qvalue", type="float", help="maximum Q-value for controlling for FDR [default=%default].") parser.add_option("-c", "--column-titles", dest="col_names", type="string", help="comma separated list of column titles [default: use filenames].") parser.add_option("-p", "--pattern-filename", dest="pattern_filename", type="string", help="pattern to map columns to filename.") parser.add_option("-A", "--Annotator", dest="annotator", action="store_true", help="use Annotator-style input files.") parser.add_option("--annotator-fdr", dest="annotator_fdr", action="store_true", help="use fdr computed from annotator [default=%default].") parser.add_option("-T", "--thresholds", dest="thresholds", type="string", help="7 comma-separated fold-change threshold values") parser.add_option("-P", "--pvalues", dest="pvalues", type="string", help="6 comma-separated p value threshold values"), parser.add_option("-C", "--altcolours", dest="altcolours", action="store_true", help="Use alternative colour palette") parser.add_option("-X", "--delimiters", dest="delims", type="string", help="Delimiter characters for annotation label") parser.add_option("-Z", "--ignore", dest="ignore", type="string", help="Ignored characters in annotation label") parser.add_option("--fdr", dest="fdr", type="float", help="filter output by FDR (requires annotator output). [default=%default]") parser.add_option("-a", "--template", dest="template", type="choice", choices=("screen", "publication"), help="layout template to choose - affects colours.") parser.add_option("--sort-columns", dest="sort_columns", type="choice", choices=("unsorted", "similarity", "alphabetical", ), help="sort columns. 
The default, unsorted, list columns in the order that they are supplied on the command line [default=%default]") parser.set_defaults( sortAlphabetically=True, headers=False, col_names="", pattern_filename=None, title="", footer="", max_pvalue=None, max_qvalue=None, annotator=False, thresholds="0.25,0.33,0.5,1.0,2.0,3.0,4.0", pvalues="0.00001,0.0001,0.001,0.01,0.1", altcolours=False, delims="", ignore="", template="screen", annotator_fdr=False, fdr=None, sort_columns="unsorted", ) (options, args) = E.Start(parser, add_pipe_options=True) if len(args) == 0: raise IOError("Please supply at least one input file.") if options.pattern_filename: input = [] col_names = args for x in col_names: input.append(options.pattern_filename % x) else: input = args if options.col_names: col_names = options.col_names.split(",") if len(col_names) != len(input): raise ValueError( "Number of col_names and files different: %i != %i" % (len(col_names), len(input))) else: col_names = input E.info("reading data for %i columns" % len(input)) columns = [] errors = [] for col_name, filename in zip(col_names, input): E.debug("reading data for column %s from %s " % (col_name, filename)) # collect all columns try: values, nremoved, no_fdr = Collect( open(filename, "r"), with_headers=options.headers, annotator_format=options.annotator, delims=options.delims, ignore=options.ignore, use_annotator_fdr=options.annotator_fdr, max_pvalue=options.max_pvalue, max_qvalue=options.max_qvalue) except __HOLE__: E.warn("no data from %s" % filename) values = [] no_fdr = False nremoved = 0 E.info("read %i values from %s: %i significant, %i removed" % (len(values) + nremoved, filename, len(values), nremoved)) columns.append((col_name, values)) errors.append(no_fdr) if sum([len(x) for x in columns]) == 0: raise IOError("no data read - please check supplied files.") # collect all annotations # Also filter for max pvalue annotations = set() for col_name, column in columns: for d in column: annotations.add(d.mAnnotation) E.info("There are %i rows" % len(annotations)) # sort and filter annotations # (Code removed which did some filtering; the annotations data is not used) # By removing labels from annlist you can select the annotations you want # to display row_names = list(annotations) if options.sortAlphabetically: row_names.sort() if options.sort_columns == "unsorted": pass elif options.sort_columns == "alphabetical": col_names.sort() elif options.sort_columns == "similarity": if len(row_names) * len(col_names) > 10000: E.info("no sorting as matrix too large") else: matrix = numpy.ones((len(row_names), len(col_names)), numpy.float) map_rows = dict(zip(row_names, range(len(row_names)))) x = 0 for col_name, column in columns: for d in column: matrix[map_rows[d.mAnnotation], x] = d.mFoldChange x += 1 row_indices, col_indices = CorrespondenceAnalysis.GetIndices( matrix) map_row_new2old = numpy.argsort(row_indices) map_col_new2old = numpy.argsort(col_indices) row_names = [row_names[map_row_new2old[x]] for x in range(len(row_names))] col_names = [col_names[map_col_new2old[x]] for x in range(len(col_names))] E.info("columns have been sorted") plot = GoPlot(row_names, col_names, thresholds_size=tuple( map(float, options.pvalues.split(','))), thresholds_colour=tuple( map(float, options.thresholds.split(','))), template=options.template, alt_colours=options.altcolours, max_pvalue=options.max_pvalue, max_qvalue=options.max_qvalue, mark_columns=errors) if options.title: plot.setTitle(options.title) if options.footer: plot.setFooter(options.footer) 
plot.initializePlot() for col_name, column in columns: for d in column: plot.addValue(d.mAnnotation, col_name, d.mPValue, d.mFoldChange) plot.writeToFile(options.stdout) E.Stop()
IOError
dataset/ETHPy150Open CGATOxford/cgat/scripts/go2svg.py/main
5,408
def _stop_current_lights(self):
    if self.debug:
        self.log.debug("Stopping current lights. Show: %s",
                       self.running_light_show)

    try:
        self.running_light_show.stop(hold=False, reset=False)
    except __HOLE__:
        pass

    if self.debug:
        self.log.debug("Setting current light show to: None")

    self.running_light_show = None
AttributeError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot._stop_current_lights
5,409
def hit(self, mode='default#$%', waterfall_hits=None, **kwargs):
    """Method which is called to indicate this shot was just hit. This
    method will advance the currently-active shot profile.

    Args:
        force: Boolean that forces this hit to be registered. Default is
            False which means if there are no balls in play (e.g. after a
            tilt) then this hit isn't processed. Set this to True if you
            want to force the hit to be processed even if no balls are in
            play.

    Note that the shot must be enabled in order for this hit to be
    processed.
    """
    if (not self.machine.game or
            (self.machine.game and not self.machine.game.balls_in_play) or
            self.active_delay_switches):
        return

    if mode == 'default#$%':
        mode = self.active_mode

    profile, state = self.get_mode_state(mode)

    if self.debug:
        self.log.debug("Hit! Mode: %s, Profile: %s, State: %s",
                       mode, profile, state)

    # do this before the events are posted since events could change the
    # profile
    if not self.enable_table[mode]['settings']['block']:
        need_to_waterfall = True
    else:
        need_to_waterfall = False

    # post events
    self.machine.events.post(self.name + '_hit',
                             profile=profile, state=state)
    self.machine.events.post(self.name + '_' + profile + '_hit',
                             profile=profile, state=state)
    self.machine.events.post(self.name + '_' + profile + '_' + state + '_hit',
                             profile=profile, state=state)

    # Need to try because the event postings above could be used to stop
    # the mode, in which case the mode entry won't be in the enable_table
    try:
        advance = self.enable_table[mode]['settings']['advance_on_hit']
    except __HOLE__:
        advance = False

    if advance:
        if self.debug:
            self.log.debug("Mode '%s' advance_on_hit is True.", mode)
        self.advance(mode=mode)
    elif self.debug:
        self.log.debug('Not advancing profile state since the current '
                       'mode %s has setting advance_on_hit set to '
                       'False or this mode is not in the enable_table',
                       mode)

    for group in [x for x in self.groups]:
        self.log.debug("Notifying shot_group %s of new hit", group)
        group.hit(mode, profile, state)

    if Shot.monitor_enabled:
        for callback in self.machine.monitors['shots']:
            callback(name=self.name, profile=profile, state=state)

    if need_to_waterfall:
        if self.debug:
            self.log.debug('%s block: False. Waterfalling hits', mode)
        if not waterfall_hits:
            waterfall_hits = set()
        self._waterfall_hits(mode, waterfall_hits.add(profile))
    elif self.debug:
        self.log.debug('%s settings has block enabled', mode)
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot.hit
5,410
def update_enable_table(self, profile=None, enable=None, mode=None):
    if mode:
        priority = mode.priority
    else:
        priority = 0

    if not profile:
        try:
            profile = self.enable_table[mode]['profile']
        except KeyError:
            profile = self.config['profile']

    if not enable:
        try:
            enable = self.enable_table[mode]['enable']
        except __HOLE__:
            enable = False

    profile_settings = (
        self.machine.shot_profile_manager.profiles[profile].copy())

    profile_settings['player_variable'] = (
        profile_settings['player_variable'].replace('%', self.name))

    this_entry = {'priority': priority,
                  'profile': profile,
                  'enable': enable,
                  'settings': profile_settings,
                  'current_state_name': None
                  }

    if self.debug:
        self.log.debug("Updating the entry table with: %s:%s", mode,
                       this_entry)

    self.enable_table[mode] = this_entry

    self.update_current_state_name(mode)
    self._sort_enable_table()
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot.update_enable_table
5,411
def remove_from_enable_table(self, mode):
    if self.debug:
        self.log.debug("Removing mode: %s from enable_table", mode)

    try:
        del self.enable_table[mode]
        self._sort_enable_table()
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot.remove_from_enable_table
5,412
def update_current_state_name(self, mode):
    if self.debug:
        self.log.debug("Old current state name for mode %s: %s",
                       mode, self.enable_table[mode]['current_state_name'])

    try:
        self.enable_table[mode]['current_state_name'] = (
            self.enable_table[mode]['settings']['states']
            [self.player[self.enable_table[mode]['settings']
            ['player_variable']]]['name'])
    except __HOLE__:
        self.enable_table[mode]['current_state_name'] = None

    if self.debug:
        self.log.debug("New current state name for mode %s: %s",
                       mode, self.enable_table[mode]['current_state_name'])
TypeError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot.update_current_state_name
5,413
def add_to_group(self, group):
    if self.debug:
        self.log.debug("Received request to add this shot to the %s group",
                       group)

    if type(group) is str:
        try:
            group = self.machine.shot_groups[group]
        except __HOLE__:
            if self.debug:
                self.log.debug("'%s' is not a valid shot_group name.",
                               group)
            return

    self.groups.add(group)
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot.add_to_group
5,414
def remove_from_group(self, group):
    if self.debug:
        self.log.debug("Received request to remove this shot from the %s "
                       "group", group)

    if type(group) is str:
        try:
            group = self.machine.shot_groups[group]
        except __HOLE__:
            if self.debug:
                self.log.debug("'%s' is not a valid shot_group name.",
                               group)
            return

    self.groups.discard(group)
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/devices/shot.py/Shot.remove_from_group
5,415
def run(self):
    try:
        self.conn.main()
    except __HOLE__:
        print('Exiting on keyboard interrupt')
KeyboardInterrupt
dataset/ETHPy150Open acrisci/i3ipc-python/examples/stop-application-on-unfocus.py/FocusMonitor.run
5,416
def search(name, year=None):
    if name is None or name == '':
        raise Exception

    if isinstance(name, unicode):
        name = name.encode('utf8')

    endpoint = TMDB_HOST + '/search/movie'
    payload = {'api_key': TMDB_API_KEY, 'query': urllib.quote_plus(str(name))}
    if year is not None:
        payload['year'] = year

    try:
        response = requests.get(endpoint, params=payload, timeout=5)
    except (requests.exceptions.Timeout,
            requests.exceptions.ConnectionError):
        raise Exception

    try:
        result = json.loads(response.text)
        return result['results']
    except __HOLE__:
        raise Exception
ValueError
dataset/ETHPy150Open divijbindlish/movienamer/movienamer/tmdb.py/search
5,417
def value_from_object(self, obj):
    """If the field template is a :class:`DateField` or a
    :class:`DateTimeField`, this will convert the default return value to a
    datetime instance."""
    value = super(JSONAttribute, self).value_from_object(obj)
    if isinstance(self.field_template, (models.DateField,
                                        models.DateTimeField)):
        try:
            value = self.field_template.to_python(value)
        except __HOLE__:
            value = None
    return value
ValidationError
dataset/ETHPy150Open ithinksw/philo/philo/models/fields/entities.py/JSONAttribute.value_from_object
5,418
def handle_noargs(self, **options):
    database = options.get('database')
    connection = connections[database]
    verbosity = int(options.get('verbosity'))
    interactive = options.get('interactive')
    # The following are stealth options used by Django's internals.
    reset_sequences = options.get('reset_sequences', True)
    allow_cascade = options.get('allow_cascade', False)
    inhibit_post_syncdb = options.get('inhibit_post_syncdb', False)

    self.style = no_style()

    # Import the 'management' module within each installed app, to register
    # dispatcher events.
    for app_name in settings.INSTALLED_APPS:
        try:
            import_module('.management', app_name)
        except __HOLE__:
            pass

    sql_list = sql_flush(self.style, connection, only_django=True,
                         reset_sequences=reset_sequences,
                         allow_cascade=allow_cascade)

    if interactive:
        confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?

    Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
    else:
        confirm = 'yes'

    if confirm == 'yes':
        try:
            with transaction.atomic(using=database,
                                    savepoint=connection.features.can_rollback_ddl):
                cursor = connection.cursor()
                for sql in sql_list:
                    cursor.execute(sql)
        except Exception as e:
            new_msg = (
                "Database %s couldn't be flushed. Possible reasons:\n"
                "  * The database isn't running or isn't configured correctly.\n"
                "  * At least one of the expected database tables doesn't exist.\n"
                "  * The SQL was invalid.\n"
                "Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.\n"
                "The full error: %s") % (connection.settings_dict['NAME'], e)
            six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])

        if not inhibit_post_syncdb:
            self.emit_post_syncdb(verbosity, interactive, database)

        # Reinstall the initial_data fixture.
        if options.get('load_initial_data'):
            # Reinstall the initial_data fixture.
            call_command('loaddata', 'initial_data', **options)

    else:
        self.stdout.write("Flush cancelled.\n")
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/management/commands/flush.py/Command.handle_noargs
5,419
def get_suffixes( txt_file, suffix_file, stdout_file="", threshold=sys.maxint, prefix=u'__' ):
    """
    Replace all words in <txt_file> with suffixes where possible.
    Set of suffixes must be provided in <suffix_file>
    The new corpus is written to <stdout_file> or to standard output if no file provided
    <prefix> -- string to replace the non-suffix part of the word
                (default '__': information -> __tion)
    Words are replaced with suffixes only if occurred in corpus less times than <threshold>
    Default: no threshold (all words replaced)
    """
    out = open( stdout_file, 'w' ) if stdout_file else sys.stdout

    sys.stderr.write('Loading corpus\n')
    my_corp = corpora.TextCorpus(txt_file)
    sys.stderr.write('Building suffix list\n')
    suffix_list = form_suffix_list(suffix_file)
    sys.stderr.write('Suffix search\n')
    # replace only words that occur in corpus less times than threshold
    # default - no threshold (all words are replaced with suffix)
    dict_copy = dict( [ (token,find_suffix(token, suffix_list, prefix=prefix))
                        if my_corp.dictionary.dfs[id] < threshold
                        else (token,token)
                        for (id, token) in my_corp.dictionary.items() ] )
    print dict_copy

    sys.stderr.write('Output\n')
    cnt = 0
    in_file = open(txt_file)
    for line in in_file:
        cnt += 1
        if cnt%10000 == 0:
            sys.stderr.write('.')
        words = line[:-1].decode('utf-8').split()
        for w in words:
            try:
                out.write("%s " % dict_copy[w].encode('utf-8'))
            except __HOLE__:
                dict_copy[w] = w
                out.write("%s " % dict_copy[w].encode('utf-8'))
        out.write("\n")
    in_file.close()
    if stdout_file:
        out.close()
KeyError
dataset/ETHPy150Open qe-team/marmot/marmot/preprocessing/get_suffixes.py/get_suffixes
5,420
def catch_notimplementederror(f):
    """Decorator to simplify catching drivers raising NotImplementedError

    If a particular call makes a driver raise NotImplementedError, we
    log it so that we can extract this information afterwards as needed.
    """
    def wrapped_func(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except __HOLE__:
            frame = traceback.extract_tb(sys.exc_info()[2])[-1]
            LOG.error("%(driver)s does not implement %(method)s "
                      "required for test %(test)s" %
                      {'driver': type(self.connection),
                       'method': frame[2], 'test': f.__name__})

    wrapped_func.__name__ = f.__name__
    wrapped_func.__doc__ = f.__doc__
    return wrapped_func
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/tests/unit/virt/test_virt_drivers.py/catch_notimplementederror
5,421
def _check_available_resource_fields(self, host_status):
    super(FakeConnectionTestCase, self)._check_available_resource_fields(
        host_status)

    hypervisor_type = host_status['hypervisor_type']
    supported_instances = host_status['supported_instances']
    try:
        # supported_instances could be JSON wrapped
        supported_instances = jsonutils.loads(supported_instances)
    except __HOLE__:
        pass
    self.assertTrue(any(hypervisor_type in x for x in supported_instances))
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/tests/unit/virt/test_virt_drivers.py/FakeConnectionTestCase._check_available_resource_fields
5,422
def __contains__(self, item):
    try:
        self[item]
        return True
    except __HOLE__:
        return False
KeyError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Entity.__contains__
5,423
def __contains__(self, name):
    """Is there at least one entry called *name* in this collection?

    Makes a single roundtrip to the server, plus at most two more
    if the ``autologin`` field of :func:`connect` is set to ``True``.
    """
    try:
        self[name]
        return True
    except __HOLE__:
        return False
    except AmbiguousReferenceException:
        return True
KeyError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/ReadOnlyCollection.__contains__
5,424
def __getitem__(self, key): """Fetch an item named *key* from this collection. A name is not a unique identifier in a collection. The unique identifier is a name plus a namespace. For example, there can be a saved search named ``'mysearch'`` with sharing ``'app'`` in application ``'search'``, and another with sharing ``'user'`` with owner ``'boris'`` and application ``'search'``. If the ``Collection`` is attached to a ``Service`` that has ``'-'`` (wildcard) as user and app in its namespace, then both of these may be visible under the same name. Where there is no conflict, ``__getitem__`` will fetch the entity given just the name. If there is a conflict and you pass just a name, it will raise a ``ValueError``. In that case, add the namespace as a second argument. This function makes a single roundtrip to the server, plus at most two additional round trips if the ``autologin`` field of :func:`connect` is set to ``True``. :param key: The name to fetch, or a tuple (name, namespace). :return: An :class:`Entity` object. :raises KeyError: Raised if *key* does not exist. :raises ValueError: Raised if no namespace is specified and *key* does not refer to a unique name. *Example*:: s = client.connect(...) saved_searches = s.saved_searches x1 = saved_searches.create( 'mysearch', 'search * | head 1', owner='admin', app='search', sharing='app') x2 = saved_searches.create( 'mysearch', 'search * | head 1', owner='admin', app='search', sharing='user') # Raises ValueError: saved_searches['mysearch'] # Fetches x1 saved_searches[ 'mysearch', client.namespace(sharing='app', app='search')] # Fetches x2 saved_searches[ 'mysearch', client.namespace(sharing='user', owner='boris', app='search')] """ try: if isinstance(key, tuple) and len(key) == 2: # x[a,b] is translated to x.__getitem__( (a,b) ), so we # have to extract values out. key, ns = key key = UrlEncoded(key, encode_slash=True) response = self.get(key, owner=ns.owner, app=ns.app) else: key = UrlEncoded(key, encode_slash=True) response = self.get(key) entries = self._load_list(response) if len(entries) > 1: raise AmbiguousReferenceException("Found multiple entities named '%s'; please specify a namespace." % key) elif len(entries) == 0: raise KeyError(key) else: return entries[0] except __HOLE__ as he: if he.status == 404: # No entity matching key and namespace. raise KeyError(key) else: raise
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/ReadOnlyCollection.__getitem__
5,425
def delete(self, name, **params):
    """Deletes a specified entity from the collection.

    :param name: The name of the entity to delete.
    :type name: ``string``
    :return: The collection.
    :rtype: ``self``

    This method is implemented for consistency with the REST API's DELETE
    method.

    If there is no *name* entity on the server, a ``KeyError`` is
    thrown. This function always makes a roundtrip to the server.

    **Example**::

        import splunklib.client as client
        c = client.connect(...)
        saved_searches = c.saved_searches
        saved_searches.create('my_saved_search',
                              'search * | head 1')
        assert 'my_saved_search' in saved_searches
        saved_searches.delete('my_saved_search')
        assert 'my_saved_search' not in saved_searches
    """
    name = UrlEncoded(name, encode_slash=True)
    if 'namespace' in params:
        namespace = params.pop('namespace')
        params['owner'] = namespace.owner
        params['app'] = namespace.app
        params['sharing'] = namespace.sharing
    try:
        self.service.delete(_path(self.path, name), **params)
    except __HOLE__ as he:
        # An HTTPError with status code 404 means that the entity
        # has already been deleted, and we reraise it as a
        # KeyError.
        if he.status == 404:
            raise KeyError("No such entity %s" % name)
        else:
            raise
    return self
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Collection.delete
5,426
def __getitem__(self, key):
    # The superclass implementation is designed for collections that contain
    # entities. This collection (Configurations) contains collections
    # (ConfigurationFile).
    #
    # The configurations endpoint returns multiple entities when we ask for a single file.
    # This screws up the default implementation of __getitem__ from Collection, which thinks
    # that multiple entities means a name collision, so we have to override it here.
    try:
        response = self.get(key)
        return ConfigurationFile(self.service, PATH_CONF % key, state={'title': key})
    except __HOLE__ as he:
        if he.status == 404:  # No entity matching key
            raise KeyError(key)
        else:
            raise
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Configurations.__getitem__
5,427
def __contains__(self, key):
    # configs/conf-{name} never returns a 404. We have to post to properties/{name}
    # in order to find out if a configuration exists.
    try:
        response = self.get(key)
        return True
    except __HOLE__ as he:
        if he.status == 404:  # No entity matching key
            return False
        else:
            raise
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Configurations.__contains__
5,428
def __getitem__(self, key):
    # The key needed to retrieve the input needs it's parenthesis to be URL encoded
    # based on the REST API for input
    # <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTinput>
    if isinstance(key, tuple) and len(key) == 2:
        # Fetch a single kind
        key, kind = key
        key = UrlEncoded(key, encode_slash=True)
        try:
            response = self.get(self.kindpath(kind) + "/" + key)
            entries = self._load_list(response)
            if len(entries) > 1:
                raise AmbiguousReferenceException("Found multiple inputs of kind %s named %s." % (kind, key))
            elif len(entries) == 0:
                raise KeyError((key, kind))
            else:
                return entries[0]
        except HTTPError as he:
            if he.status == 404:  # No entity matching kind and key
                raise KeyError((key, kind))
            else:
                raise
    else:
        # Iterate over all the kinds looking for matches.
        kind = None
        candidate = None
        key = UrlEncoded(key, encode_slash=True)
        for kind in self.kinds:
            try:
                response = self.get(kind + "/" + key)
                entries = self._load_list(response)
                if len(entries) > 1:
                    raise AmbiguousReferenceException("Found multiple inputs of kind %s named %s." % (kind, key))
                elif len(entries) == 0:
                    pass
                else:
                    if candidate is not None:  # Already found at least one candidate
                        raise AmbiguousReferenceException("Found multiple inputs named %s, please specify a kind" % key)
                    candidate = entries[0]
            except __HOLE__ as he:
                if he.status == 404:
                    pass  # Just carry on to the next kind.
                else:
                    raise
        if candidate is None:
            raise KeyError(key)  # Never found a match.
        else:
            return candidate
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Inputs.__getitem__
5,429
def __contains__(self, key):
    if isinstance(key, tuple) and len(key) == 2:
        # If we specify a kind, this will shortcut properly
        try:
            self.__getitem__(key)
            return True
        except __HOLE__:
            return False
    else:
        # Without a kind, we want to minimize the number of round trips to the server, so we
        # reimplement some of the behavior of __getitem__ in order to be able to stop searching
        # on the first hit.
        for kind in self.kinds:
            try:
                response = self.get(self.kindpath(kind) + "/" + key)
                entries = self._load_list(response)
                if len(entries) > 0:
                    return True
                else:
                    pass
            except HTTPError as he:
                if he.status == 404:
                    pass  # Just carry on to the next kind.
                else:
                    raise
        return False
KeyError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Inputs.__contains__
5,430
def list(self, *kinds, **kwargs): """Returns a list of inputs that are in the :class:`Inputs` collection. You can also filter by one or more input kinds. This function iterates over all possible inputs, regardless of any arguments you specify. Because the :class:`Inputs` collection is the union of all the inputs of each kind, this method implements parameters such as "count", "search", and so on at the Python level once all the data has been fetched. The exception is when you specify a single input kind, and then this method makes a single request with the usual semantics for parameters. :param kinds: The input kinds to return (optional). - "ad": Active Directory - "monitor": Files and directories - "registry": Windows Registry - "script": Scripts - "splunktcp": TCP, processed - "tcp": TCP, unprocessed - "udp": UDP - "win-event-log-collections": Windows event log - "win-perfmon": Performance monitoring - "win-wmi-collections": WMI :type kinds: ``string`` :param kwargs: Additional arguments (optional): - "count" (``integer``): The maximum number of items to return. - "offset" (``integer``): The offset of the first item to return. - "search" (``string``): The search query to filter responses. - "sort_dir" (``string``): The direction to sort returned items: "asc" or "desc". - "sort_key" (``string``): The field to use for sorting (optional). - "sort_mode" (``string``): The collating sequence for sorting returned items: "auto", "alpha", "alpha_case", or "num". :type kwargs: ``dict`` :return: A list of input kinds. :rtype: ``list`` """ if len(kinds) == 0: kinds = self.kinds if len(kinds) == 1: kind = kinds[0] logging.debug("Inputs.list taking short circuit branch for single kind.") path = self.kindpath(kind) logging.debug("Path for inputs: %s", path) try: path = UrlEncoded(path, skip_encode=True) response = self.get(path, **kwargs) except __HOLE__, he: if he.status == 404: # No inputs of this kind return [] entities = [] entries = _load_atom_entries(response) if entries is None: return [] # No inputs in a collection comes back with no feed or entry in the XML for entry in entries: state = _parse_atom_entry(entry) # Unquote the URL, since all URL encoded in the SDK # should be of type UrlEncoded, and all str should not # be URL encoded. path = urllib.unquote(state.links.alternate) entity = Input(self.service, path, kind, state=state) entities.append(entity) return entities search = kwargs.get('search', '*') entities = [] for kind in kinds: response = None try: kind = UrlEncoded(kind, skip_encode=True) response = self.get(self.kindpath(kind), search=search) except HTTPError as e: if e.status == 404: continue # No inputs of this kind else: raise entries = _load_atom_entries(response) if entries is None: continue # No inputs to process for entry in entries: state = _parse_atom_entry(entry) # Unquote the URL, since all URL encoded in the SDK # should be of type UrlEncoded, and all str should not # be URL encoded. 
path = urllib.unquote(state.links.alternate) entity = Input(self.service, path, kind, state=state) entities.append(entity) if 'offset' in kwargs: entities = entities[kwargs['offset']:] if 'count' in kwargs: entities = entities[:kwargs['count']] if kwargs.get('sort_mode', None) == 'alpha': sort_field = kwargs.get('sort_field', 'name') if sort_field == 'name': f = lambda x: x.name.lower() else: f = lambda x: x[sort_field].lower() entities = sorted(entities, key=f) if kwargs.get('sort_mode', None) == 'alpha_case': sort_field = kwargs.get('sort_field', 'name') if sort_field == 'name': f = lambda x: x.name else: f = lambda x: x[sort_field] entities = sorted(entities, key=f) if kwargs.get('sort_dir', 'asc') == 'desc': entities = list(reversed(entities)) return entities
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Inputs.list
5,431
def cancel(self):
    """Stops the current search and deletes the results cache.

    :return: The :class:`Job`.
    """
    try:
        self.post("control", action="cancel")
    except __HOLE__ as he:
        if he.status == 404:
            # The job has already been cancelled, so
            # cancelling it twice is a nop.
            pass
        else:
            raise
    return self
HTTPError
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/client.py/Job.cancel
5,432
def check(db, rev_id, page_id=None, radius=defaults.RADIUS, check_archive=False, before=None, window=None): """ Checks whether a revision was reverted (identity) and returns a named tuple of Revert(reverting, reverteds, reverted_to). :Parameters: db : `mw.database.DB` A database connection to make use of. rev_id : int the ID of the revision to check page_id : int the ID of the page the revision occupies (slower if not provided) radius : int a positive integer indicating the maximum number of revisions that can be reverted check_archive : bool should the archive table be checked for reverting revisions? before : `Timestamp` if set, limits the search for *reverting* revisions to those which were saved before this timestamp window : int if set, limits the search for *reverting* revisions to those which were saved within `window` seconds after the reverted edit """ if not hasattr(db, "revisions") and hasattr(db, "all_revisions"): raise TypeError("db wrong type. Expected a mw.database.DB.") rev_id = int(rev_id) radius = int(radius) if radius < 1: raise TypeError("invalid radius. Expected a positive integer.") page_id = none_or(page_id, int) check_archive = bool(check_archive) before = none_or(before, Timestamp) # If we are searching the archive, we'll need to use `all_revisions`. if check_archive: dbrevs = db.all_revisions else: dbrevs = db.revisions # If we don't have the sha1 or page_id, we're going to need to look them up if page_id is None: row = dbrevs.get(rev_id=rev_id) page_id = row['rev_page'] # Load history and current rev current_and_past_revs = list(dbrevs.query( page_id=page_id, limit=radius + 1, before_id=rev_id + 1, # Ensures that we capture the current revision direction="older" )) try: # Extract current rev and reorder history current_rev, past_revs = ( current_and_past_revs[0], # Current rev is the first one returned reversed(current_and_past_revs[1:]) # The rest are past revs, but they are in the wrong order ) except __HOLE__: # Only way to get here is if there isn't enough history. Couldn't be # reverted. Just return None. return None if window is not None and before is None: before = Timestamp(current_rev['rev_timestamp']) + window # Load future revisions future_revs = dbrevs.query( page_id=page_id, limit=radius, after_id=rev_id, before=before, direction="newer" ) # Convert to an iterable of (checksum, rev) pairs for detect() to consume checksum_revisions = chain( ((rev['rev_sha1'] if rev['rev_sha1'] is not None \ else DummyChecksum(), rev) for rev in past_revs), [(current_rev['rev_sha1'] or DummyChecksum(), current_rev)], ((rev['rev_sha1'] if rev['rev_sha1'] is not None \ else DummyChecksum(), rev) for rev in future_revs) ) for revert in detect(checksum_revisions, radius=radius): # Check that this is a relevant revert if rev_id in [rev['rev_id'] for rev in revert.reverteds]: return revert return None
IndexError
dataset/ETHPy150Open mediawiki-utilities/python-mediawiki-utilities/mw/lib/reverts/database.py/check
5,433
@exception.wrap_pecan_controller_exception
@pecan.expose('json', content_type='application/json-patch+json')
def patch(self, uuid):
    """Patch an existing CAMP-style plan."""
    handler = (plan_handler.
               PlanHandler(pecan.request.security_context))
    plan_obj = handler.get(uuid)

    # TODO(gilbert.pilz@oracle.com) check if there are any assemblies that
    # refer to this plan and raise an PlanStillReferenced exception if
    # there are.

    if not pecan.request.body or len(pecan.request.body) < 1:
        raise exception.BadRequest(reason='empty request body')

    # check to make sure the request has the right Content-Type
    if (pecan.request.content_type is None or
            pecan.request.content_type != 'application/json-patch+json'):
        raise exception.UnsupportedMediaType(
            name=pecan.request.content_type, method='PATCH')

    try:
        patch = jsonpatch.JsonPatch.from_string(pecan.request.body)
        patched_obj = patch.apply(plan_obj.refined_content())
        db_obj = handler.update(uuid, patched_obj)
    except __HOLE__:
        # a key error indicates one of the patch operations is missing a
        # component
        raise exception.BadRequest(reason=MAL_PATCH_ERR)
    except jsonpatch.JsonPatchConflict:
        raise exception.Unprocessable
    except jsonpatch.JsonPatchException as jpe:
        raise JsonPatchProcessingException(reason=six.text_type(jpe))

    return fluff_plan(db_obj.refined_content(), db_obj.uuid)
KeyError
dataset/ETHPy150Open openstack/solum/solum/api/controllers/camp/v1_1/plans.py/PlansController.patch
5,434
@exception.wrap_pecan_controller_exception @pecan.expose('json', content_type='application/x-yaml') def post(self): """Create a new CAMP-style plan.""" if not pecan.request.body or len(pecan.request.body) < 1: raise exception.BadRequest # check to make sure the request has the right Content-Type if (pecan.request.content_type is None or pecan.request.content_type != 'application/x-yaml'): raise exception.UnsupportedMediaType( name=pecan.request.content_type, method='POST') try: yaml_input_plan = yamlutils.load(pecan.request.body) except __HOLE__ as excp: raise exception.BadRequest(reason='Plan is invalid. ' + six.text_type(excp)) camp_version = yaml_input_plan.get('camp_version') if camp_version is None: raise exception.BadRequest( reason='camp_version attribute is missing from submitted Plan') elif camp_version != 'CAMP 1.1': raise exception.BadRequest(reason=UNSUP_VER_ERR % camp_version) # Use Solum's handler as the point of commonality. We can do this # because Solum stores plans in the DB in their JSON form. handler = (plan_handler. PlanHandler(pecan.request.security_context)) model_plan = model.Plan(**yaml_input_plan) # Move any inline Service Specifications to the "services" section. # This avoids an issue where WSME can't properly handle multi-typed # attributes (e.g. 'fulfillment'). It also smoothes out the primary # difference between CAMP plans and Solum plans, namely that Solum # plans don't have inline Service Specifications. for art in model_plan.artifacts: if art.requirements != wsme.Unset: for req in art.requirements: if (req.fulfillment != wsme.Unset and isinstance(req.fulfillment, model.ServiceSpecification)): s_spec = req.fulfillment # if the inline service spec doesn't have an id # generate one if s_spec.id == wsme.Unset: s_spec.id = str(uuid.uuid4()) # move the inline service spec to the 'services' # section if model_plan.services == wsme.Unset: model_plan.services = [s_spec] else: model_plan.services.append(s_spec) # set the fulfillment to the service spec id req.fulfillment = "id:%s" % s_spec.id db_obj = handler.create(clean_plan(wjson.tojson(model.Plan, model_plan))) plan_dict = fluff_plan(db_obj.refined_content(), db_obj.uuid) pecan.response.status = 201 pecan.response.location = plan_dict['uri'] return plan_dict
ValueError
dataset/ETHPy150Open openstack/solum/solum/api/controllers/camp/v1_1/plans.py/PlansController.post
5,435
def forward(app):
    app = TestApp(RecursiveMiddleware(app))
    res = app.get('')
    assert res.header('content-type') == 'text/plain'
    assert res.full_status == '200 OK'
    assert 'requested page returned' in res
    res = app.get('/error')
    assert res.header('content-type') == 'text/plain'
    assert res.full_status == '200 OK'
    assert 'Page not found' in res
    res = app.get('/not_found')
    assert res.header('content-type') == 'text/plain'
    assert res.full_status == '200 OK'
    assert 'Page not found' in res
    try:
        res = app.get('/recurse')
    except __HOLE__ as e:
        if str(e).startswith('Forwarding loop detected'):
            pass
        else:
            raise AssertionError('Failed to detect forwarding loop')
AssertionError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/tests/test_recursive.py/forward
5,436
def test_ForwardRequest_factory():

    from paste.errordocument import StatusKeeper

    class TestForwardRequestMiddleware(Middleware):
        def __call__(self, environ, start_response):
            if environ['PATH_INFO'] != '/not_found':
                return self.app(environ, start_response)
            environ['PATH_INFO'] = self.url
            def factory(app):
                return StatusKeeper(app, status='404 Not Found', url='/error', headers=[])
            raise ForwardRequestException(factory=factory)

    app = TestForwardRequestMiddleware(error_docs_app)
    app = TestApp(RecursiveMiddleware(app))
    res = app.get('')
    assert res.header('content-type') == 'text/plain'
    assert res.full_status == '200 OK'
    assert 'requested page returned' in res
    res = app.get('/error')
    assert res.header('content-type') == 'text/plain'
    assert res.full_status == '200 OK'
    assert 'Page not found' in res
    res = app.get('/not_found', status=404)
    assert res.header('content-type') == 'text/plain'
    assert res.full_status == '404 Not Found' # Different status
    assert 'Page not found' in res
    try:
        res = app.get('/recurse')
    except __HOLE__ as e:
        if str(e).startswith('Forwarding loop detected'):
            pass
        else:
            raise AssertionError('Failed to detect forwarding loop')

# Test Deprecated Code
AssertionError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/tests/test_recursive.py/test_ForwardRequest_factory
5,437
def exist(self, item_name, index=None):
    if item_name in self.db:
        if index is not None:
            try:
                self.db[item_name]['value'][index]
            except __HOLE__:
                return False
        return True
    return False
IndexError
dataset/ETHPy150Open nyddle/pystash/pystash/common.py/ShelveStorage.exist
5,438
def _GetTempOutputFileHandles(self, value_type):
    """Initializes output AFF4Image for a given value type."""
    try:
        return self.temp_output_trackers[value_type], False
    except __HOLE__:
        return self._CreateOutputFileHandles(value_type), True
KeyError
dataset/ETHPy150Open google/grr/grr/lib/output_plugins/bigquery_plugin.py/BigQueryOutputPlugin._GetTempOutputFileHandles
5,439
def __init__(self, repo, log, weak=True, git_bin='git', git_fs_encoding=None):
    self.logger = log

    with StorageFactory.__dict_lock:
        try:
            i = StorageFactory.__dict[repo]
        except __HOLE__:
            i = Storage(repo, log, git_bin, git_fs_encoding)
            StorageFactory.__dict[repo] = i

        # create or remove additional reference depending on 'weak' argument
        if weak:
            try:
                del StorageFactory.__dict_nonweak[repo]
            except KeyError:
                pass
        else:
            StorageFactory.__dict_nonweak[repo] = i

    self.__inst = i
    self.__repo = repo
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/StorageFactory.__init__
5,440
@staticmethod
def git_version(git_bin="git"):
    GIT_VERSION_MIN_REQUIRED = (1, 5, 6)
    try:
        g = GitCore(git_bin=git_bin)
        [v] = g.version().splitlines()
        _, _, version = v.strip().split()
        # 'version' has usually at least 3 numeric version components, e.g.::
        #   1.5.4.2
        #   1.5.4.3.230.g2db511
        #   1.5.4.GIT

        def try_int(s):
            try:
                return int(s)
            except __HOLE__:
                return s

        split_version = tuple(map(try_int, version.split('.')))

        result = {}
        result['v_str'] = version
        result['v_tuple'] = split_version
        result['v_min_tuple'] = GIT_VERSION_MIN_REQUIRED
        result['v_min_str'] = ".".join(map(str, GIT_VERSION_MIN_REQUIRED))
        result['v_compatible'] = split_version >= GIT_VERSION_MIN_REQUIRED
        return result

    except Exception, e:
        raise GitError("Could not retrieve GIT version"
                       " (tried to execute/parse '%s --version' but got %s)"
                       % (git_bin, repr(e)))
ValueError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.git_version
5,441
def get_rev_cache(self): """ Retrieve revision cache may rebuild cache on the fly if required returns RevCache tuple """ with self.__rev_cache_lock: if self.__rev_cache is None: # can be cleared by Storage.__rev_cache_sync() self.logger.debug("triggered rebuild of commit tree db for %d" % id(self)) ts0 = time.time() youngest = None oldest = None new_db = {} # db new_sdb = {} # short_rev db # helper for reusing strings __rev_seen = {} def __rev_reuse(rev): rev = str(rev) return __rev_seen.setdefault(rev, rev) new_tags = set(__rev_reuse(rev.strip()) for rev in self.repo.rev_parse("--tags").splitlines()) new_branches = [(k, __rev_reuse(v)) for k, v in self._get_branches()] head_revs = set(v for _, v in new_branches) rev = ord_rev = 0 for ord_rev, revs in enumerate(self.repo.rev_list("--parents", "--topo-order", "--all").splitlines()): revs = map(__rev_reuse, revs.strip().split()) rev = revs[0] # first rev seen is assumed to be the youngest one if not ord_rev: youngest = rev # shortrev "hash" map srev_key = self.__rev_key(rev) new_sdb.setdefault(srev_key, []).append(rev) # parents parents = tuple(revs[1:]) # new_db[rev] = (children(rev), parents(rev), ordinal_id(rev), rheads(rev)) if rev in new_db: # (incomplete) entry was already created by children _children, _parents, _ord_rev, _rheads = new_db[rev] assert _children assert not _parents assert _ord_rev == 0 if rev in head_revs and rev not in _rheads: _rheads.append(rev) else: # new entry _children = [] _rheads = [rev] if rev in head_revs else [] # create/update entry -- transform lists into tuples since entry will be final new_db[rev] = tuple(_children), tuple(parents), ord_rev + 1, tuple(_rheads) # update parents(rev)s for parent in parents: # by default, a dummy ordinal_id is used for the mean-time _children, _parents, _ord_rev, _rheads2 = new_db.setdefault(parent, ([], [], 0, [])) # update parent(rev)'s children if rev not in _children: _children.append(rev) # update parent(rev)'s rheads for rev in _rheads: if rev not in _rheads2: _rheads2.append(rev) # last rev seen is assumed to be the oldest one (with highest ord_rev) oldest = rev __rev_seen = None # convert sdb either to dict or array depending on size tmp = [()]*(max(new_sdb.keys())+1) if len(new_sdb) > 5000 else {} try: while True: k, v = new_sdb.popitem() tmp[k] = tuple(v) except __HOLE__: pass assert len(new_sdb) == 0 new_sdb = tmp # atomically update self.__rev_cache self.__rev_cache = Storage.RevCache(youngest, oldest, new_db, new_tags, new_sdb, new_branches) ts1 = time.time() self.logger.debug("rebuilt commit tree db for %d with %d entries (took %.1f ms)" % (id(self), len(new_db), 1000*(ts1-ts0))) assert all(e is not None for e in self.__rev_cache) or not any(self.__rev_cache) return self.__rev_cache # with self.__rev_cache_lock # see RevCache namedtuple
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.get_rev_cache
5,442
def get_branch_contains(self, sha, resolve=False):
    """
    return list of reachable head sha ids or (names, sha) pairs if resolve is true

    see also get_branches()
    """
    _rev_cache = self.rev_cache

    try:
        rheads = _rev_cache.rev_dict[sha][3]
    except __HOLE__:
        return []

    if resolve:
        return [ (k, v) for k, v in _rev_cache.branch_dict if v in rheads ]

    return rheads
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.get_branch_contains
5,443
def fullrev(self, srev):
    "try to reverse shortrev()"
    srev = str(srev)

    _rev_cache = self.rev_cache

    # short-cut
    if len(srev) == 40 and srev in _rev_cache.rev_dict:
        return srev

    if not GitCore.is_sha(srev):
        return None

    try:
        srevs = _rev_cache.srev_dict[self.__rev_key(srev)]
    except __HOLE__:
        return None

    srevs = filter(lambda s: s.startswith(srev), srevs)
    if len(srevs) == 1:
        return srevs[0]

    return None
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.fullrev
5,444
def get_obj_size(self, sha):
    sha = str(sha)

    try:
        obj_size = int(self.repo.cat_file("-s", sha).strip())
    except __HOLE__:
        raise GitErrorSha("object '%s' not found" % sha)

    return obj_size
ValueError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.get_obj_size
5,445
def children(self, sha): db = self.get_commits() try: return list(db[sha][0]) except __HOLE__: return []
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.children
5,446
def parents(self, sha): db = self.get_commits() try: return list(db[sha][1]) except __HOLE__: return []
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.parents
5,447
@contextmanager def get_historian(self, sha, base_path): p = [] change = {} next_path = [] def name_status_gen(): p[:] = [self.repo.log_pipe('--pretty=format:%n%H', '--name-status', sha, '--', base_path)] f = p[0].stdout for l in f: if l == '\n': continue old_sha = l.rstrip('\n') for l in f: if l == '\n': break _, path = l.rstrip('\n').split('\t', 1) while path not in change: change[path] = old_sha if next_path == [path]: yield old_sha try: path, _ = path.rsplit('/', 1) except ValueError: break f.close() p[0].terminate() p[0].wait() p[:] = [] while True: yield None gen = name_status_gen() def historian(path): try: return change[path] except __HOLE__: next_path[:] = [path] return gen.next() yield historian if p: p[0].stdout.close() p[0].terminate() p[0].wait()
KeyError
dataset/ETHPy150Open hvr/trac-git-plugin/tracext/git/PyGIT.py/Storage.get_historian
5,448
@contextmanager def cleanup(self, prog): try: yield except __HOLE__: log.error('KeyboardInterrupt') finally: if not prog.is_ok: log.info("Program manager letting program fail")
KeyboardInterrupt
dataset/ETHPy150Open Anaconda-Platform/chalmers/chalmers/program_manager.py/ProgramManager.cleanup
5,449
def _write(self, file, node, encoding, namespaces): # write XML to file tag = node.tag if tag is Comment: file.write("<!-- %s -->" % _escape_cdata(node.text, encoding)) elif tag is ProcessingInstruction: file.write("<?%s?>" % _escape_cdata(node.text, encoding)) else: items = node.items() xmlns_items = [] # new namespaces in this scope try: if isinstance(tag, QName) or tag[:1] == "{": tag, xmlns = fixtag(tag, namespaces) if xmlns: xmlns_items.append(xmlns) except TypeError: _raise_serialization_error(tag) file.write("<" + _encode(tag, encoding)) if items or xmlns_items: items.sort() # lexical order for k, v in items: try: if isinstance(k, QName) or k[:1] == "{": k, xmlns = fixtag(k, namespaces) if xmlns: xmlns_items.append(xmlns) except __HOLE__: _raise_serialization_error(k) try: if isinstance(v, QName): v, xmlns = fixtag(v, namespaces) if xmlns: xmlns_items.append(xmlns) except TypeError: _raise_serialization_error(v) file.write(" %s=\"%s\"" % (_encode(k, encoding), _escape_attrib(v, encoding))) for k, v in xmlns_items: file.write(" %s=\"%s\"" % (_encode(k, encoding), _escape_attrib(v, encoding))) if node.text or len(node): file.write(">") if node.text: file.write(_escape_cdata(node.text, encoding)) for n in node: self._write(file, n, encoding, namespaces) file.write("</" + _encode(tag, encoding) + ">") else: file.write(" />") for k, v in xmlns_items: del namespaces[v] if node.tail: file.write(_escape_cdata(node.tail, encoding)) # -------------------------------------------------------------------- # helpers ## # Checks if an object appears to be a valid element object. # # @param An element instance. # @return A true value if this is an element object. # @defreturn flag
TypeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/ElementTree._write
5,450
def _encode(s, encoding): try: return s.encode(encoding) except __HOLE__: return s # 1.5.2: assume the string uses the right encoding
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/_encode
5,451
def _encode_entity(text, pattern=_escape): # map reserved and non-ascii characters to numerical entities def escape_entities(m, map=_escape_map): out = [] append = out.append for char in m.group(): text = map.get(char) if text is None: text = "&#%d;" % ord(char) append(text) return string.join(out, "") try: return _encode(pattern.sub(escape_entities, text), "ascii") except __HOLE__: _raise_serialization_error(text) # # the following functions assume an ascii-compatible encoding # (or "utf-16")
TypeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/_encode_entity
5,452
def _escape_cdata(text, encoding=None, replace=string.replace): # escape character data try: if encoding: try: text = _encode(text, encoding) except UnicodeError: return _encode_entity(text) text = replace(text, "&", "&amp;") text = replace(text, "<", "&lt;") text = replace(text, ">", "&gt;") return text except (__HOLE__, AttributeError): _raise_serialization_error(text)
TypeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/_escape_cdata
5,453
def _escape_attrib(text, encoding=None, replace=string.replace): # escape attribute value try: if encoding: try: text = _encode(text, encoding) except UnicodeError: return _encode_entity(text) text = replace(text, "&", "&amp;") text = replace(text, "'", "&apos;") # FIXME: overkill text = replace(text, "\"", "&quot;") text = replace(text, "<", "&lt;") text = replace(text, ">", "&gt;") return text except (TypeError, __HOLE__): _raise_serialization_error(text)
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/_escape_attrib
5,454
def __init__(self, source, events=None): if not hasattr(source, "read"): source = open(source, "rb") self._file = source self._events = [] self._index = 0 self.root = self._root = None self._parser = XMLTreeBuilder() # wire up the parser for event reporting parser = self._parser._parser append = self._events.append if events is None: events = ["end"] for event in events: if event == "start": try: parser.ordered_attributes = 1 parser.specified_attributes = 1 def handler(tag, attrib_in, event=event, append=append, start=self._parser._start_list): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler except __HOLE__: def handler(tag, attrib_in, event=event, append=append, start=self._parser._start): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler elif event == "end": def handler(tag, event=event, append=append, end=self._parser._end): append((event, end(tag))) parser.EndElementHandler = handler elif event == "start-ns": def handler(prefix, uri, event=event, append=append): try: uri = _encode(uri, "ascii") except UnicodeError: pass append((event, (prefix or "", uri))) parser.StartNamespaceDeclHandler = handler elif event == "end-ns": def handler(prefix, event=event, append=append): append((event, None)) parser.EndNamespaceDeclHandler = handler
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/iterparse.__init__
5,455
def next(self): while 1: try: item = self._events[self._index] except __HOLE__: if self._parser is None: self.root = self._root try: raise StopIteration except NameError: raise IndexError # load event buffer del self._events[:] self._index = 0 data = self._file.read(16384) if data: self._parser.feed(data) else: self._root = self._parser.close() self._parser = None else: self._index = self._index + 1 return item
IndexError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/iterparse.next
5,456
def __init__(self, html=0, target=None): try: from xml.parsers import expat except __HOLE__: raise ImportError( "No module named expat; use SimpleXMLTreeBuilder instead" ) self._parser = parser = expat.ParserCreate(None, "}") if target is None: target = TreeBuilder() self._target = target self._names = {} # name memo cache # callbacks parser.DefaultHandlerExpand = self._default parser.StartElementHandler = self._start parser.EndElementHandler = self._end parser.CharacterDataHandler = self._data # let expat do the buffering, if supported try: self._parser.buffer_text = 1 except AttributeError: pass # use new-style attribute handling, if supported try: self._parser.ordered_attributes = 1 self._parser.specified_attributes = 1 parser.StartElementHandler = self._start_list except AttributeError: pass encoding = None if not parser.returns_unicode: encoding = "utf-8" # target.xml(encoding, None) self._doctype = None self.entity = {}
ImportError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/XMLTreeBuilder.__init__
5,457
def _fixname(self, key): # expand qname, and convert name string to ascii, if possible try: name = self._names[key] except __HOLE__: name = key if "}" in name: name = "{" + name self._names[key] = name = self._fixtext(name) return name
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/XMLTreeBuilder._fixname
5,458
def _default(self, text): prefix = text[:1] if prefix == "&": # deal with undefined entities try: self._target.data(self.entity[text[1:-1]]) except __HOLE__: from xml.parsers import expat raise expat.error( "undefined entity %s: line %d, column %d" % (text, self._parser.ErrorLineNumber, self._parser.ErrorColumnNumber) ) elif prefix == "<" and text[:9] == "<!DOCTYPE": self._doctype = [] # inside a doctype declaration elif self._doctype is not None: # parse doctype contents if prefix == ">": self._doctype = None return text = string.strip(text) if not text: return self._doctype.append(text) n = len(self._doctype) if n > 2: type = self._doctype[1] if type == "PUBLIC" and n == 4: name, type, pubid, system = self._doctype elif type == "SYSTEM" and n == 3: name, type, system = self._doctype pubid = None else: return if pubid: pubid = pubid[1:-1] self.doctype(name, pubid, system[1:-1]) self._doctype = None ## # Handles a doctype declaration. # # @param name Doctype name. # @param pubid Public identifier. # @param system System identifier.
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/xml/etree/ElementTree.py/XMLTreeBuilder._default
5,459
def test_expiration(): time = 1 cache = Cache(max_age=5, clock=lambda: time) # Ensure that the clock value is coming from the current value of the # `time` variable. assert cache.clock() == 1 time = 2 assert cache.clock() == 2 cache['a'] = 'b' cache['b'] = 'c' time += 3 cache['c'] = 'd' cache['d'] = 'e' assert len(cache) == 4 assert len(cache.queue) == 4 cache.evict_expired() assert len(cache) == 4 assert len(cache.queue) == 4 time += 3 cache.evict_expired() assert len(cache) == 2 assert len(cache.queue) == 2 assert 'a' not in cache assert 'b' not in cache assert 'c' in cache assert 'd' in cache cache['c'] = 'f' assert len(cache) == 2 assert len(cache.queue) == 3 time += 3 cache.evict_expired() assert len(cache) == 1 assert len(cache.queue) == 1 assert 'c' in cache try: _ = cache['d'] assert False, "'d' should not be in cache" except __HOLE__: pass
IndexError
dataset/ETHPy150Open kgaughan/uwhoisd/tests/test_cache.py/test_expiration
5,460
def clean_json(resource_json, resources_map): """ Cleanup the a resource dict. For now, this just means replacing any Ref node with the corresponding physical_resource_id. Eventually, this is where we would add things like function parsing (fn::) """ if isinstance(resource_json, dict): if 'Ref' in resource_json: # Parse resource reference resource = resources_map[resource_json['Ref']] if hasattr(resource, 'physical_resource_id'): return resource.physical_resource_id else: return resource if "Fn::FindInMap" in resource_json: map_name = resource_json["Fn::FindInMap"][0] map_path = resource_json["Fn::FindInMap"][1:] result = resources_map[map_name] for path in map_path: result = result[clean_json(path, resources_map)] return result if 'Fn::GetAtt' in resource_json: resource = resources_map.get(resource_json['Fn::GetAtt'][0]) if resource is None: return resource_json try: return resource.get_cfn_attribute(resource_json['Fn::GetAtt'][1]) except __HOLE__ as n: logger.warning(n.message.format(resource_json['Fn::GetAtt'][0])) except UnformattedGetAttTemplateException: raise BotoServerError( UnformattedGetAttTemplateException.status_code, 'Bad Request', UnformattedGetAttTemplateException.description.format( resource_json['Fn::GetAtt'][0], resource_json['Fn::GetAtt'][1])) if 'Fn::If' in resource_json: condition_name, true_value, false_value = resource_json['Fn::If'] if resources_map[condition_name]: return clean_json(true_value, resources_map) else: return clean_json(false_value, resources_map) if 'Fn::Join' in resource_json: join_list = [] for val in resource_json['Fn::Join'][1]: cleaned_val = clean_json(val, resources_map) join_list.append('{0}'.format(cleaned_val) if cleaned_val else '{0}'.format(val)) return resource_json['Fn::Join'][0].join(join_list) cleaned_json = {} for key, value in resource_json.items(): cleaned_json[key] = clean_json(value, resources_map) return cleaned_json elif isinstance(resource_json, list): return [clean_json(val, resources_map) for val in resource_json] else: return resource_json
NotImplementedError
dataset/ETHPy150Open spulec/moto/moto/cloudformation/parsing.py/clean_json
5,461
def _parse_comments(s): """ Parses vim's comments option to extract comment format """ i = iter(s.split(",")) rv = [] try: while True: # get the flags and text of a comment part flags, text = next(i).split(':', 1) if len(flags) == 0: rv.append((text, text, text, "")) # parse 3-part comment, but ignore those with O flag elif 's' in flags and 'O' not in flags: ctriple = ["TRIPLE"] indent = "" if flags[-1] in string.digits: indent = " " * int(flags[-1]) ctriple.append(text) flags, text = next(i).split(':', 1) assert flags[0] == 'm' ctriple.append(text) flags, text = next(i).split(':', 1) assert flags[0] == 'e' ctriple.append(text) ctriple.append(indent) rv.append(ctriple) elif 'b' in flags: if len(text) == 1: rv.insert(0, ("SINGLE_CHAR", text, text, text, "")) except __HOLE__: return rv
StopIteration
dataset/ETHPy150Open honza/vim-snippets/pythonx/vimsnippets.py/_parse_comments
5,462
def lpsolve(self, solver="scip", clean=True): self.print_instance() solver = SCIPSolver if solver == "scip" else GLPKSolver lp_data = self.handle.getvalue() self.handle.close() g = solver(lp_data, clean=clean) selected = set(g.results) try: obj_val = g.obj_val except __HOLE__: # No solution! return None, None return selected, obj_val
AttributeError
dataset/ETHPy150Open tanghaibao/jcvi/algorithms/lpsolve.py/LPInstance.lpsolve
5,463
def get_names_from_path(fontpath): """Parse underscore(or hyphen)-separated font file names into ``family`` and ``style`` names.""" _file = os.path.basename(fontpath) _file_name = os.path.splitext(_file)[0] try: family_name, style_name = _file_name.split('_') except __HOLE__: family_name, style_name = _file_name.split('-') style_name = get_parameters_from_style(style_name) style_name = ' '.join(style_name) return family_name, style_name
ValueError
dataset/ETHPy150Open gferreira/hTools2/Lib/hTools2/modules/fileutils.py/get_names_from_path
5,464
def create_indexes(self, colname, ncolname, extracolname): if not self.indexed: return try: kind = self.kind vprint("* Indexing ``%s`` columns. Type: %s." % (colname, kind)) for acolname in [colname, ncolname, extracolname]: acolumn = self.table.colinstances[acolname] acolumn.create_index( kind=self.kind, optlevel=self.optlevel, _blocksizes=small_blocksizes, _testmode=True) except TypeError as te: if self.colNotIndexable_re.search(str(te)): raise SilentlySkipTest( "Columns of this type can not be indexed.") raise except __HOLE__: raise SilentlySkipTest( "Indexing columns of this type is not supported yet.")
NotImplementedError
dataset/ETHPy150Open PyTables/PyTables/tables/tests/test_queries.py/BaseTableQueryTestCase.create_indexes
5,465
def create_test_method(type_, op, extracond, func=None): sctype = sctype_from_type[type_] # Compute the value of bounds. condvars = {'bound': right_bound, 'lbound': left_bound, 'rbound': right_bound, 'func_bound': func_bound} for (bname, bvalue) in six.iteritems(condvars): if type_ == 'string': bvalue = str_format % bvalue bvalue = nxtype_from_type[type_](bvalue) condvars[bname] = bvalue # Compute the name of columns. colname = 'c_%s' % type_ ncolname = 'c_nested/%s' % colname # Compute the query condition. if not op: # as is cond = colname elif op == '~': # unary cond = '~(%s)' % colname elif op == '<' and func is None: # binary variable-constant cond = '%s %s %s' % (colname, op, repr(condvars['bound'])) elif isinstance(op, tuple): # double binary variable-constant cond = ('(lbound %s %s) & (%s %s rbound)' % (op[0], colname, colname, op[1])) elif func is not None: cond = '%s(%s) %s func_bound' % (func, colname, op) else: # function or binary variable-variable cond = '%s %s bound' % (colname, op) if extracond: cond = '(%s) %s' % (cond, extracond) def ignore_skipped(oldmethod): @functools.wraps(oldmethod) def newmethod(self, *args, **kwargs): self._verboseHeader() try: return oldmethod(self, *args, **kwargs) except SilentlySkipTest as se: if se.args: msg = se.args[0] else: msg = "<skipped>" common.verbosePrint("\nSkipped test: %s" % msg) finally: common.verbosePrint('') # separator line between tests return newmethod @ignore_skipped def test_method(self): vprint("* Condition is ``%s``." % cond) # Replace bitwise operators with their logical counterparts. pycond = cond for (ptop, pyop) in [('&', 'and'), ('|', 'or'), ('~', 'not')]: pycond = pycond.replace(ptop, pyop) pycond = compile(pycond, '<string>', 'eval') table = self.table self.create_indexes(colname, ncolname, 'c_idxextra') table_slice = dict(start=1, stop=table.nrows - 5, step=3) rownos, fvalues = None, None # Test that both simple and nested columns work as expected. # Knowing how the table is filled, results must be the same. for acolname in [colname, ncolname]: # First the reference Python version. pyrownos, pyfvalues, pyvars = [], [], condvars.copy() for row in table.iterrows(**table_slice): pyvars[colname] = row[acolname] pyvars['c_extra'] = row['c_extra'] pyvars['c_idxextra'] = row['c_idxextra'] try: isvalidrow = eval(pycond, func_info, pyvars) except TypeError: raise SilentlySkipTest( "The Python type does not support the operation.") if isvalidrow: pyrownos.append(row.nrow) pyfvalues.append(row[acolname]) pyrownos = numpy.array(pyrownos) # row numbers already sorted pyfvalues = numpy.array(pyfvalues, dtype=sctype) pyfvalues.sort() vprint("* %d rows selected by Python from ``%s``." % (len(pyrownos), acolname)) if rownos is None: rownos = pyrownos # initialise reference results fvalues = pyfvalues else: self.assertTrue(numpy.all(pyrownos == rownos)) # check self.assertTrue(numpy.all(pyfvalues == fvalues)) # Then the in-kernel or indexed version. ptvars = condvars.copy() ptvars[colname] = table.colinstances[acolname] ptvars['c_extra'] = table.colinstances['c_extra'] ptvars['c_idxextra'] = table.colinstances['c_idxextra'] try: isidxq = table.will_query_use_indexing(cond, ptvars) # Query twice to trigger possible query result caching. ptrownos = [table.get_where_list(cond, condvars, sort=True, **table_slice) for _ in range(2)] ptfvalues = [ table.read_where(cond, condvars, field=acolname, **table_slice) for _ in range(2) ] except TypeError as te: if self.condNotBoolean_re.search(str(te)): raise SilentlySkipTest("The condition is not boolean.") raise except __HOLE__: raise SilentlySkipTest( "The PyTables type does not support the operation.") for ptfvals in ptfvalues: # row numbers already sorted ptfvals.sort() vprint("* %d rows selected by PyTables from ``%s``" % (len(ptrownos[0]), acolname), nonl=True) vprint("(indexing: %s)." % ["no", "yes"][bool(isidxq)]) self.assertTrue(numpy.all(ptrownos[0] == rownos)) self.assertTrue(numpy.all(ptfvalues[0] == fvalues)) # The following test possible caching of query results. self.assertTrue(numpy.all(ptrownos[0] == ptrownos[1])) self.assertTrue(numpy.all(ptfvalues[0] == ptfvalues[1])) test_method.__doc__ = "Testing ``%s``." % cond return test_method
NotImplementedError
dataset/ETHPy150Open PyTables/PyTables/tables/tests/test_queries.py/create_test_method
5,466
def transcript_iterator(gff_iterator, strict=True): """iterate over the contents of a gtf file. return a list of entries with the same transcript id. Any features without a transcript_id will be ignored. The entries for the same transcript have to be consecutive in the file. If *strict* is set an AssertionError will be raised if that is not true. """ last = None matches = [] found = set() for gff in gff_iterator: # ignore entries without transcript or gene id try: this = gff.transcript_id + gff.gene_id except __HOLE__: continue if last != this: if last: yield matches matches = [] assert not strict or this not in found, \ "duplicate entry: %s" % this found.add(this) last = this matches.append(gff) if last: yield matches
AttributeError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/GTF.py/transcript_iterator
5,467
def iterator_transcripts2genes(gtf_iterator, min_overlap=0): """cluster transcripts by exon overlap. The gene id is set to the first transcript encountered of a gene. If a gene stretches over several contigs, subsequent copies are appended a number. """ map_transcript2gene = {} gene_ids = collections.defaultdict(list) for chunk in iterator_overlaps(gtf_iterator): transcript_ids = list(set([x.transcript_id for x in chunk])) contig = chunk[0].contig # have any of these already encountered? for x in transcript_ids: if x in map_transcript2gene: gene_id = map_transcript2gene[x] break else: # arbitrarily pick one gene_id = transcript_ids[0] if gene_id not in gene_ids: gene_ids[gene_id].append(contig) index = 0 else: try: index = gene_ids[gene_id].index(contig) except __HOLE__: index = len(gene_ids[gene_id]) gene_ids[gene_id].append(contig) for x in transcript_ids: map_transcript2gene[x] = gene_id if index: gene_id += ".%i" % index for x in chunk: x.gene_id = gene_id yield chunk
ValueError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/GTF.py/iterator_transcripts2genes
5,468
def read(self, line): """read gff entry from line in GTF/GFF format. <seqname> <source> <feature> <start> <end> <score> \ <strand> <frame> [attributes] [comments] """ data = line[:-1].split("\t") try: (self.contig, self.source, self.feature, self.start, self.end, self.score, self.strand, self.frame) = data[:8] except __HOLE__: raise ValueError("parsing error in line `%s`" % line) # note: frame might be . (self.start, self.end) = map(int, (self.start, self.end)) self.start -= 1 self.parseInfo(data[8], line)
ValueError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/GTF.py/Entry.read
5,469
def parseInfo(self, attributes, line): """parse attributes. This method will set the gene_id and transcript_id attributes if present. """ # remove comments attributes = attributes.split("#")[0] # separate into fields # Fields might contain a ";", for example in ENSEMBL GTF file # for mouse, v78: # ...; transcript_name "TXNRD2;-001"; .... # The current heuristic is to split on a semicolon followed by a # space, which seems to be part of the specification, see # http://mblab.wustl.edu/GTF22.html fields = map(lambda x: x.strip(), attributes.split("; ")[:-1]) self.attributes = {} for f in fields: d = map(lambda x: x.strip(), f.split(" ")) n, v = d[0], " ".join(d[1:]) if len(d) > 2: v = d[1:] if v[0] == '"' and v[-1] == '"': v = v[1:-1] else: # try to convert to a value try: v = float(v) v = int(v) except ValueError: pass except __HOLE__: pass if n == "gene_id": self.gene_id = v elif n == "transcript_id": self.transcript_id = v else: self.attributes[n] = v if not self.gene_id: raise ParsingError("missing attribute 'gene_id' in line %s" % line) if not self.transcript_id: raise ParsingError( "missing attribute 'transcript_id' in line %s" % line)
TypeError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/GTF.py/Entry.parseInfo
5,470
def copy(self, other): """fill from other entry. This method works if other is :class:`GTF.Entry` or :class:`pysam.GTFProxy`. """ self.contig = other.contig self.source = other.source self.feature = other.feature self.start = other.start self.end = other.end self.score = other.score self.strand = other.strand self.frame = other.frame # gene_id and transcript_id can be optional try: self.gene_id = other.gene_id except AttributeError: pass try: self.transcript_id = other.transcript_id except AttributeError: pass self.attributes = copy.copy(other.asDict()) # from gff - remove gene_id and transcript_id from attributes try: del self.attributes["gene_id"] del self.attributes["transcript_id"] except __HOLE__: pass return self
KeyError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/GTF.py/Entry.copy
5,471
def check_dependencies(self, obj, failed): for dep in self.dependencies: peer_name = dep[0].lower() + dep[1:] # django names are camelCased with the first letter lower peer_objects=[] try: peer_names = plural(peer_name) peer_object_list=[] try: peer_object_list.append(deepgetattr(obj, peer_name)) except: pass try: peer_object_list.append(deepgetattr(obj, peer_names)) except: pass for peer_object in peer_object_list: try: peer_objects.extend(peer_object.all()) except __HOLE__: peer_objects.append(peer_object) except: peer_objects = [] if (hasattr(obj,'controller')): try: peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects) except AttributeError: pass if (failed in peer_objects): if (obj.backend_status!=failed.backend_status): obj.backend_status = failed.backend_status obj.save(update_fields=['backend_status']) raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
AttributeError
dataset/ETHPy150Open open-cloud/xos/xos/synchronizers/base/syncstep.py/SyncStep.check_dependencies
5,472
def sync_record(self, o): try: controller = o.get_controller() controller_register = json.loads(controller.backend_register) if (controller_register.get('disabled',False)): raise InnocuousException('Controller %s is disabled'%controller.name) except __HOLE__: pass tenant_fields = self.map_sync_inputs(o) if tenant_fields == SyncStep.SYNC_WITHOUT_RUNNING: return main_objs=self.observes if (type(main_objs) is list): main_objs=main_objs[0] path = ''.join(main_objs.__name__).lower() res = run_template(self.playbook,tenant_fields,path=path) try: self.map_sync_outputs(o,res) except AttributeError: pass
AttributeError
dataset/ETHPy150Open open-cloud/xos/xos/synchronizers/base/syncstep.py/SyncStep.sync_record
5,473
def delete_record(self, o): try: controller = o.get_controller() controller_register = json.loads(o.node.site_deployment.controller.backend_register) if (controller_register.get('disabled',False)): raise InnocuousException('Controller %s is disabled'%sliver.node.site_deployment.controller.name) except AttributeError: pass tenant_fields = self.map_delete_inputs(o) main_objs=self.observes if (type(main_objs) is list): main_objs=main_objs[0] path = ''.join(main_objs.__name__).lower() tenant_fields['delete']=True res = run_template(self.playbook,tenant_fields,path=path) try: self.map_delete_outputs(o,res) except __HOLE__: pass
AttributeError
dataset/ETHPy150Open open-cloud/xos/xos/synchronizers/base/syncstep.py/SyncStep.delete_record
5,474
def call(self, failed=[], deletion=False): #if ('Instance' in self.__class__.__name__): # pdb.set_trace() pending = self.fetch_pending(deletion) for o in pending: # another spot to clean up debug state try: reset_queries() except: # this shouldn't happen, but in case it does, catch it... logger.log_exc("exception in reset_queries",extra=o.tologdict()) sync_failed = False try: backoff_disabled = Config().observer_backoff_disabled except: backoff_disabled = 0 try: scratchpad = json.loads(o.backend_register) if (scratchpad): next_run = scratchpad['next_run'] if (not backoff_disabled and next_run>time.time()): sync_failed = True except: logger.log_exc("Exception while loading scratchpad",extra=o.tologdict()) pass if (not sync_failed): try: for f in failed: self.check_dependencies(o,f) # Raises exception if failed if (deletion): self.delete_record(o) o.delete(purge=True) else: new_enacted = datetime.now() # Is this the same timezone? XXX self.sync_record(o) o.enacted = new_enacted scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time()} o.backend_register = json.dumps(scratchpad) o.backend_status = "1 - OK" o.save(update_fields=['enacted','backend_status','backend_register']) except (InnocuousException,Exception,DeferredException) as e: logger.log_exc("sync step failed!",extra=o.tologdict()) try: if (o.backend_status.startswith('2 - ')): str_e = '%s // %r'%(o.backend_status[4:],e) str_e = elim_dups(str_e) else: str_e = '%r'%e except: str_e = '%r'%e try: error = self.error_map.map(str_e) except: error = '%s'%str_e if isinstance(e, InnocuousException): o.backend_status = '1 - %s'%error else: o.backend_status = '2 - %s'%error try: scratchpad = json.loads(o.backend_register) scratchpad['exponent'] except: logger.log_exc("Exception while updating scratchpad",extra=o.tologdict()) scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0} # Second failure if (scratchpad['exponent']): if isinstance(e,DeferredException): delay = scratchpad['exponent'] * 60 # 1 minute else: delay = scratchpad['exponent'] * 600 # 10 minutes # cap delays at 8 hours if (delay>8*60*60): delay=8*60*60 scratchpad['next_run'] = time.time() + delay try: scratchpad['exponent']+=1 except: scratchpad['exponent']=1 try: scratchpad['failures']+=1 except __HOLE__: scratchpad['failures']=1 scratchpad['last_failure']=time.time() o.backend_register = json.dumps(scratchpad) # TOFIX: # DatabaseError: value too long for type character varying(140) if (o.pk): try: o.backend_status = o.backend_status[:1024] o.save(update_fields=['backend_status','backend_register','updated']) except: print "Could not update backend status field!" pass sync_failed = True if (sync_failed): failed.append(o) return failed
KeyError
dataset/ETHPy150Open open-cloud/xos/xos/synchronizers/base/syncstep.py/SyncStep.call
5,475
def get_default_columns(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False, local_only=False): """ Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: opts = self.query.model._meta qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name aliases = set() only_load = self.deferred_to_columns() # Skip all proxy to the root proxied model proxied_model = get_proxied_model(opts) if start_alias: seen = {None: start_alias} for field, model in opts.get_fields_with_model(): if local_only and model is not None: continue if start_alias: try: alias = seen[model] except __HOLE__: if model is proxied_model: alias = start_alias else: link_field = opts.get_ancestor_link(model) alias = self.query.join((start_alias, model._meta.db_table, link_field.column, model._meta.pk.column)) seen[model] = alias else: # If we're starting from the base model of the queryset, the # aliases will have already been set up in pre_sql_setup(), so # we can save time here. alias = self.query.included_inherited_models[model] table = self.query.alias_map[alias][TABLE_NAME] if table in only_load and field.column not in only_load[table]: continue if as_pairs: result.append((alias, field.column)) aliases.add(alias) continue if with_aliases and field.column in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s.%s AS %s' % (qn(alias), qn2(field.column), c_alias)) col_aliases.add(c_alias) aliases.add(c_alias) else: r = '%s.%s' % (qn(alias), qn2(field.column)) result.append(r) aliases.add(r) if with_aliases: col_aliases.add(field.column) return result, aliases
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/db/models/sql/compiler.py/SQLCompiler.get_default_columns
5,476
def get_from_clause(self): """ Returns a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Sub-classes, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables we need. This means the select columns and ordering must be done first. """ result = [] qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name first = True for alias in self.query.tables: if not self.query.alias_refcount[alias]: continue try: name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias] except __HOLE__: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue alias_str = (alias != name and ' %s' % alias or '') if join_type and not first: result.append('%s %s%s ON (%s.%s = %s.%s)' % (join_type, qn(name), alias_str, qn(lhs), qn2(lhs_col), qn(alias), qn2(col))) else: connector = not first and ', ' or '' result.append('%s%s%s' % (connector, qn(name), alias_str)) first = False for t in self.query.extra_tables: alias, unused = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # calls increments the refcount, so an alias refcount of one means # this is the only reference. if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: connector = not first and ', ' or '' result.append('%s%s' % (connector, qn(alias))) first = False return result, []
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/db/models/sql/compiler.py/SQLCompiler.get_from_clause
5,477
@classmethod def query_mongo(cls, username, query=None, fields=None, sort=None, start=0, limit=DEFAULT_LIMIT, count=False): query = dict_for_mongo(query) if query else {} query[cls.ACCOUNT] = username # TODO find better method # check for the created_on key in query and turn its values into dates if type(query) == dict and cls.CREATED_ON in query: if type(query[cls.CREATED_ON]) is dict: for op, val in query[cls.CREATED_ON].iteritems(): try: query[cls.CREATED_ON][op] = datetime.strptime( val, DATETIME_FORMAT) except ValueError: pass elif isinstance(query[cls.CREATED_ON], basestring): val = query[cls.CREATED_ON] try: created_on = datetime.strptime(val, DATETIME_FORMAT) except __HOLE__: pass else: # create start and end times for the entire day start_time = created_on.replace(hour=0, minute=0, second=0, microsecond=0) end_time = start_time + timedelta(days=1) query[cls.CREATED_ON] = {"$gte": start_time, "$lte": end_time} # TODO: current mongo (2.0.4 of this writing) # cant mix including and excluding fields in a single query fields_to_select = None if type(fields) == list and len(fields) > 0: fields_to_select = dict([(_encode_for_mongo(field), 1) for field in fields]) cursor = audit.find(query, fields_to_select) if count: return [{"count": cursor.count()}] cursor.skip(max(start, 0)).limit(limit) if type(sort) == dict and len(sort) == 1: sort_key = sort.keys()[0] # TODO: encode sort key if it has dots sort_dir = int(sort[sort_key]) # -1 for desc, 1 for asc cursor.sort(_encode_for_mongo(sort_key), sort_dir) # set batch size for cursor iteration cursor.batch_size = cls.DEFAULT_BATCHSIZE return cursor
ValueError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/main/models/audit.py/AuditLog.query_mongo
5,478
def get_plural(locale=LC_CTYPE): """A tuple with the information catalogs need to perform proper pluralization. The first item of the tuple is the number of plural forms, the second the plural expression. >>> get_plural(locale='en') (2, '(n != 1)') >>> get_plural(locale='ga') (3, '(n==1 ? 0 : n==2 ? 1 : 2)') The object returned is a special tuple with additional members: >>> tup = get_plural("ja") >>> tup.num_plurals 1 >>> tup.plural_expr '0' >>> tup.plural_forms 'npurals=1; plural=0' Converting the tuple into a string prints the plural forms for a gettext catalog: >>> str(tup) 'npurals=1; plural=0' """ locale = Locale.parse(locale) try: tup = PLURALS[str(locale)] except __HOLE__: try: tup = PLURALS[locale.language] except KeyError: tup = DEFAULT_PLURAL return _PluralTuple(tup)
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Babel-0.9.6/babel/messages/plurals.py/get_plural
5,479
def _clean_output(out): try: out = out.decode('utf-8') except __HOLE__: # python3, pragma: no cover pass return out.strip()
AttributeError
dataset/ETHPy150Open hayd/pep8radius/pep8radius/shell.py/_clean_output
5,480
def _validate_database(self): """ Makes sure that the database is openable. Removes the file if it's not. """ # If there is no file there, that's fine. It will get created when # we connect. if not os.path.exists(self.filename): self._create_database() return # If we can connect to the database, and do anything, then all is good. try: with self._connect() as conn: self._create_tables(conn) return except sqlite3.DatabaseError: pass # fall through to next case # If the file contains JSON with the right stuff in it, convert from # the old representation. try: with open(self.filename, 'rb') as f: data = json.loads(f.read().decode('utf-8')) keys = [ 'account_id', 'application_key', 'account_auth_token', 'api_url', 'download_url', 'minimum_part_size', 'realm' ] if all(k in data for k in keys): # remove the json file os.unlink(self.filename) # create a database self._create_database() # add the data from the JSON file with self._connect() as conn: self._create_tables(conn) insert_statement = """ INSERT INTO account (account_id, application_key, account_auth_token, api_url, download_url, minimum_part_size, realm) values (?, ?, ?, ?, ?, ?, ?); """ conn.execute(insert_statement, tuple(data[k] for k in keys)) # all is happy now return except __HOLE__: # includes json.decoder.JSONDecodeError pass # Remove the corrupted file and create a new database raise CorruptAccountInfo(self.filename)
ValueError
dataset/ETHPy150Open Backblaze/B2_Command_Line_Tool/b2/account_info.py/SqliteAccountInfo._validate_database
5,481
def test_upload_url_concurrency(): # Clean up from previous tests file_name = '/tmp/test_upload_conncurrency.db' try: os.unlink(file_name) except __HOLE__: pass # Make an account info with a bunch of upload URLs in it. account_info = SqliteAccountInfo(file_name) available_urls = set() for i in six.moves.range(3000): url = 'url_%d' % i account_info.put_bucket_upload_url('bucket-id', url, 'auth-token-%d' % i) available_urls.add(url) # Pull them all from the account info, from multiple threads lock = threading.Lock() def run_thread(): while True: (url, _) = account_info.take_bucket_upload_url('bucket-id') if url is None: break with lock: if url in available_urls: available_urls.remove(url) else: print('DOUBLE:', url) threads = [] for i in six.moves.range(5): thread = threading.Thread(target=run_thread) thread.start() threads.append(thread) for t in threads: t.join() # Check if len(available_urls) != 0: print('LEAK:', available_urls) # Clean up os.unlink(file_name)
OSError
dataset/ETHPy150Open Backblaze/B2_Command_Line_Tool/b2/account_info.py/test_upload_url_concurrency
5,482
def mod_data(opts, full): ''' Grab the module's data ''' ret = {} finder = modulefinder.ModuleFinder() try: finder.load_file(full) except __HOLE__ as exc: print('ImportError - {0} (Reason: {1})'.format(full, exc), file=sys.stderr) return ret for name, mod in finder.modules.items(): basemod = name.split('.')[0] if basemod in ret: continue if basemod.startswith('_'): continue if not mod.__file__: continue if opts['bif'] not in mod.__file__: # Bif - skip continue if name == os.path.basename(mod.__file__)[:-3]: continue ret[basemod] = mod.__file__ for name, err in finder.badmodules.items(): basemod = name.split('.')[0] if basemod in ret: continue if basemod.startswith('_'): continue ret[basemod] = err return ret
ImportError
dataset/ETHPy150Open saltstack/salt/tests/modparser.py/mod_data
5,483
def handle_noargs(self, **options): try: for line in self.handle_inspection(options): self.stdout.write("%s\n" % line) except __HOLE__: raise CommandError("Database inspection isn't supported for the currently selected database backend.")
NotImplementedError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/management/commands/inspectdb.py/Command.handle_noargs
5,484
def handle_inspection(self, options): connection = connections[options.get('database')] # 'table_name_filter' is a stealth option table_name_filter = options.get('table_name_filter') table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '') strip_prefix = lambda s: s.startswith("u'") and s[1:] or s cursor = connection.cursor() yield "# This is an auto-generated Django model module." yield "# You'll have to do the following manually to clean this up:" yield "# * Rearrange models' order" yield "# * Make sure each model has one field with primary_key=True" yield "# Feel free to rename the models, but don't rename db_table values or field names." yield "#" yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'" yield "# into your database." yield "from __future__ import unicode_literals" yield '' yield 'from %s import models' % self.db_module yield '' known_models = [] for table_name in connection.introspection.table_names(cursor): if table_name_filter is not None and callable(table_name_filter): if not table_name_filter(table_name): continue yield 'class %s(models.Model):' % table2model(table_name) known_models.append(table2model(table_name)) try: relations = connection.introspection.get_relations(cursor, table_name) except __HOLE__: relations = {} try: indexes = connection.introspection.get_indexes(cursor, table_name) except NotImplementedError: indexes = {} used_column_names = [] # Holds column names used in the table so far for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)): comment_notes = [] # Holds Field notes, to be displayed in a Python comment. extra_params = {} # Holds Field parameters such as 'db_column'. column_name = row[0] is_relation = i in relations att_name, params, notes = self.normalize_col_name( column_name, used_column_names, is_relation) extra_params.update(params) comment_notes.extend(notes) used_column_names.append(att_name) # Add primary_key and unique, if necessary. if column_name in indexes: if indexes[column_name]['primary_key']: extra_params['primary_key'] = True elif indexes[column_name]['unique']: extra_params['unique'] = True if is_relation: rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1]) if rel_to in known_models: field_type = 'ForeignKey(%s' % rel_to else: field_type = "ForeignKey('%s'" % rel_to else: # Calling `get_field_type` to get the field type string and any # additional paramters and notes. field_type, field_params, field_notes = self.get_field_type(connection, table_name, row) extra_params.update(field_params) comment_notes.extend(field_notes) field_type += '(' # Don't output 'id = meta.AutoField(primary_key=True)', because # that's assumed if it doesn't exist. if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}: continue # Add 'null' and 'blank', if the 'null_ok' flag was present in the # table description. if row[6]: # If it's NULL... extra_params['blank'] = True if not field_type in ('TextField(', 'CharField('): extra_params['null'] = True field_desc = '%s = models.%s' % (att_name, field_type) if extra_params: if not field_desc.endswith('('): field_desc += ', ' field_desc += ', '.join([ '%s=%s' % (k, strip_prefix(repr(v))) for k, v in extra_params.items()]) field_desc += ')' if comment_notes: field_desc += ' # ' + ' '.join(comment_notes) yield ' %s' % field_desc for meta_line in self.get_meta(table_name): yield meta_line
NotImplementedError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/management/commands/inspectdb.py/Command.handle_inspection
5,485
def get_field_type(self, connection, table_name, row): """ Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. """ field_params = {} field_notes = [] try: field_type = connection.introspection.get_field_type(row[1], row) except __HOLE__: field_type = 'TextField' field_notes.append('This field type is a guess.') # This is a hook for DATA_TYPES_REVERSE to return a tuple of # (field_type, field_params_dict). if type(field_type) is tuple: field_type, new_params = field_type field_params.update(new_params) # Add max_length for all CharFields. if field_type == 'CharField' and row[3]: field_params['max_length'] = row[3] if field_type == 'DecimalField': field_params['max_digits'] = row[4] field_params['decimal_places'] = row[5] return field_type, field_params, field_notes
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/management/commands/inspectdb.py/Command.get_field_type
5,486
def run(self): self.assert_has_content() text = '\n'.join(self.content) try: if self.arguments: classes = directives.class_option(self.arguments[0]) else: classes = [] except __HOLE__: raise self.error( 'Invalid class attribute value for "%s" directive: "%s".' % (self.name, self.arguments[0])) node = nodes.container(text) node['classes'].extend(classes) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node]
ValueError
dataset/ETHPy150Open adieu/allbuttonspressed/docutils/parsers/rst/directives/body.py/Container.run
5,487
def test_class_object_qualname(self): # Test preservation of instance method __qualname__ attribute. try: __qualname__ = OldClass1o.original.__qualname__ except __HOLE__: pass else: self.assertEqual(OldClass1d.function.__qualname__, __qualname__)
AttributeError
dataset/ETHPy150Open GrahamDumpleton/wrapt/tests/test_instancemethod.py/TestNamingInstanceMethodOldStyle.test_class_object_qualname
5,488
def test_instance_object_qualname(self): # Test preservation of instance method __qualname__ attribute. try: __qualname__ = OldClass1o().original.__qualname__ except __HOLE__: pass else: self.assertEqual(OldClass1d().function.__qualname__, __qualname__)
AttributeError
dataset/ETHPy150Open GrahamDumpleton/wrapt/tests/test_instancemethod.py/TestNamingInstanceMethodOldStyle.test_instance_object_qualname
5,489
def test_class_object_qualname(self): # Test preservation of instance method __qualname__ attribute. try: __qualname__ = NewClass1o.original.__qualname__ except __HOLE__: pass else: self.assertEqual(NewClass1d.function.__qualname__, __qualname__)
AttributeError
dataset/ETHPy150Open GrahamDumpleton/wrapt/tests/test_instancemethod.py/TestNamingInstanceMethodNewStyle.test_class_object_qualname
5,490
def test_instance_object_qualname(self): # Test preservation of instance method __qualname__ attribute. try: __qualname__ = NewClass1o().original.__qualname__ except __HOLE__: pass else: self.assertEqual(NewClass1d().function.__qualname__, __qualname__)
AttributeError
dataset/ETHPy150Open GrahamDumpleton/wrapt/tests/test_instancemethod.py/TestNamingInstanceMethodNewStyle.test_instance_object_qualname
5,491
def remove(self, player): if _debug: print 'DirectSoundWorker remove', player self.condition.acquire() try: self.players.remove(player) except __HOLE__: pass self.condition.notify() self.condition.release() if _debug: print 'return DirectSoundWorker remove', player
KeyError
dataset/ETHPy150Open ardekantur/pyglet/experimental/mt_media/drivers/directsound/__init__.py/DirectSoundWorker.remove
5,492
def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_liblsl', [dirname(__file__)]) except __HOLE__: import _liblsl return _liblsl if fp is not None: try: _mod = imp.load_module('_liblsl', fp, pathname, description) finally: fp.close() return _mod
ImportError
dataset/ETHPy150Open sccn/SNAP/src/pylsl/binaries-python2.4-win32/liblsl.py/swig_import_helper
5,493
def replaceReads(targetbam, donorbam, outputbam, nameprefix=None, excludefile=None, allreads=False, keepqual=False, progress=False, keepsecondary=False, seed=None): ''' targetbam, donorbam, and outputbam are pysam.Samfile objects outputbam must be writeable and use targetbam as template read names in excludefile will not appear in final output ''' if seed is not None: random.seed(int(seed)) # check whether references are compatible if not compare_ref(targetbam, donorbam): sys.exit("Target and donor are aligned to incompatable reference genomes!") RG = getRGs(targetbam) # read groups exclude = {} if excludefile: exclude = getExcludedReads(excludefile) # load reads from donorbam into dict sys.stdout.write("loading donor reads into dictionary...\n") nr = 0 rdict = {} secondary = [] # track secondary alignments, if specified excount = 0 # number of excluded reads nullcount = 0 # number of null reads for read in donorbam.fetch(until_eof=True): if read.seq and not read.is_secondary: # sanity check - don't include null reads, secondary alignments, supplementary alignments. if read.qname not in exclude: pairname = 'F' # read is first in pair if read.is_read2: pairname = 'S' # read is second in pair if not read.is_paired: pairname = 'U' # read is unpaired if nameprefix: qual = read.qual # temp read.qname = nameprefix + read.qname # must set name _before_ setting quality (see pysam docs) read.qual = qual extqname = ','.join((read.qname,pairname)) rdict[extqname] = read nr += 1 else: # excluded excount += 1 elif (read.is_secondary and keepsecondary): secondary.append(read) else: # no seq! nullcount += 1 sys.stdout.write("loaded " + str(nr) + " reads, (" + str(excount) + " excluded, " + str(nullcount) + " null or secondary --> ignored)\n") excount = 0 recount = 0 # number of replaced reads used = {} prog = 0 for read in targetbam.fetch(until_eof=True): prog += 1 if progress and prog % 10000000 == 0: sys.stdout.write("processed " + str(prog) + " reads.\n") if not read.is_secondary and read.qname not in exclude and bin(read.flag & 2048) != bin(2048): pairname = 'F' # read is first in pair if read.is_read2: pairname = 'S' # read is second in pair if not read.is_paired: pairname = 'U' # read is unpaired if nameprefix: qual = read.qual # temp read.qname = nameprefix + read.qname read.qual = qual extqname = ','.join((read.qname,pairname)) if extqname in rdict: # replace read if keepqual: try: rdict[extqname].qual = read.qual except __HOLE__ as e: sys.stdout.write("error replacing quality score for read: " + str(rdict[extqname].qname) + " : " + str(e) + "\n") sys.stdout.write("donor: " + str(rdict[extqname]) + "\n") sys.stdout.write("target: " + str(read) + "\n") sys.exit(1) rdict[extqname] = cleanup(rdict[extqname],read,RG) outputbam.write(rdict[extqname]) # write read from donor .bam used[extqname] = True recount += 1 else: read = cleanup(read,None,RG) outputbam.write(read) # write read from target .bam else: excount += 1 sys.stdout.write("replaced " + str(recount) + " reads (" + str(excount) + " excluded )\n") if keepsecondary: for secread in secondary: outputbam.write(secread) sys.stdout.write("kept " + str(len(secondary)) + " secondary reads.\n") nadded = 0 # dump the unused reads from the donor if requested with --all if allreads: for extqname in rdict.keys(): if extqname not in used and extqname not in exclude: rdict[extqname] = cleanup(rdict[extqname],None,RG) outputbam.write(rdict[extqname]) nadded += 1 sys.stdout.write("added " + str(nadded) + " reads due to --all\n")
ValueError
dataset/ETHPy150Open adamewing/bamsurgeon/bamsurgeon/replacereads.py/replaceReads
5,494
def _get_font_id(self): if PY2: try: return '|'.join([unicode(self.options[x]) for x in ('font_size', 'font_name_r', 'bold', 'italic')]) except __HOLE__: pass return '|'.join([str(self.options[x]) for x in ('font_size', 'font_name_r', 'bold', 'italic')])
UnicodeDecodeError
dataset/ETHPy150Open kivy/kivy/kivy/core/text/text_pygame.py/LabelPygame._get_font_id
5,495
def create_nonce(user, action, offset=0): if not user: nick = "" else: try: nick = user.nick except __HOLE__: if settings.MANAGE_PY: # extra case to make testing easier nick = clean.nick(user) else: raise i = math.ceil(time.time() / 43200) i += offset nonce = hash_generic(str(i) + action + nick) return nonce[-12:-2]
AttributeError
dataset/ETHPy150Open CollabQ/CollabQ/common/util.py/create_nonce
5,496
def get_user_from_topic(s): """Extracts the username from a topic or Stream object. Topics look like: 'stream/bar@example.com/comments' Returns: A string, the username, or None if the topic name didn't appear to contain a valid userid. """ o = None # Check whether we got a topic name or a Stream instance if not (isinstance(s, str) or isinstance(s, unicode)): s = s.key().name() list = s.split('/') try: email = list[1] if '@' in email: o = email except __HOLE__: # No '/' in s. pass return o
IndexError
dataset/ETHPy150Open CollabQ/CollabQ/common/util.py/get_user_from_topic
5,497
def page_offset(request): """attempts to normalize timestamps into datetimes for offsets""" offset = request.GET.get('offset', None) if offset: try: offset = datetime.datetime.fromtimestamp(float(offset)) except (__HOLE__, ValueError): offset = None return offset, (offset and True or False)
TypeError
dataset/ETHPy150Open CollabQ/CollabQ/common/util.py/page_offset
5,498
def paging_get_page(request): try: page = int(request.GET.get('page', 1)) except __HOLE__: page = 1 if page <= 0: page = 1 return page
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/common/util.py/paging_get_page
5,499
def get_metadata(name, default=None): metadata_ref = get_metadata_ref(name) if metadata_ref: value = metadata_ref.get_value() return value if default is None: try: default = getattr(settings, name) except __HOLE__: logging.warning("AttributeError, %s is not in settings" % name) return default
AttributeError
dataset/ETHPy150Open CollabQ/CollabQ/common/util.py/get_metadata