Column schema of the dataset rows shown below (each record spans four lines: row index, function, label, info):

    Column       Type             Range / cardinality
    Unnamed: 0   int64            values 0 to 10k
    function     string           lengths 79 to 138k characters
    label        string (class)   20 distinct values
    info         string           lengths 42 to 261 characters
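Each record pairs a Python function in which one exception type has been replaced by the placeholder __HOLE__ (the function column) with the name of the masked exception (label) and the path of the source file and symbol it was taken from (info). As a minimal sketch of how a table with this schema could be loaded and inspected with pandas, assuming it has been exported to a Parquet file named exceptions.parquet (the file name and format are assumptions, not given by this listing):

    import pandas as pd

    # Load the exported table; the path and storage format are assumed for illustration.
    df = pd.read_parquet("exceptions.parquet")

    # "label" is categorical: 20 distinct exception class names.
    print(df["label"].value_counts())

    # "function" bodies range from 79 to roughly 138k characters.
    print(df["function"].str.len().describe())

    # Inspect a single record: origin path, masked exception, and function body.
    row = df.iloc[0]
    print(row["info"])
    print(row["label"])
    print(row["function"])

The sample records follow, starting at row 4,500.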
4,500
def close(self):
    for fn in self.named_temporaries:
        try:
            os.remove(fn)
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/BuildState.close
4,501
def make_named_temporary(self, identifier=None):
    """Creates a named temporary file and returns the filename for it.
    This can be usedful in some scenarious when building with external
    tools.
    """
    dir = os.path.join(self.builder.meta_path, 'tmp')
    try:
        os.makedirs(dir)
    except __HOLE__:
        pass
    fn = os.path.join(dir, 'nt-%s-%s.tmp' % (identifier or 'generic',
                                             os.urandom(20).encode('hex')))
    self.named_temporaries.add(fn)
    return fn
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/BuildState.make_named_temporary
4,502
def _get_stat(self):
    rv = self._stat
    if rv is not None:
        return rv
    try:
        st = os.stat(self.filename)
        mtime = int(st.st_mtime)
        if stat.S_ISDIR(st.st_mode):
            size = len(os.listdir(self.filename))
            is_dir = True
        else:
            size = int(st.st_size)
            is_dir = False
        rv = mtime, size, is_dir
    except __HOLE__:
        rv = 0, -1, False
    self._stat = rv
    return rv
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/FileInfo._get_stat
4,503
@property def checksum(self): """The checksum of the file or directory.""" rv = self._checksum if rv is not None: return rv try: h = hashlib.sha1() if os.path.isdir(self.filename): h.update(b'DIR\x00') for filename in sorted(os.listdir(self.filename)): if self.env.is_uninteresting_source_name(filename): continue if isinstance(filename, unicode): filename = filename.encode('utf-8') h.update(filename) h.update(_describe_fs_path_for_checksum( os.path.join(self.filename, filename))) h.update(b'\x00') else: with open(self.filename, 'rb') as f: while 1: chunk = f.read(16 * 1024) if not chunk: break h.update(chunk) checksum = h.hexdigest() except (OSError, __HOLE__): checksum = '0' * 40 self._checksum = checksum return checksum
IOError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/FileInfo.checksum
4,504
def ensure_dir(self):
    """Creates the directory if it does not exist yet."""
    dir = os.path.dirname(self.dst_filename)
    try:
        os.makedirs(dir)
    except __HOLE__:
        pass
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/Artifact.ensure_dir
4,505
def _rollback(self):
    if self._new_artifact_file is not None:
        try:
            os.remove(self._new_artifact_file)
        except __HOLE__:
            pass
        self._new_artifact_file = None
    self._pending_update_ops = []
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/Artifact._rollback
4,506
def __init__(self, pad, destination_path, build_flags=None):
    self.build_flags = process_build_flags(build_flags)
    self.pad = pad
    self.destination_path = os.path.abspath(os.path.join(
        pad.db.env.root_path, destination_path))
    self.meta_path = os.path.join(self.destination_path, '.lektor')
    self.failure_controller = FailureController(pad, self.destination_path)
    try:
        os.makedirs(self.meta_path)
    except __HOLE__:
        pass
    con = self.connect_to_database()
    try:
        create_tables(con)
    finally:
        con.close()
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/Builder.__init__
4,507
def touch_site_config(self):
    """Touches the site config which typically will trigger a rebuild."""
    try:
        os.utime(os.path.join(self.env.root_path, 'site.ini'), None)
    except __HOLE__:
        pass
OSError
dataset/ETHPy150Open lektor/lektor/lektor/builder.py/Builder.touch_site_config
4,508
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15, no_fake=True): # making sure we use the absolute path to_dir = os.path.abspath(to_dir) was_imported = 'pkg_resources' in sys.modules or \ 'setuptools' in sys.modules try: try: import pkg_resources if not hasattr(pkg_resources, '_distribute'): if not no_fake: _fake_setuptools() raise ImportError except __HOLE__: return _do_download(version, download_base, to_dir, download_delay) try: pkg_resources.require("distribute>="+version) return except pkg_resources.VersionConflict: e = sys.exc_info()[1] if was_imported: sys.stderr.write( "The required version of distribute (>=%s) is not available,\n" "and can't be installed while this script is running. Please\n" "install a more recent version first, using\n" "'easy_install -U distribute'." "\n\n(Currently using %r)\n" % (version, e.args[0])) sys.exit(2) else: del pkg_resources, sys.modules['pkg_resources'] # reload ok return _do_download(version, download_base, to_dir, download_delay) except pkg_resources.DistributionNotFound: return _do_download(version, download_base, to_dir, download_delay) finally: if not no_fake: _create_fake_setuptools_pkg_info(to_dir)
ImportError
dataset/ETHPy150Open aerosol/django-dilla/distribute_setup.py/use_setuptools
4,509
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15): """Download distribute from a specified location and return its filename `version` should be a valid distribute version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) try: from urllib.request import urlopen except __HOLE__: from urllib2 import urlopen tgz_name = "distribute-%s.tar.gz" % version url = download_base + tgz_name saveto = os.path.join(to_dir, tgz_name) src = dst = None if not os.path.exists(saveto): # Avoid repeated downloads try: log.warn("Downloading %s", url) src = urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. data = src.read() dst = open(saveto, "wb") dst.write(data) finally: if src: src.close() if dst: dst.close() return os.path.realpath(saveto)
ImportError
dataset/ETHPy150Open aerosol/django-dilla/distribute_setup.py/download_setuptools
4,510
def _no_sandbox(function): def __no_sandbox(*args, **kw): try: from setuptools.sandbox import DirectorySandbox if not hasattr(DirectorySandbox, '_old'): def violation(*args): pass DirectorySandbox._old = DirectorySandbox._violation DirectorySandbox._violation = violation patched = True else: patched = False except __HOLE__: patched = False try: return function(*args, **kw) finally: if patched: DirectorySandbox._violation = DirectorySandbox._old del DirectorySandbox._old return __no_sandbox
ImportError
dataset/ETHPy150Open aerosol/django-dilla/distribute_setup.py/_no_sandbox
4,511
def _fake_setuptools(): log.warn('Scanning installed packages') try: import pkg_resources except __HOLE__: # we're cool log.warn('Setuptools or Distribute does not seem to be installed.') return ws = pkg_resources.working_set try: setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools', replacement=False)) except TypeError: # old distribute API setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools')) if setuptools_dist is None: log.warn('No setuptools distribution found') return # detecting if it was already faked setuptools_location = setuptools_dist.location log.warn('Setuptools installation detected at %s', setuptools_location) # if --root or --preix was provided, and if # setuptools is not located in them, we don't patch it if not _under_prefix(setuptools_location): log.warn('Not patching, --root or --prefix is installing Distribute' ' in another location') return # let's see if its an egg if not setuptools_location.endswith('.egg'): log.warn('Non-egg installation') res = _remove_flat_installation(setuptools_location) if not res: return else: log.warn('Egg installation') pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO') if (os.path.exists(pkg_info) and _same_content(pkg_info, SETUPTOOLS_PKG_INFO)): log.warn('Already patched.') return log.warn('Patching...') # let's create a fake egg replacing setuptools one res = _patch_egg_dir(setuptools_location) if not res: return log.warn('Patched done.') _relaunch()
ImportError
dataset/ETHPy150Open aerosol/django-dilla/distribute_setup.py/_fake_setuptools
4,512
def create_fields(self):
    for coll in self.colls:
        logging.debug("Creating fields for {}".format(coll))
        for field in test_config['collections']['fields']:
            try:
                self.solr.schema.create_field(coll,field)
            except __HOLE__:
                #Filed already exists probably
                pass
ValueError
dataset/ETHPy150Open moonlitesolutions/SolrClient/test/test_reindexer.py/ReindexerTests.create_fields
4,513
def create_copy_fields(self):
    for coll in self.colls:
        logging.debug("Creating copy fields for {}".format(coll))
        for field in test_config['collections']['copy_fields']:
            try:
                self.solr.schema.create_copy_field(coll,field)
            except __HOLE__:
                #Filed already exists probably
                pass
ValueError
dataset/ETHPy150Open moonlitesolutions/SolrClient/test/test_reindexer.py/ReindexerTests.create_copy_fields
4,514
def test_solr_to_solr_with_date(self): self._index_docs(50000,self.colls[0]) solr = SolrClient(test_config['SOLR_SERVER'][0], devel=True, auth=test_config['SOLR_CREDENTIALS']) reindexer = Reindexer(source=solr, source_coll='source_coll', dest=solr, dest_coll='dest_coll', date_field='index_date') reindexer.reindex() try: self.assertTrue(solr.transport._action_log[1]['params']['params']['sort']=='index_date asc, id desc') except __HOLE__: self.assertTrue(solr.transport._action_log[2]['params']['params']['sort']=='index_date asc, id desc') self.assertEqual( solr.query(self.colls[0],{'q':'*:*','rows':10000000}).docs.sort(key=lambda x: x['id']), solr.query(self.colls[1],{'q':'*:*','rows':10000000}).docs.sort(key=lambda x: x['id']), )
KeyError
dataset/ETHPy150Open moonlitesolutions/SolrClient/test/test_reindexer.py/ReindexerTests.test_solr_to_solr_with_date
4,515
def __new__(cls, name, bases, attrs):
    new_cls = type.__new__(cls, name, bases, attrs)
    if hasattr(new_cls, 'model'):
        typemapper[new_cls] = (new_cls.model, new_cls.is_anonymous)
    new_cls.__dir = dir(new_cls)
    for field in ('fields', 'model', 'exclude', 'allowed_methods',
                  'anonymous', 'is_anonymous', 'read', 'create',
                  'update', 'delete'):
        try:
            new_cls.__dir.remove(field)
        except __HOLE__:
            pass
    new_cls.__class__.__dir__ = lambda x: x.__dir
    return new_cls
ValueError
dataset/ETHPy150Open treeio/treeio/treeio/core/api/handlers.py/ObjectHandlerMetaClass.__new__
4,516
def read(self, request, *args, **kwargs):
    if not self.has_model():
        return rc.NOT_IMPLEMENTED
    pkfield = kwargs.get(self.model._meta.pk.name)
    if pkfield:
        try:
            obj = self.model.objects.get(pk=pkfield)
            if not self.check_instance_permission(request, obj, 'r'):
                return rc.FORBIDDEN
            else:
                return obj
        except __HOLE__:
            return rc.NOT_FOUND  # should never happen, since we're using a PK
        except MultipleObjectsReturned:
            return rc.BAD_REQUEST
    else:
        query = self.get_filter_query(request.GET)
        return self.model.filter_by_request(request,
                                            self.model.objects.filter(query))
ObjectDoesNotExist
dataset/ETHPy150Open treeio/treeio/treeio/core/api/handlers.py/ObjectHandler.read
4,517
def update(self, request, *args, **kwargs):
    if not self.has_model() or not self.has_form():
        return rc.NOT_IMPLEMENTED
    pkfield = kwargs.get(self.model._meta.pk.name) or request.data.get(
        self.model._meta.pk.name)
    if not pkfield or request.data is None:
        return rc.BAD_REQUEST
    try:
        obj = self.model.objects.get(pk=pkfield)
    except __HOLE__:
        return rc.NOT_FOUND
    if not self.check_instance_permission(request, obj, "w"):
        return rc.FORBIDDEN
    attrs = self.flatten_dict(request)
    form = self.form(instance=obj, **attrs)
    if form.is_valid():
        obj = form.save()
        return obj
    else:
        self.status = 400
        return form.errors
ObjectDoesNotExist
dataset/ETHPy150Open treeio/treeio/treeio/core/api/handlers.py/ObjectHandler.update
4,518
def tidy_xml(filename): ''' read in file, screen out unsafe unicode characters, write back file in utf-8 :param filename: str :returns: False if unable to read from file ''' if not os.path.isfile(filename): raise ValueError('file does not exist') # try first utf-8 then iso. This is ugly, but the files in # question that are problematic do not declare unicode type data = None for ftype in ['utf-8', 'iso8859-1']: fhand = None try: fhand = codecs.open(filename, 'r', ftype) data = fhand.read() break except __HOLE__: continue finally: if fhand is not None: fhand.close() if data is None: return False for match in _SAFE_XML_REGEX.finditer(data): data = data[:match.start()] + '?' + data[match.end():] with open(filename, 'w') as fhand: fhand.write(data) return True
ValueError
dataset/ETHPy150Open ros/catkin/python/catkin/tidy_xml.py/tidy_xml
4,519
def _interpret_hadoop_jar_command_stderr(stderr, record_callback=None): """Parse stderr from the ``hadoop jar`` command. Works like :py:func:`_parse_step_log` (same return format) with a few extra features to handle the output of the ``hadoop jar`` command on the fly: - Converts ``bytes`` lines to ``str`` - Pre-filters non-log4j stuff from Hadoop Streaming so it doesn't get treated as part of a multi-line message - Handles "stderr" from a PTY (including treating EIO as EOF and pre-filtering stdout lines from Hadoop Streaming) - Optionally calls *record_callback* for each log4j record (see :py:func:`~mrjob.logs.log4j._parse_hadoop_log4j_records`). """ def yield_lines(): try: for line in stderr: yield to_string(line) except __HOLE__ as e: # this is just the PTY's way of saying goodbye if e.errno == errno.EIO: return else: raise def pre_filter(line): return bool(_HADOOP_STREAMING_NON_LOG4J_LINE_RE.match(line)) def yield_records(): for record in _parse_hadoop_log4j_records(yield_lines(), pre_filter=pre_filter): if record_callback: record_callback(record) yield record result = _parse_step_log_from_log4j_records(yield_records()) _add_implied_job_id(result) for error in result.get('errors') or (): _add_implied_task_id(error) return result
IOError
dataset/ETHPy150Open Yelp/mrjob/mrjob/logs/step.py/_interpret_hadoop_jar_command_stderr
4,520
def what(self, page, args):
    level = int(args.get('level', 1))
    if page.level + 1 == level:
        return page.get_absolute_url()
    elif page.level + 1 < level:
        return '#'
    try:
        return page.get_ancestors()[level - 1].get_absolute_url()
    except __HOLE__:
        return '#'
IndexError
dataset/ETHPy150Open feincms/feincms/feincms/templatetags/feincms_page_tags.py/ParentLinkNode.what
4,521
def _translate_page_into(page, language, default=None):
    """
    Return the translation for a given page
    """
    # Optimisation shortcut: No need to dive into translations if page already
    # what we want
    try:
        if page.language == language:
            return page
        if language is not None:
            translations = dict(
                (t.language, t) for t in page.available_translations())
            if language in translations:
                return translations[language]
    except __HOLE__:
        pass
    if hasattr(default, '__call__'):
        return default(page=page)
    return default
# ------------------------------------------------------------------------
AttributeError
dataset/ETHPy150Open feincms/feincms/feincms/templatetags/feincms_page_tags.py/_translate_page_into
4,522
@register.filter
def is_parent_of(page1, page2):
    """
    Determines whether a given page is the parent of another page

    Example::

        {% if page|is_parent_of:feincms_page %} ... {% endif %}
    """
    try:
        return page1.is_ancestor_of(page2)
    except (AttributeError, __HOLE__):
        return False
# ------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open feincms/feincms/feincms/templatetags/feincms_page_tags.py/is_parent_of
4,523
@register.filter
def is_equal_or_parent_of(page1, page2):
    """
    Determines whether a given page is equal to or the parent of another page.
    This is especially handy when generating the navigation. The following
    example adds a CSS class ``current`` to the current main navigation entry::

        {% for page in navigation %}
            <a {% if page|is_equal_or_parent_of:feincms_page %}
                class="current"
            {% endif %} >{{ page.title }}</a>
        {% endfor %}
    """
    try:
        return page1.is_ancestor_of(page2, include_self=True)
    except (AttributeError, __HOLE__):
        return False
# ------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open feincms/feincms/feincms/templatetags/feincms_page_tags.py/is_equal_or_parent_of
4,524
@register.filter
def is_sibling_of(page1, page2):
    """
    Determines whether a given page is a sibling of another page ::

        {% if page|is_sibling_of:feincms_page %} ... {% endif %}
    """
    try:
        return _is_sibling_of(page1, page2)
    except __HOLE__:
        return False
# ------------------------------------------------------------------------
AttributeError
dataset/ETHPy150Open feincms/feincms/feincms/templatetags/feincms_page_tags.py/is_sibling_of
4,525
@register.filter def siblings_along_path_to(page_list, page2): """ Filters a list of pages so that only those remain that are either: * An ancestor of the current page * A sibling of an ancestor of the current page A typical use case is building a navigation menu with the active path to the current page expanded:: {% feincms_nav feincms_page level=1 depth=3 as navitems %} {% with navitems|siblings_along_path_to:feincms_page as navtree %} ... whatever ... {% endwith %} """ if page_list: try: # Try to avoid hitting the database: If the current page is # in_navigation, then all relevant pages are already in the # incoming list, no need to fetch ancestors or children. # NOTE: This assumes that the input list actually is complete (ie. # comes from feincms_nav). We'll cope with the fall-out of that # assumption when it happens... ancestors = [ a_page for a_page in page_list if a_page.is_ancestor_of(page2, include_self=True)] top_level = min((a_page.level for a_page in page_list)) if not ancestors: # Happens when we sit on a page outside the navigation tree so # fake an active root page to avoid a get_ancestors() db call # which would only give us a non-navigation root page anyway. page_class = _get_page_model() p = page_class( title="dummy", tree_id=-1, parent_id=None, in_navigation=False) ancestors = (p,) siblings = [ a_page for a_page in page_list if a_page.parent_id == page2.id or a_page.level == top_level or any((_is_sibling_of(a_page, a) for a in ancestors))] return siblings except (AttributeError, __HOLE__) as e: logger.warn( "siblings_along_path_to caught exception: %s", format_exception(e)) return () # ------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open feincms/feincms/feincms/templatetags/feincms_page_tags.py/siblings_along_path_to
4,526
def create_app(): app = Flask('easymode') app.config['TESTING'] = True app.config['SECRET_KEY'] = '123454fdsafdsfdsfdsfds' @app.route('/') def index(): return 'I am the index page.' @app.route('/xhr') @xhr_api() def xhr_endpoint(): try: g.xhr.data['test'] = 'monkey' g.xhr.data['walrus'] = 'punch' except AttributeError: pass flash('This is a test message.') flash('This is a warning.', 'warning') flash('This is an error', 'error') flash('This is just some info', 'info') flash('This is another warning', 'warning') @app.route('/xhr-failure') @xhr_api() def xhr_failure(): try: g.xhr.data['test'] = 'monkey' except AttributeError: pass raise XHRError('Disaster everywhere.') @app.route('/xhr-failure-with-code') @xhr_api() def xhr_failure_with_code(): try: g.xhr.data['test'] = 'monkey' except AttributeError: pass raise XHRError('Disaster befalls the city', status_code=500) @app.route('/xhr-that-returns-something') @xhr_api() def xhr_that_returns_something(): try: g.xhr.data['test'] = 'monkey' except AttributeError: pass return 'Here is some string that would never be returned if the XHR API were active.' @app.route('/xhr-that-allows-regular-http') @xhr_api(allow_http=True) def xhr_that_allows_regular_http(): try: g.xhr.data['test'] = 'monkey' except __HOLE__: pass flash('A message in a bottle.') return 'Here is some regular return stuff' @app.route('/inject/<injectable_class_slug_name>') @inject('injectable_class') def inject_test_class_slug_name(): return 'I have been injected with %s' % g.injectable_class.slug_name @app.route('/inject-as-arg/<injectable_class_slug_name>') @inject('injectable_class', as_args=True) def inject_test_class_args(injectable_class): return 'I have been injected with %s' % injectable_class.slug_name @app.route('/inject-non-injectable/<non_injectable_class_slug_name>') @inject('non_injectable_class') def inject_the_noninjectable(): return 'This will never happen because there will be an exception :(' @app.route('/inject-skip-by-default', defaults={'injectable_class_slug_name': None}) @app.route('/inject-skip-by-default/<injectable_class_slug_name>') @inject('injectable_class', as_args=True) def inject_skip_by_default(injectable_class): return injectable_class.slug_name @app.route('/inject-list-denoting/<injectable_class_category_name>') @inject('injectable_class', default='skip', lists='denote', as_args=True) def inject_list_denoting(injectable_class_list): return str(injectable_class_list) return app
AttributeError
dataset/ETHPy150Open petermelias/flask-easymode/flask_easymode/tests/__init__.py/create_app
4,527
def port_ranges():
    """
    Returns a list of ephemeral port ranges for current machine.
    """
    try:
        return _linux_ranges()
    except (OSError, IOError):
        # not linux, try BSD
        try:
            ranges = _bsd_ranges()
            if ranges:
                return ranges
        except (__HOLE__, IOError):
            pass
    # fallback
    return [DEFAULT_EPHEMERAL_PORT_RANGE]
OSError
dataset/ETHPy150Open kmike/port-for/port_for/ephemeral.py/port_ranges
4,528
def _bsd_ranges():
    pp = subprocess.Popen(['sysctl', 'net.inet.ip.portrange'], stdout=subprocess.PIPE)
    stdout, stderr = pp.communicate()
    lines = stdout.decode('ascii').split('\n')
    out = dict([[x.strip().rsplit('.')[-1] for x in l.split(':')] for l in lines if l])
    ranges = [
        # FreeBSD & Mac
        ('first', 'last'),
        ('lowfirst', 'lowlast'),
        ('hifirst', 'hilast'),
        # OpenBSD
        ('portfirst', 'portlast'),
        ('porthifirst', 'porthilast'),
    ]
    res = []
    for rng in ranges:
        try:
            low, high = int(out[rng[0]]), int(out[rng[1]])
            if low <= high:
                res.append((low, high))
        except __HOLE__:
            pass
    return res
KeyError
dataset/ETHPy150Open kmike/port-for/port_for/ephemeral.py/_bsd_ranges
4,529
def clean(self):
    try:
        paymentmethod = self.cleaned_data['paymentmethod']
    except __HOLE__:
        self._errors['paymentmethod'] = forms.util.ErrorList([_('This field is required')])
        return self.cleaned_data
    required_fields = self.payment_required_fields.get(paymentmethod, [])
    msg = _('Selected payment method requires this field to be filled')
    for fld in required_fields:
        if not (self.cleaned_data.has_key(fld) and self.cleaned_data[fld]):
            self._errors[fld] = forms.util.ErrorList([msg])
        elif fld == 'state':
            self.enforce_state = True
            try:
                self._check_state(self.cleaned_data['state'], self.cleaned_data['country'])
            except forms.ValidationError, e:
                self._errors[fld] = e.messages
    super(PaymentContactInfoForm, self).clean()
    return self.cleaned_data
KeyError
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/payment/forms.py/PaymentContactInfoForm.clean
4,530
def clean_ccv(self):
    """ Validate a proper CCV is entered. Remember it can have a leading 0
    so don't convert to int and return it"""
    try:
        check = int(self.cleaned_data['ccv'])
        return self.cleaned_data['ccv'].strip()
    except __HOLE__:
        raise forms.ValidationError(_('Invalid ccv.'))
ValueError
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/payment/forms.py/CreditPayShipForm.clean_ccv
4,531
def initialize_extensions(shell, extensions):
    """
    Partial copy of `InteractiveShellApp.init_extensions` from IPython.
    """
    try:
        iter(extensions)
    except __HOLE__:
        pass  # no extensions found
    else:
        for ext in extensions:
            try:
                shell.extension_manager.load_extension(ext)
            except:
                ipy_utils.warn.warn(
                    "Error in loading extension: %s" % ext +
                    "\nCheck your config files in %s" % ipy_utils.path.get_ipython_dir())
                shell.showtraceback()
TypeError
dataset/ETHPy150Open jonathanslenders/ptpython/ptpython/ipython.py/initialize_extensions
4,532
def __getattr__(self, key):
    try:
        return self[key]
    except __HOLE__:
        raise AttributeError(key)
KeyError
dataset/ETHPy150Open celery/kombu/kombu/transport/base.py/Implements.__getattr__
4,533
def find_systemjs_location():
    """
    Figure out where `jspm_packages/system.js` will be put by JSPM.
    """
    location = os.path.abspath(os.path.dirname(locate_package_json()))
    conf = parse_package_json()
    if 'jspm' in conf:
        conf = conf['jspm']
    try:
        conf = conf['directories']
    except __HOLE__:
        raise ImproperlyConfigured("`package.json` doesn't appear to be a valid json object. "
                                   "Location: %s" % location)
    except KeyError:
        raise ImproperlyConfigured("The `directories` configuarion was not found in package.json. "
                                   "Please check your jspm install and/or configuarion. `package.json` "
                                   "location: %s" % location)
    # check for explicit location, else fall back to the default as jspm does
    jspm_packages = conf['packages'] if 'packages' in conf else 'jspm_packages'
    base = conf['baseURL'] if 'baseURL' in conf else '.'
    return os.path.join(location, base, jspm_packages, 'system.js')
TypeError
dataset/ETHPy150Open sergei-maertens/django-systemjs/systemjs/jspm.py/find_systemjs_location
4,534
def register_response(self, command, kwargs, response):
    """
    Register a response to a L{callRemote} command.

    @param commandType: a subclass of C{amp.Command}.
    @param kwargs: Keyword arguments taken by the command, a C{dict}.
    @param response: The response to the command.
    """
    if isinstance(response, Exception):
        response = Failure(response)
    else:
        try:
            command.makeResponse(response, AMP())
        except __HOLE__:
            raise InvalidSignature("Bad registered response")
    self._responses[self._makeKey(command, kwargs)] = response
KeyError
dataset/ETHPy150Open ClusterHQ/flocker/flocker/testtools/amp.py/FakeAMPClient.register_response
4,535
def run(self): uri = self.arguments[0] try: handler = _RAPI_RESOURCES_FOR_DOCS[uri] except __HOLE__: raise self.error("Unknown resource URI '%s'" % uri) lines = [ ".. list-table::", " :widths: 1 4", " :header-rows: 1", "", " * - Method", " - :ref:`Required permissions <rapi-users>`", ] for method in _GetHandlerMethods(handler): lines.extend([ " * - :ref:`%s <%s>`" % (method, _MakeRapiResourceLink(method, uri)), " - %s" % _DescribeHandlerAccess(handler, method), ]) # Inject into state machine include_lines = \ docutils.statemachine.string2lines("\n".join(lines), _TAB_WIDTH, convert_whitespace=1) self.state_machine.insert_input(include_lines, self.__class__.__name__) return []
KeyError
dataset/ETHPy150Open ganeti/ganeti/lib/build/sphinx_ext.py/RapiResourceDetails.run
4,536
def test_strftime(self):
    tt = time.gmtime(self.t)
    for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
                      'j', 'm', 'M', 'p', 'S', 'U', 'w', 'W',
                      'x', 'X', 'y', 'Y', 'Z', '%'):
        format = ' %' + directive
        try:
            time.strftime(format, tt)
        except __HOLE__:
            self.fail('conversion specifier: %r failed.' % format)
ValueError
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_time.py/TimeTestCase.test_strftime
4,537
def test_strptime(self):
    tt = time.gmtime(self.t)
    for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
                      'j', 'm', 'M', 'p', 'S', 'U', 'w', 'W',
                      'x', 'X', 'y', 'Y', 'Z', '%'):
        format = ' %' + directive
        try:
            time.strptime(time.strftime(format, tt), format)
        except __HOLE__:
            self.fail('conversion specifier: %r failed.' % format)
ValueError
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_time.py/TimeTestCase.test_strptime
4,538
def parse_spec(file):
    classes = {}
    cur = None
    for line in fileinput.input(file):
        if line.strip().startswith('#'):
            continue
        mo = rx_init.search(line)
        if mo is None:
            if cur is None:
                # a normal entry
                try:
                    name, args = line.split(':')
                except __HOLE__:
                    continue
                classes[name] = NodeInfo(name, args)
                cur = None
            else:
                # some code for the __init__ method
                cur.init.append(line)
        else:
            # some extra code for a Node's __init__ method
            name = mo.group(1)
            cur = classes[name]
    return sorted(classes.values(), key=lambda n: n.name)
ValueError
dataset/ETHPy150Open anandology/pyjamas/pgen/astgen.skult.py/parse_spec
4,539
def _load_tasks_file(task_id, path, batch): """ :type task_id: vulyk.models.task_types.AbstractTaskType :type path: str | unicode :param batch: Batch ID tasks should be loaded into :type batch: str | unicode :return: Number of stored tasks :rtype: int """ i = 0 bunch_size = 100 def _safe_load(fl): """ :type fl: file :rtype: __generator[dict] """ l = lambda s: json.loads(s) if len(s.strip()) > 0 else {} return ifilter(None, imap(l, fl)) try: with open_anything(path)(path, 'rb') as f: for chunk in chunked(_safe_load(f), bunch_size): task_id.import_tasks(chunk, batch) i += len(chunk) echo('{0:d} tasks processed'.format(i)) except ValueError as e: echo('Error while decoding json in {0}: {1}'.format(path, e)) except __HOLE__ as e: echo('Got IO error when tried to decode {0}: {1}'.format(path, e)) echo('Finished loading {0:d} tasks'.format(i)) return i
IOError
dataset/ETHPy150Open mrgambal/vulyk/vulyk/cli/db.py/_load_tasks_file
4,540
def export_tasks(task_id, path, batch, closed): """ :type task_id: vulyk.models.task_types.AbstractTaskType :type path: str | unicode :type batch: str | unicode :type closed: boolean """ i = 0 try: with open(path, 'w+') as f: for report in task_id.export_reports(batch, closed): f.write(json.dumps(report) + os.linesep) i += 1 if i + 1 % 100 == 0: echo('{0:d} tasks processed'.format(i)) except ValueError as e: echo('Error while encoding json in {0}: {1}'.format(path, e)) except __HOLE__ as e: echo('Got IO error when tried to decode {0}: {1}'.format(path, e)) echo('Finished exporting answers for {0:d} tasks'.format(i))
IOError
dataset/ETHPy150Open mrgambal/vulyk/vulyk/cli/db.py/export_tasks
4,541
def __getattr__(self, param): #compatibility hack
    try:
        return self._data[param]
    except __HOLE__:
        raise AttributeError("%s has no attribute \"%s\"" % (self, param))
KeyError
dataset/ETHPy150Open OrbitzWorldwide/droned/romeo/lib/romeo/grammars/__init__.py/_Query.__getattr__
4,542
def loadAll():
    """this method should only be called by the 'romeo' module"""
    #import all builtin query handlers now
    my_dir = _os.path.dirname(__file__)
    for filename in _os.listdir(my_dir):
        if not filename.endswith('.py'):
            continue
        if filename == '__init__.py':
            continue
        modname = filename[:-3]
        try:
            try:
                mod = __import__(__name__ + '.' + modname, fromlist=[modname])
            except __HOLE__:
                #python2.4 __import__ implementation doesn't accept **kwargs
                mod = __import__(__name__ + '.' + modname, {}, {}, [modname])
        except:
            _traceback.print_exc()
TypeError
dataset/ETHPy150Open OrbitzWorldwide/droned/romeo/lib/romeo/grammars/__init__.py/loadAll
4,543
def savitzky_golay(y, window_size, order, deriv=0): # code from from scipy cookbook """Smooth (and optionally differentiate) data with a Savitzky-Golay filter. The Savitzky-Golay filter removes high frequency noise from data. It has the advantage of preserving the original shape and features of the signal better than other types of filtering approaches, such as moving averages techhniques. Parameters ---------- y : array_like, shape (N,) the values of the time history of the signal. window_size : int the length of the window. Must be an odd integer number. order : int the order of the polynomial used in the filtering. Must be less then `window_size` - 1. deriv: int the order of the derivative to compute (default = 0 means only smoothing) Returns ------- ys : ndarray, shape (N) the smoothed signal (or it's n-th derivative). Notes ----- The Savitzky-Golay is a type of low-pass filter, particularly suited for smoothing noisy data. The main idea behind this approach is to make for each point a least-square fit with a polynomial of high order over a odd-sized window centered at the point. Examples -------- t = np.linspace(-4, 4, 500) y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape) ysg = savitzky_golay(y, window_size=31, order=4) import matplotlib.pyplot as plt plt.plot(t, y, label='Noisy signal') plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal') plt.plot(t, ysg, 'r', label='Filtered signal') plt.legend() plt.show() References ---------- .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), pp 1627-1639. .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery Cambridge University Press ISBN-13: 9780521880688 """ try: window_size = abs(int(window_size)) order = abs(int(order)) except __HOLE__(msg): raise ValueError("window_size and order have to be of type int") if window_size % 2 != 1 or window_size < 1: raise TypeError("window_size size must be a positive odd number") if window_size < order + 2: raise TypeError("window_size is too small for the polynomials order") order_range = range(order+1) half_window = (window_size -1) // 2 # precompute coefficients b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)]) m = np.linalg.pinv(b).A[deriv] # pad the signal at the extremes with # values taken from the signal itself firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] ) lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1]) y = np.concatenate((firstvals, y, lastvals)) return np.convolve(m, y, mode='valid') ### MAIN CLASS
ValueError
dataset/ETHPy150Open xraypy/xraylarch/plugins/io/specfiledata.py/savitzky_golay
4,544
def parse_inlinefunc(string, strip=False, **kwargs): """ Parse the incoming string. Args: string (str): The incoming string to parse. strip (bool, optional): Whether to strip function calls rather than execute them. Kwargs: session (Session): This is sent to this function by Evennia when triggering it. It is passed to the inlinefunc. kwargs (any): All other kwargs are also passed on to the inlinefunc. """ global _PARSING_CACHE if string in _PARSING_CACHE: # stack is already cached stack = _PARSING_CACHE[string] else: # not a cached string. if not _RE_STARTTOKEN.search(string): # if there are no unescaped start tokens at all, return immediately. return string # build a new cache entry stack = ParseStack() ncallable = 0 for match in _RE_TOKEN.finditer(string): gdict = match.groupdict() if gdict["singlequote"]: stack.append(gdict["singlequote"]) elif gdict["doublequote"]: stack.append(gdict["doublequote"]) elif gdict["end"]: if ncallable <= 0: stack.append(")") continue args = [] while stack: operation = stack.pop() if callable(operation): if not strip: stack.append((operation, [arg for arg in reversed(args)])) ncallable -= 1 break else: args.append(operation) elif gdict["start"]: funcname = _RE_STARTTOKEN.match(gdict["start"]).group(1) try: # try to fetch the matching inlinefunc from storage stack.append(_INLINE_FUNCS[funcname]) except __HOLE__: stack.append(_INLINE_FUNCS["nomatch"]) stack.append(funcname) ncallable += 1 elif gdict["escaped"]: # escaped tokens token = gdict["escaped"].lstrip("\\") stack.append(token) elif gdict["comma"]: if ncallable > 0: # commas outside strings and inside a callable are # used to mark argument separation - we use None # in the stack to indicate such a separation. stack.append(None) else: # no callable active - just a string stack.append(",") else: # the rest stack.append(gdict["rest"]) if ncallable > 0: # this means not all inlinefuncs were complete return string if _STACK_MAXSIZE > 0 and _STACK_MAXSIZE < len(stack): # if stack is larger than limit, throw away parsing return string + gdict["stackfull"](*args, **kwargs) else: # cache the result _PARSING_CACHE[string] = stack # run the stack recursively def _run_stack(item, depth=0): retval = item if isinstance(item, tuple): if strip: return "" else: func, arglist = item args = [""] for arg in arglist: if arg is None: # an argument-separating comma - start a new arg args.append("") else: # all other args should merge into one string args[-1] += _run_stack(arg, depth=depth+1) # execute the inlinefunc at this point or strip it. kwargs["inlinefunc_stack_depth"] = depth retval = "" if strip else func(*args, **kwargs) return utils.to_str(retval, force_string=True) # execute the stack from the cache return "".join(_run_stack(item) for item in _PARSING_CACHE[string])
KeyError
dataset/ETHPy150Open evennia/evennia/evennia/utils/nested_inlinefuncs.py/parse_inlinefunc
4,545
def parse_and_bind(self, string): '''Parse and execute single line of a readline init file.''' try: log('parse_and_bind("%s")' % string) if string.startswith('#'): return if string.startswith('set'): m = re.compile(r'set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$').match(string) if m: var_name = m.group(1) val = m.group(2) try: setattr(self.mode, var_name.replace('-','_'), val) except __HOLE__: log('unknown var="%s" val="%s"' % (var_name, val)) else: log('bad set "%s"' % string) return m = re.compile(r'\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$').match(string) if m: key = m.group(1) func_name = m.group(2) py_name = func_name.replace('-', '_') try: func = getattr(self.mode, py_name) except AttributeError: log('unknown func key="%s" func="%s"' % (key, func_name)) if self.debug: print('pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name) return self.mode._bind_key(key, func) except: log('error') raise
AttributeError
dataset/ETHPy150Open Ali-Razmjoo/OWASP-ZSC/module/readline_windows/pyreadline/rlmain.py/BaseReadline.parse_and_bind
4,546
def _readline_from_keyboard_poll(self):
    pastebuffer = self.mode.paste_line_buffer
    if len(pastebuffer) > 0:
        #paste first line in multiline paste buffer
        self.l_buffer = lineobj.ReadLineTextBuffer(pastebuffer[0])
        self._update_line()
        self.mode.paste_line_buffer = pastebuffer[1:]
        return True
    c = self.console
    def nop(e):
        pass
    try:
        event = c.getkeypress()
    except __HOLE__:
        event = self.handle_ctrl_c()
    try:
        result = self.mode.process_keyevent(event.keyinfo)
    except EOFError:
        logger.stop_logging()
        raise
    self._update_line()
    return result
KeyboardInterrupt
dataset/ETHPy150Open Ali-Razmjoo/OWASP-ZSC/module/readline_windows/pyreadline/rlmain.py/Readline._readline_from_keyboard_poll
4,547
def post_notice(request): current_site = Site.objects.get_current() oauth_req = OAuthRequest.from_request(request.method, request.build_absolute_uri(), headers=request.META, parameters=request.POST.copy()) if not oauth_req: return HttpResponse("Not a OAuthRequest", mimetype="text/plain") else: oauth_server = OAuthServer(data_store=DataStore(oauth_req), signature_methods=signature_methods) oauth_server.verify_request(oauth_req) # TOOD Refactor this into something like omb.post_notice version = oauth_req.get_parameter('omb_version') if version != OMB_VERSION_01: return HttpResponse("Unsupported OMB version", mimetype="text/plain") listenee = oauth_req.get_parameter('omb_listenee') try: remote_profile = RemoteProfile.objects.get(uri=listenee) except __HOLE__: return HttpResponse("Profile unknown", mimetype="text/plain") content = oauth_req.get_parameter('omb_notice_content') if not content or len(content) > 140: return HttpResponse("Invalid notice content", mimetype="text/plain") notice_uri = oauth_req.get_parameter('omb_notice') notice_url = oauth_req.get_parameter("omb_notice_url") notice_app_label, notice_model_name = settings.OMB_NOTICE_MODULE.split('.') noticeModel = models.get_model(notice_app_label, notice_model_name) notice = noticeModel() notice.sender = remote_profile notice.text = content notice.save() return HttpResponse("omb_version=%s" % OMB_VERSION_01, mimetype="text/plain")
ObjectDoesNotExist
dataset/ETHPy150Open skabber/django-omb/omb/views.py/post_notice
4,548
def updateprofile(request): oauth_req = OAuthRequest.from_request(request.method, request.build_absolute_uri(), headers=request.META, parameters=request.POST.copy()) return HttpResponse("update profile", mimetype="text/plain") if not oauth_req: return HttpResponse("Not a OAuthRequest", mimetype="text/plain") else: oauth_server = OAuthServer(data_store=DataStore(oauth_req), signature_methods=signature_methods) oauth_server.verify_request(oauth_req) # TOOD Refactor this into something like omb.update_profile omb_version = oauth_req.get_parameter('omb_version') if omb_version != OMB_VERSION_01: return HttpResponse("Unsupported OMB version", mimetype="text/plain") omb_listenee = oauth_req.get_parameter('omb_listenee') try: remote_profile = RemoteProfile.objects.get(uri=omb_listenee) omb_listenee_profile = _get_oauth_param(oauth_req, 'omb_listenee_profile') if omb_listenee_profile != None: remote_profile.url = omb_listenee_profile omb_listenee_nickname = _get_oauth_param(oauth_req, 'omb_listenee_nickname') if omb_listenee_nickname != None: remote_profile.username = omb_listenee_nickname omb_listenee_license = _get_oauth_param(oauth_req, 'omb_listenee_license') if omb_listenee_license != None: remote_profile.license = omb_listenee_license omb_listenee_fullname = _get_oauth_param(oauth_req, 'omb_listenee_fullname') if omb_listenee_fullname != None: remote_profile.fullname = omb_listenee_fullname omb_listenee_homepage = _get_oauth_param(oauth_req, 'omb_listenee_homepage') if omb_listenee_homepage != None: remote_profile.homepage = omb_listenee_homepage omb_listenee_bio = _get_oauth_param(oauth_req, 'omb_listenee_bio') if omb_listenee_bio != None: remote_profile.bio = omb_listenee_bio omb_listenee_location = _get_oauth_param(oauth_req, 'omb_listenee_location') if omb_listenee_location != None: remote_profile.location = omb_listenee_location omb_listenee_avatar = _get_oauth_param(oauth_req, 'omb_listenee_avatar') if omb_listenee_avatar != None: remote_profile.avatar = omb_listenee_avatar remote_profile.save() return HttpResponse("omb_version=%s" % OMB_VERSION_01, mimetype="text/plain") except __HOLE__: return HttpResponse("Profile unknown", mimetype="text/plain")
ObjectDoesNotExist
dataset/ETHPy150Open skabber/django-omb/omb/views.py/updateprofile
4,549
def omb_request_token(request):
    consumer_key = request.REQUEST.get("oauth_consumer_key")
    try:
        Consumer.objects.get(name=consumer_key, key=consumer_key)
    except __HOLE__:
        Consumer.objects.create(name=consumer_key, key=consumer_key)
    response = request_token(request)
    return response
ObjectDoesNotExist
dataset/ETHPy150Open skabber/django-omb/omb/views.py/omb_request_token
4,550
def getimagesize(url): """ Attempts to determine an image's width and height, and returns a string suitable for use in an <img> tag, or an empty string in case of failure. Requires that PIL is installed. >>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif") ... #doctest: +ELLIPSIS 'width="..." height="..."' >>> getimagesize("http://bad.domain/") '' """ try: from PIL import ImageFile import urllib.request, urllib.error, urllib.parse except ImportError: return '' try: p = ImageFile.Parser() f = urllib.request.urlopen(url) while True: s = f.read(1024) if not s: break p.feed(s) if p.image: return 'width="%i" height="%i"' % p.image.size except (IOError, __HOLE__): return ''
ValueError
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python3/textile/tools/imagesize.py/getimagesize
4,551
def setup_module(module):
    from nose.plugins.skip import SkipTest
    try:
        from PIL import ImageFile
    except __HOLE__:
        raise SkipTest()
ImportError
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python3/textile/tools/imagesize.py/setup_module
4,552
def get_average_depth(self, settings, data): """ compute average depth of tree (averaged over training data) = depth of a leaf weighted by fraction of training data at that leaf """ self.depth_nodes = {self.root: 0} tmp_node_list = [self.root] n_total = 0. average_depth = 0. self.node_size_by_depth = defaultdict(list) leaf_node_sizes = [] while True: try: node_id = tmp_node_list.pop(0) except __HOLE__: break if node_id.is_leaf: if settings.optype == 'class': n_points_node = np.sum(self.counts[node_id]) else: n_points_node = self.n_points[node_id] n_total += n_points_node average_depth += n_points_node * self.depth_nodes[node_id] self.node_size_by_depth[self.depth_nodes[node_id]].append(node_id.sum_range_d) if not node_id.is_leaf: self.depth_nodes[node_id.left] = self.depth_nodes[node_id] + 1 self.depth_nodes[node_id.right] = self.depth_nodes[node_id] + 1 tmp_node_list.extend([node_id.left, node_id.right]) else: leaf_node_sizes.append(node_id.sum_range_d) assert data['n_train'] == int(n_total) average_depth /= n_total average_leaf_node_size = np.mean(leaf_node_sizes) average_node_size_by_depth = {} for k in self.node_size_by_depth: average_node_size_by_depth[k] = np.mean(self.node_size_by_depth[k]) return (average_depth, average_leaf_node_size, average_node_size_by_depth)
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.get_average_depth
4,553
def gen_node_ids_print(self):
    """
    generate binary string label for each node
    root_node is denoted by empty string "e"
    all other node labels are defined as follows: left(j) = j0, right(j) = j1
    e.g. left and right child of root_node are 0 and 1 respectively,
    left and right of node 0 are 00 and 01 respectively and so on.
    """
    node_ids = [self.root]
    self.node_ids_print = {self.root: ''}
    while node_ids:
        node_id = node_ids.pop(0)
        try:
            feat_id, split = self.node_info[node_id]
            left, right = node_id.left, node_id.right
            node_ids.append(left)
            node_ids.append(right)
            self.node_ids_print[left] = self.node_ids_print[node_id] + '0'
            self.node_ids_print[right] = self.node_ids_print[node_id] + '1'
        except __HOLE__:
            continue
KeyError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.gen_node_ids_print
4,554
def print_tree(self, settings): """ prints some tree statistics: leaf nodes, non-leaf nodes, information and so on """ self.gen_node_ids_print() print 'printing tree:' print 'len(leaf_nodes) = %s, len(non_leaf_nodes) = %s' \ % (len(self.leaf_nodes), len(self.non_leaf_nodes)) print 'node_info =' node_ids = [self.root] while node_ids: node_id = node_ids.pop(0) node_id_print = self.node_ids_print[node_id] try: feat_id, split = self.node_info[node_id] print '%10s, feat = %5d, split = %.2f, node_id = %s' % \ (node_id_print, feat_id, split, node_id) if settings.optype == 'class': print 'counts = %s' % self.counts[node_id] else: print 'n_points = %6d, sum_y = %.2f' % (self.n_points[node_id], self.sum_y[node_id]) left, right = node_id.left, node_id.right node_ids.append(left) node_ids.append(right) except __HOLE__: continue print 'leaf info =' for node_id in self.leaf_nodes: node_id_print = self.node_ids_print[node_id] print '%10s, train_ids = %s, node_id = %s' % \ (node_id_print, self.train_ids[node_id], node_id) if settings.optype == 'class': print 'counts = %s' % self.counts[node_id] else: print 'n_points = %6d, sum_y = %.2f' % (self.n_points[node_id], self.sum_y[node_id])
KeyError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.print_tree
4,555
def update_gaussian_hyperparameters_indep(self, param, data, settings): n_points = float(self.n_points[self.root]) self.prior_mean, self.prior_variance = self.get_node_mean_and_variance(self.root) self.prior_precision = 1.0 / self.prior_variance self.cumulative_split_costs = {} self.leaf_means = [] self.leaf_variances = [] node_means = [] d_node_means = {self.root: self.prior_mean} node_parent_means = [] node_split_times = [] node_parent_split_times = [] if self.root.is_leaf: self.cumulative_split_costs[self.root] = 0. remaining = [] self.max_split_time = 0.1 # NOTE: initial value, need to specify non-zero value else: self.cumulative_split_costs[self.root] = self.max_split_costs[self.root] remaining = [self.root.left, self.root.right] self.max_split_time = self.cumulative_split_costs[self.root] + 0 node_split_times.append(self.cumulative_split_costs[self.root]) node_parent_split_times.append(0.) node_means.append(self.prior_mean) node_parent_means.append(self.prior_mean) while True: try: node_id = remaining.pop(0) except __HOLE__: break self.cumulative_split_costs[node_id] = self.cumulative_split_costs[node_id.parent] \ + self.max_split_costs[node_id] node_mean, node_variance = self.get_node_mean_and_variance(node_id) node_split_times.append(self.cumulative_split_costs[node_id]) node_parent_split_times.append(self.cumulative_split_costs[node_id.parent]) node_means.append(node_mean) node_parent_means.append(d_node_means[node_id.parent]) d_node_means[node_id] = node_mean if not node_id.is_leaf: remaining.append(node_id.left) remaining.append(node_id.right) self.max_split_time = max(self.max_split_time, self.cumulative_split_costs[node_id]) else: self.leaf_means.append(node_mean) self.leaf_variances.append(node_variance) #self.noise_variance = np.max(self.leaf_variances) self.noise_variance = np.mean(self.leaf_variances) self.noise_precision = 1.0 / self.noise_variance self.sigmoid_coef = 3. / self.max_split_time #self.sigmoid_coef = data['n_dim'] #self.sigmoid_coef = data['n_dim'] / 5 #self.sigmoid_coef = data['n_dim'] / (2. * np.log2(n_points)) #self.sigmoid_coef = data['n_dim'] / (2. * np.log2(n_points)) #self.sigmoid_coef = data['n_dim'] / (n_points) #self.variance_leaf_from_root = 2 * np.mean((np.array(self.leaf_means) - self.prior_mean) ** 2) # set sd to 3 times the empirical sd so that leaf node means are highly plausible (avoid too much smoothing) #self.variance_coef = 1.0 * self.variance_leaf_from_root if self.root.is_leaf: self.variance_coef = 1.0 else: node_means = np.array(node_means) node_parent_means = np.array(node_parent_means) node_split_times = np.array(node_split_times) node_parent_split_times = np.array(node_parent_split_times) tmp_den = sigmoid(self.sigmoid_coef * node_split_times) \ - sigmoid(self.sigmoid_coef * node_parent_split_times) tmp_num = (node_means - node_parent_means) ** 2 variance_coef_est = np.mean(tmp_num / tmp_den) self.variance_coef = variance_coef_est print 'sigmoid_coef = %.3f, variance_coef = %.3f' % (self.sigmoid_coef, variance_coef_est)
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.update_gaussian_hyperparameters_indep
4,556
def gen_cumulative_split_costs_only(self, settings, data):
    """
    creates node_id.cumulative_split_cost as well as a dictionary self.cumulative_split_costs
    helper function for draw_tree
    """
    self.cumulative_split_costs = {}
    if self.root.is_leaf:
        self.cumulative_split_costs[self.root] = 0.
        remaining = []
    else:
        self.cumulative_split_costs[self.root] = self.max_split_costs[self.root]
        remaining = [self.root.left, self.root.right]
    while True:
        try:
            node_id = remaining.pop(0)
        except __HOLE__:
            break
        self.cumulative_split_costs[node_id] = self.cumulative_split_costs[node_id.parent] \
            + self.max_split_costs[node_id]
        if not node_id.is_leaf:
            remaining.append(node_id.left)
            remaining.append(node_id.right)
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.gen_cumulative_split_costs_only
4,557
def gen_node_list(self):
    """
    generates an ordered node_list such that parent appears before children
    useful for updating predictive posteriors
    """
    self.node_list = [self.root]
    i = -1
    while True:
        try:
            i += 1
            node_id = self.node_list[i]
        except __HOLE__:
            break
        if not node_id.is_leaf:
            self.node_list.extend([node_id.left, node_id.right])
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.gen_node_list
4,558
def predict_class(self, x_test, n_class, param, settings): """ predict new label (for classification tasks) """ pred_prob = np.zeros((x_test.shape[0], n_class)) prob_not_separated_yet = np.ones(x_test.shape[0]) prob_separated = np.zeros(x_test.shape[0]) node_list = [self.root] d_idx_test = {self.root: np.arange(x_test.shape[0])} while True: try: node_id = node_list.pop(0) except IndexError: break idx_test = d_idx_test[node_id] if len(idx_test) == 0: continue x = x_test[idx_test, :] expo_parameter = np.maximum(0, node_id.min_d - x).sum(1) + np.maximum(0, x - node_id.max_d).sum(1) prob_not_separated_now = np.exp(-expo_parameter * self.max_split_costs[node_id]) prob_separated_now = 1 - prob_not_separated_now if math.isinf(self.max_split_costs[node_id]): # rare scenario where test point overlaps exactly with a training data point idx_zero = expo_parameter == 0 # to prevent nan in computation above when test point overlaps with training data point prob_not_separated_now[idx_zero] = 1. prob_separated_now[idx_zero] = 0. # predictions for idx_test_zero # data dependent discounting (depending on how far test data point is from the mondrian block) idx_non_zero = expo_parameter > 0 idx_test_non_zero = idx_test[idx_non_zero] expo_parameter_non_zero = expo_parameter[idx_non_zero] base = self.get_prior_mean(node_id, param, settings) if np.any(idx_non_zero): num_tables_k, num_customers, num_tables = self.get_counts(self.cnt[node_id]) # expected discount (averaging over time of cut which is a truncated exponential) # discount = (expo_parameter_non_zero / (expo_parameter_non_zero + settings.discount_param)) * \ # (-np.expm1(-(expo_parameter_non_zero + settings.discount_param) * self.max_split_costs[node_id])) discount = (expo_parameter_non_zero / (expo_parameter_non_zero + settings.discount_param)) \ * (-np.expm1(-(expo_parameter_non_zero + settings.discount_param) * self.max_split_costs[node_id])) \ / (-np.expm1(-expo_parameter_non_zero * self.max_split_costs[node_id])) discount_per_num_customers = discount / num_customers pred_prob_tmp = num_tables * discount_per_num_customers[:, np.newaxis] * base \ + self.cnt[node_id] / num_customers - discount_per_num_customers[:, np.newaxis] * num_tables_k pred_prob[idx_test_non_zero, :] += prob_separated_now[idx_non_zero][:, np.newaxis] \ * prob_not_separated_yet[idx_test_non_zero][:, np.newaxis] * pred_prob_tmp prob_not_separated_yet[idx_test] *= prob_not_separated_now # predictions for idx_test_zero if math.isinf(self.max_split_costs[node_id]) and np.any(idx_zero): idx_test_zero = idx_test[idx_zero] pred_prob_node_id = self.compute_posterior_mean_normalized_stable(self.cnt[node_id], \ self.get_discount_node_id(node_id, settings), base, settings) pred_prob[idx_test_zero, :] += prob_not_separated_yet[idx_test_zero][:, np.newaxis] * pred_prob_node_id try: feat_id, split = self.node_info[node_id] cond = x[:, feat_id] <= split left, right = get_children_id(node_id) d_idx_test[left], d_idx_test[right] = idx_test[cond], idx_test[~cond] node_list.append(left) node_list.append(right) except __HOLE__: pass if True or settings.debug: check_if_zero(np.sum(np.abs(np.sum(pred_prob, 1) - 1))) return pred_prob
KeyError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.predict_class
4,559
def predict_real(self, x_test, y_test, param, settings): """ predict new label (for regression tasks) """ pred_mean = np.zeros(x_test.shape[0]) pred_second_moment = np.zeros(x_test.shape[0]) pred_sample = np.zeros(x_test.shape[0]) log_pred_prob = -np.inf * np.ones(x_test.shape[0]) prob_not_separated_yet = np.ones(x_test.shape[0]) prob_separated = np.zeros(x_test.shape[0]) node_list = [self.root] d_idx_test = {self.root: np.arange(x_test.shape[0])} while True: try: node_id = node_list.pop(0) except __HOLE__: break idx_test = d_idx_test[node_id] if len(idx_test) == 0: continue x = x_test[idx_test, :] expo_parameter = np.maximum(0, node_id.min_d - x).sum(1) + np.maximum(0, x - node_id.max_d).sum(1) prob_not_separated_now = np.exp(-expo_parameter * self.max_split_costs[node_id]) prob_separated_now = 1 - prob_not_separated_now if math.isinf(self.max_split_costs[node_id]): # rare scenario where test point overlaps exactly with a training data point idx_zero = expo_parameter == 0 # to prevent nan in computation above when test point overlaps with training data point prob_not_separated_now[idx_zero] = 1. prob_separated_now[idx_zero] = 0. # predictions for idx_test_zero idx_non_zero = expo_parameter > 0 idx_test_non_zero = idx_test[idx_non_zero] n_test_non_zero = len(idx_test_non_zero) expo_parameter_non_zero = expo_parameter[idx_non_zero] if np.any(idx_non_zero): # expected variance (averaging over time of cut which is a truncated exponential) # NOTE: expected variance is approximate since E[f(x)] not equal to f(E[x]) expected_cut_time = 1.0 / expo_parameter_non_zero if not np.isinf(self.max_split_costs[node_id]): tmp_exp_term_arg = -self.max_split_costs[node_id] * expo_parameter_non_zero tmp_exp_term = np.exp(tmp_exp_term_arg) expected_cut_time -= self.max_split_costs[node_id] * tmp_exp_term / (-np.expm1(tmp_exp_term_arg)) try: assert np.all(expected_cut_time >= 0.) 
except AssertionError: print tmp_exp_term_arg print tmp_exp_term print expected_cut_time print np.any(np.isnan(expected_cut_time)) print 1.0 / expo_parameter_non_zero raise AssertionError if not settings.smooth_hierarchically: pred_mean_tmp = self.sum_y[node_id] / float(self.n_points[node_id]) pred_second_moment_tmp = self.sum_y2[node_id] / float(self.n_points[node_id]) + param.noise_variance else: pred_mean_tmp, pred_second_moment_tmp = self.pred_moments[node_id] # FIXME: approximate since E[f(x)] not equal to f(E[x]) expected_split_time = expected_cut_time + self.get_parent_split_time(node_id, settings) variance_from_mean = self.variance_coef * (sigmoid(self.sigmoid_coef * expected_split_time) \ - sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings))) pred_second_moment_tmp += variance_from_mean pred_variance_tmp = pred_second_moment_tmp - pred_mean_tmp ** 2 pred_sample_tmp = pred_mean_tmp + np.random.randn(n_test_non_zero) * np.sqrt(pred_variance_tmp) log_pred_prob_tmp = compute_gaussian_logpdf(pred_mean_tmp, pred_variance_tmp, y_test[idx_test_non_zero]) prob_separated_now_weighted = \ prob_separated_now[idx_non_zero] * prob_not_separated_yet[idx_test_non_zero] pred_mean[idx_test_non_zero] += prob_separated_now_weighted * pred_mean_tmp pred_sample[idx_test_non_zero] += prob_separated_now_weighted * pred_sample_tmp pred_second_moment[idx_test_non_zero] += prob_separated_now_weighted * pred_second_moment_tmp log_pred_prob[idx_test_non_zero] = logsumexp_array(log_pred_prob[idx_test_non_zero], \ np.log(prob_separated_now_weighted) + log_pred_prob_tmp) prob_not_separated_yet[idx_test] *= prob_not_separated_now # predictions for idx_test_zero if math.isinf(self.max_split_costs[node_id]) and np.any(idx_zero): idx_test_zero = idx_test[idx_zero] n_test_zero = len(idx_test_zero) if not settings.smooth_hierarchically: pred_mean_node_id = self.sum_y[node_id] / float(self.n_points[node_id]) pred_second_moment_node_id = self.sum_y2[node_id] / float(self.n_points[node_id]) \ + param.noise_variance else: pred_mean_node_id, pred_second_moment_node_id = self.pred_moments[node_id] pred_variance_node_id = pred_second_moment_node_id - pred_mean_node_id ** 2 pred_sample_node_id = pred_mean_node_id + np.random.randn(n_test_zero) * np.sqrt(pred_variance_node_id) log_pred_prob_node_id = \ compute_gaussian_logpdf(pred_mean_node_id, pred_variance_node_id, y_test[idx_test_zero]) pred_mean[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_mean_node_id pred_sample[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_sample_node_id pred_second_moment[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_second_moment_node_id log_pred_prob[idx_test_zero] = logsumexp_array(log_pred_prob[idx_test_zero], \ np.log(prob_not_separated_yet[idx_test_zero]) + log_pred_prob_node_id) try: feat_id, split = self.node_info[node_id] cond = x[:, feat_id] <= split left, right = get_children_id(node_id) d_idx_test[left], d_idx_test[right] = idx_test[cond], idx_test[~cond] node_list.append(left) node_list.append(right) except KeyError: pass pred_var = pred_second_moment - (pred_mean ** 2) if True or settings.debug: # FIXME: remove later assert not np.any(np.isnan(pred_mean)) assert not np.any(np.isnan(pred_var)) try: assert np.all(pred_var >= 0.) 
except AssertionError: min_pred_var = np.min(pred_var) print 'min_pred_var = %s' % min_pred_var assert np.abs(min_pred_var) < 1e-3 # allowing some numerical errors assert not np.any(np.isnan(log_pred_prob)) return (pred_mean, pred_var, pred_second_moment, log_pred_prob, pred_sample)
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.predict_real
4,560
def check_tree(self, settings, data): """ check if tree violates any sanity check """ if settings.debug: #print '\nchecking tree' print '\nchecking tree: printing tree first' self.print_tree(settings) for node_id in self.non_leaf_nodes: assert node_id.left.parent == node_id.right.parent == node_id assert not node_id.is_leaf if settings.optype == 'class': assert np.count_nonzero(self.counts[node_id]) > 1 assert not self.pause_mondrian(node_id, settings) if node_id != self.root: assert np.all(node_id.min_d >= node_id.parent.min_d) assert np.all(node_id.max_d <= node_id.parent.max_d) if settings.optype == 'class': try: check_if_zero(np.sum(np.abs(self.counts[node_id] - \ self.counts[node_id.left] - self.counts[node_id.right]))) except __HOLE__: print 'counts: node = %s, left = %s, right = %s' \ % (self.counts[node_id], self.counts[node_id.left], self.counts[node_id.right]) raise AssertionError if settings.budget == -1: assert math.isinf(node_id.budget) check_if_zero(self.split_times[node_id] - self.get_parent_split_time(node_id, settings) \ - self.max_split_costs[node_id]) if settings.optype == 'class': num_data_points = 0 for node_id in self.leaf_nodes: assert node_id.is_leaf assert math.isinf(self.max_split_costs[node_id]) if settings.budget == -1: assert math.isinf(node_id.budget) if settings.optype == 'class': num_data_points += self.counts[node_id].sum() assert np.count_nonzero(self.counts[node_id]) == 1 assert self.pause_mondrian(node_id, settings) if node_id != self.root: assert np.all(node_id.min_d >= node_id.parent.min_d) assert np.all(node_id.max_d <= node_id.parent.max_d) if settings.optype == 'class': print 'num_train = %s, number of data points at leaf nodes = %s' % \ (data['n_train'], num_data_points) set_non_leaf = set(self.non_leaf_nodes) set_leaf = set(self.leaf_nodes) assert (set_leaf & set_non_leaf) == set([]) assert set_non_leaf == set(self.node_info.keys()) assert len(set_leaf) == len(self.leaf_nodes) assert len(set_non_leaf) == len(self.non_leaf_nodes)
AssertionError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.check_tree
4,561
def update_posterior_counts(self, param, data, settings): """ posterior update for hierarchical normalized stable distribution using interpolated Kneser Ney smoothing (where number of tables serving a dish at a restaurant is atmost 1) NOTE: implementation optimized for minibatch training where more than one data point added per minibatch if only 1 datapoint is added, lots of counts will be unnecesarily updated """ self.cnt = {} node_list = [self.root] while True: try: node_id = node_list.pop(0) except __HOLE__: break if node_id.is_leaf: cnt = self.counts[node_id] else: cnt = np.minimum(self.counts[node_id.left], 1) + np.minimum(self.counts[node_id.right], 1) node_list.extend([node_id.left, node_id.right]) self.cnt[node_id] = cnt
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.update_posterior_counts
4,562
def update_predictive_posteriors(self, param, data, settings): """ update predictive posterior for hierarchical normalized stable distribution pred_prob computes posterior mean of the label distribution at each node recursively """ node_list = [self.root] if settings.debug: self.gen_node_ids_print() while True: try: node_id = node_list.pop(0) except __HOLE__: break base = self.get_prior_mean(node_id, param, settings) discount = self.get_discount_node_id(node_id, settings) cnt = self.cnt[node_id] if not node_id.is_leaf: self.pred_prob[node_id] = self.compute_posterior_mean_normalized_stable(cnt, discount, base, settings) node_list.extend([node_id.left, node_id.right]) if settings.debug and False: print 'node_id = %20s, is_leaf = %5s, discount = %.2f, cnt = %s, base = %s, pred_prob = %s' \ % (self.node_ids_print[node_id], node_id.is_leaf, discount, cnt, base, self.pred_prob[node_id])
IndexError
dataset/ETHPy150Open balajiln/mondrianforest/src/mondrianforest.py/MondrianTree.update_predictive_posteriors
4,563
def load(self): """ Loads the source CSV for the provided model based on settings and database connections. """ # check if can load into dat if getattr(settings, 'CALACCESS_DAT_SOURCE', None) and six.PY2: self.load_dat() # if not using default db, make sure the database is set up in django's settings if self.database: try: engine = settings.DATABASES[self.database]['ENGINE'] except __HOLE__: raise TypeError( "{} not configured in DATABASES settings.".format(self.database) ) # set up database connection self.connection = connections[self.database] self.cursor = self.connection.cursor() # check the kind of database before calling db-specific load method if engine == 'django.db.backends.mysql': self.load_mysql() elif engine in ( 'django.db.backends.postgresql_psycopg2' 'django.contrib.gis.db.backends.postgis' ): self.load_postgresql() else: self.failure("Sorry your database engine is unsupported") raise CommandError( "Only MySQL and PostgresSQL backends supported." )
KeyError
dataset/ETHPy150Open california-civic-data-coalition/django-calaccess-raw-data/calaccess_raw/management/commands/loadcalaccessrawfile.py/Command.load
4,564
def get_headers(self): """ Returns the column headers from the csv as a list. """ with open(self.csv, 'r') as infile: csv_reader = CSVKitReader(infile) try: headers = next(csv_reader) except __HOLE__: headers = [] return headers
StopIteration
dataset/ETHPy150Open california-civic-data-coalition/django-calaccess-raw-data/calaccess_raw/management/commands/loadcalaccessrawfile.py/Command.get_headers
4,565
@log_helpers.log_method_call def get_traffic_counters(self, context, routers): accs = {} for router in routers: rm = self.routers.get(router['id']) if not rm: continue for label_id, label in rm.metering_labels.items(): try: chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) chain_acc = rm.iptables_manager.get_traffic_counters( chain, wrap=False, zero=True) except __HOLE__: LOG.exception(_LE('Failed to get traffic counters, ' 'router: %s'), router) continue if not chain_acc: continue acc = accs.get(label_id, {'pkts': 0, 'bytes': 0}) acc['pkts'] += chain_acc['pkts'] acc['bytes'] += chain_acc['bytes'] accs[label_id] = acc return accs
RuntimeError
dataset/ETHPy150Open openstack/neutron/neutron/services/metering/drivers/iptables/iptables_driver.py/IptablesMeteringDriver.get_traffic_counters
4,566
def _load_yaml(self): """lazy-import PyYAML so that configargparse doesn't have to dependend on it unless this parser is used.""" try: import yaml except __HOLE__: raise ConfigFileParserException("Could not import yaml. " "It can be installed by running 'pip install PyYAML'") return yaml
ImportError
dataset/ETHPy150Open bw2/ConfigArgParse/configargparse.py/YAMLConfigFileParser._load_yaml
4,567
def parse_known_args(self, args = None, namespace = None, config_file_contents = None, env_vars = os.environ): """Supports all the same args as the ArgumentParser.parse_args(..), as well as the following additional args. Additional Args: args: a list of args as in argparse, or a string (eg. "-x -y bla") config_file_contents: String. Used for testing. env_vars: Dictionary. Used for testing. """ if args is None: args = sys.argv[1:] elif type(args) == str: args = args.split() else: args = list(args) # normalize args by converting args like --key=value to --key value normalized_args = list() for arg in args: if arg and arg[0] in self.prefix_chars and '=' in arg: key, value = arg.split('=', 1) normalized_args.append(key) normalized_args.append(value) else: normalized_args.append(arg) args = normalized_args for a in self._actions: a.is_positional_arg = not a.option_strings # maps a string describing the source (eg. env var) to a settings dict # to keep track of where values came from (used by print_values()). # The settings dicts for env vars and config files will then map # the config key to an (argparse Action obj, string value) 2-tuple. self._source_to_settings = OrderedDict() if args: a_v_pair = (None, list(args)) # copy args list to isolate changes self._source_to_settings[_COMMAND_LINE_SOURCE_KEY] = {'': a_v_pair} # handle auto_env_var_prefix __init__ arg by setting a.env_var as needed if self._auto_env_var_prefix is not None: for a in self._actions: config_file_keys = self.get_possible_config_keys(a) if config_file_keys and not (a.env_var or a.is_positional_arg or a.is_config_file_arg or a.is_write_out_config_file_arg or type(a) == argparse._HelpAction): stripped_config_file_key = config_file_keys[0].strip( self.prefix_chars) a.env_var = (self._auto_env_var_prefix + stripped_config_file_key).replace('-', '_').upper() # add env var settings to the commandline that aren't there already env_var_args = [] actions_with_env_var_values = [a for a in self._actions if not a.is_positional_arg and a.env_var and a.env_var in env_vars and not already_on_command_line(args, a.option_strings)] for action in actions_with_env_var_values: key = action.env_var value = env_vars[key] # TODO parse env var values here to allow lists? env_var_args += self.convert_item_to_command_line_arg( action, key, value) args = env_var_args + args if env_var_args: self._source_to_settings[_ENV_VAR_SOURCE_KEY] = OrderedDict( [(a.env_var, (a, env_vars[a.env_var])) for a in actions_with_env_var_values]) # before parsing any config files, check if -h was specified. 
supports_help_arg = any( a for a in self._actions if type(a) == argparse._HelpAction) skip_config_file_parsing = supports_help_arg and ( "-h" in args or "--help" in args) # prepare for reading config file(s) known_config_keys = dict((config_key, action) for action in self._actions for config_key in self.get_possible_config_keys(action)) # open the config file(s) config_streams = [] if config_file_contents: stream = StringIO(config_file_contents) stream.name = "method arg" config_streams = [stream] elif not skip_config_file_parsing: config_streams = self._open_config_files(args) # parse each config file for stream in reversed(config_streams): try: config_items = self._config_file_parser.parse(stream) except ConfigFileParserException as e: self.error(e) finally: if hasattr(stream, "close"): stream.close() # add each config item to the commandline unless it's there already config_args = [] for key, value in config_items.items(): if key in known_config_keys: action = known_config_keys[key] discard_this_key = already_on_command_line( args, action.option_strings) else: action = None discard_this_key = self._ignore_unknown_config_file_keys or \ already_on_command_line( args, self.get_command_line_key_for_unknown_config_file_setting(key)) if not discard_this_key: config_args += self.convert_item_to_command_line_arg( action, key, value) source_key = "%s|%s" %(_CONFIG_FILE_SOURCE_KEY, stream.name) if source_key not in self._source_to_settings: self._source_to_settings[source_key] = OrderedDict() self._source_to_settings[source_key][key] = (action, value) args = config_args + args # save default settings for use by print_values() default_settings = OrderedDict() for action in self._actions: cares_about_default_value = (not action.is_positional_arg or action.nargs in [OPTIONAL, ZERO_OR_MORE]) if (already_on_command_line(args, action.option_strings) or not cares_about_default_value or action.default is None or action.default == SUPPRESS or type(action) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE): continue else: if action.option_strings: key = action.option_strings[-1] else: key = action.dest default_settings[key] = (action, str(action.default)) if default_settings: self._source_to_settings[_DEFAULTS_SOURCE_KEY] = default_settings # parse all args (including commandline, config file, and env var) namespace, unknown_args = argparse.ArgumentParser.parse_known_args( self, args=args, namespace=namespace) # handle any args that have is_write_out_config_file_arg set to true user_write_out_config_file_arg_actions = [a for a in self._actions if getattr(a, "is_write_out_config_file_arg", False)] if user_write_out_config_file_arg_actions: output_file_paths = [] for action in user_write_out_config_file_arg_actions: # check if the user specified this arg on the commandline output_file_path = getattr(namespace, action.dest, None) if output_file_path: # validate the output file path try: with open(output_file_path, "w") as output_file: output_file_paths.append(output_file_path) except __HOLE__ as e: raise ValueError("Couldn't open %s for writing: %s" % ( output_file_path, e)) if output_file_paths: # generate the config file contents config_items = self.get_items_for_config_file_output( self._source_to_settings, namespace) file_contents = self._config_file_parser.serialize(config_items) for output_file_path in output_file_paths: with open(output_file_path, "w") as output_file: output_file.write(file_contents) if len(output_file_paths) == 1: output_file_paths = output_file_paths[0] self.exit(0, "Wrote config file to " + 
str(output_file_paths)) return namespace, unknown_args
IOError
dataset/ETHPy150Open bw2/ConfigArgParse/configargparse.py/ArgumentParser.parse_known_args
4,568
def exists(self, bucket, label): '''Whether a given bucket:label object already exists.''' fn = self._zf(bucket, label) try: self.z.getinfo(fn) return True except __HOLE__: return False
KeyError
dataset/ETHPy150Open okfn/ofs/ofs/local/zipstore.py/ZOFS.exists
4,569
def _get_bucket_md(self, bucket): name = self._zf(bucket, MD_FILE) if not self.exists(bucket, MD_FILE): raise OFSFileNotFound if self.mode !="w": #z = ZipFile(self.zipfile, "r", self.compression, self.allowZip64) json_doc = self.z.read(name) #z.close() try: jsn = json.loads(json_doc) return jsn except __HOLE__: raise OFSException, "Cannot read metadata for %s" % bucket else: raise OFSException, "Cannot read from archive in 'w' mode"
ValueError
dataset/ETHPy150Open okfn/ofs/ofs/local/zipstore.py/ZOFS._get_bucket_md
4,570
def get_data_from_context(context): """Get the django paginator data object from the given *context*. The context is a dict-like object. If the context key ``endless`` is not found, a *PaginationError* is raised. """ try: return context['endless'] except __HOLE__: raise exceptions.PaginationError( 'Cannot find endless data in context.')
KeyError
dataset/ETHPy150Open frankban/django-endless-pagination/endless_pagination/utils.py/get_data_from_context
4,571
def get_page_number_from_request( request, querystring_key=PAGE_LABEL, default=1): """Retrieve the current page number from *GET* or *POST* data. If the page does not exists in *request*, or is not a number, then *default* number is returned. """ try: return int(request.REQUEST[querystring_key]) except (KeyError, TypeError, __HOLE__): return default
ValueError
dataset/ETHPy150Open frankban/django-endless-pagination/endless_pagination/utils.py/get_page_number_from_request
4,572
def normalize_page_number(page_number, page_range): """Handle a negative *page_number*. Return a positive page number contained in *page_range*. If the negative index is out of range, return the page number 1. """ try: return page_range[page_number] except __HOLE__: return page_range[0]
IndexError
dataset/ETHPy150Open frankban/django-endless-pagination/endless_pagination/utils.py/normalize_page_number
4,573
def clean_linked_files( logger, event_queue, metadata_path, files_that_collide, files_to_clean, dry_run): """Removes a list of files and adjusts collison counts for colliding files. This function synchronizes access to the devel collisions file. :param devel_space_abs: absolute path to merged devel space :param files_that_collide: list of absolute paths to files that collide :param files_to_clean: list of absolute paths to files to clean """ # Get paths devel_collisions_file_path = os.path.join(metadata_path, 'devel_collisions.txt') # Map from dest files to number of collisions dest_collisions = dict() # Load destination collisions file if os.path.exists(devel_collisions_file_path): with open(devel_collisions_file_path, 'r') as collisions_file: collisions_reader = csv.reader(collisions_file, delimiter=' ', quotechar='"') dest_collisions = dict([(path, int(count)) for path, count in collisions_reader]) # Add collisions for dest_file in files_that_collide: if dest_file in dest_collisions: dest_collisions[dest_file] += 1 else: dest_collisions[dest_file] = 1 # Remove files that no longer collide for dest_file in files_to_clean: # Get the collisions n_collisions = dest_collisions.get(dest_file, 0) # Check collisions if n_collisions == 0: logger.out('Unlinking: {}'.format(dest_file)) # Remove this link if not dry_run: if os.path.exists(dest_file): try: os.unlink(dest_file) except __HOLE__: logger.err('Could not unlink: {}'.format(dest_file)) raise # Remove any non-empty directories containing this file try: os.removedirs(os.path.split(dest_file)[0]) except OSError: pass else: logger.out('Already unlinked: {}') # Update collisions if n_collisions > 1: # Decrement the dest collisions dict dest_collisions[dest_file] -= 1 elif n_collisions == 1: # Remove it from the dest collisions dict del dest_collisions[dest_file] # Load destination collisions file if not dry_run: with open(devel_collisions_file_path, 'w') as collisions_file: collisions_writer = csv.writer(collisions_file, delimiter=' ', quotechar='"') for dest_file, count in dest_collisions.items(): collisions_writer.writerow([dest_file, count])
OSError
dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/jobs/catkin.py/clean_linked_files
4,574
def link_devel_products( logger, event_queue, package, package_path, devel_manifest_path, source_devel_path, dest_devel_path, metadata_path, prebuild): """Link files from an isolated devel space into a merged one. This creates directories and symlinks in a merged devel space to a package's linked devel space. """ # Create the devel manifest path if necessary mkdir_p(devel_manifest_path) # Construct manifest file path devel_manifest_file_path = os.path.join(devel_manifest_path, DEVEL_MANIFEST_FILENAME) # Pair of source/dest files or directories products = list() # List of files to clean files_to_clean = [] # List of files that collide files_that_collide = [] # Select the blacklist blacklist = DEVEL_LINK_PREBUILD_BLACKLIST if prebuild else DEVEL_LINK_BLACKLIST # Gather all of the files in the devel space for source_path, dirs, files in os.walk(source_devel_path): # compute destination path dest_path = os.path.join(dest_devel_path, os.path.relpath(source_path, source_devel_path)) # create directories in the destination develspace for dirname in dirs: dest_dir = os.path.join(dest_path, dirname) if not os.path.exists(dest_dir): # Create the dest directory if it doesn't exist os.mkdir(dest_dir) elif not os.path.isdir(dest_dir): logger.err('Error: Cannot create directory: ' + dest_dir) return -1 # create symbolic links from the source to the dest for filename in files: # Don't link files on the blacklist unless this is a prebuild package if os.path.relpath(os.path.join(source_path, filename), source_devel_path) in blacklist: continue source_file = os.path.join(source_path, filename) dest_file = os.path.join(dest_path, filename) # Store the source/dest pair products.append((source_file, dest_file)) # Check if the symlink exists if os.path.exists(dest_file): if os.path.realpath(dest_file) != os.path.realpath(source_file): # Compute hashes for colliding files source_hash = md5(open(os.path.realpath(source_file)).read().encode('utf-8')).hexdigest() dest_hash = md5(open(os.path.realpath(dest_file)).read().encode('utf-8')).hexdigest() # If the link links to a different file, report a warning and increment # the collision counter for this path if dest_hash != source_hash: logger.err('Warning: Cannot symlink from %s to existing file %s' % (source_file, dest_file)) logger.err('Warning: Source hash: {}'.format(source_hash)) logger.err('Warning: Dest hash: {}'.format(dest_hash)) # Increment link collision counter files_that_collide.append(dest_file) else: logger.out('Linked: ({}, {})'.format(source_file, dest_file)) else: # Create the symlink logger.out('Symlinking %s' % (dest_file)) try: os.symlink(source_file, dest_file) except __HOLE__: logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_file, source_file)) raise # Load the old list of symlinked files for this package if os.path.exists(devel_manifest_file_path): with open(devel_manifest_file_path, 'r') as devel_manifest: manifest_reader = csv.reader(devel_manifest, delimiter=' ', quotechar='"') # Skip the package source directory devel_manifest.readline() # Read the previously-generated products for source_file, dest_file in manifest_reader: # print('Checking (%s, %s)' % (source_file, dest_file)) if (source_file, dest_file) not in products: # Clean the file or decrement the collision count logger.out('Cleaning: (%s, %s)' % (source_file, dest_file)) files_to_clean.append(dest_file) # Remove all listed symlinks and empty directories which have been removed # after this build, and update the collision file try: clean_linked_files(logger, 
event_queue, metadata_path, files_that_collide, files_to_clean, dry_run=False) except: logger.err('Could not clean linked files.') raise # Save the list of symlinked files with open(devel_manifest_file_path, 'w') as devel_manifest: # Write the path to the package source directory devel_manifest.write('%s\n' % package_path) # Write all the products manifest_writer = csv.writer(devel_manifest, delimiter=' ', quotechar='"') for source_file, dest_file in products: manifest_writer.writerow([source_file, dest_file]) return 0
OSError
dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/jobs/catkin.py/link_devel_products
4,575
def tearDown(self): try: for k in traverse_registry_key(MyRegistryConfig.REGISTRY_KEY, MyRegistryConfig.REGISTRY_PATH): winreg.DeleteKey(MyRegistryConfig.REGISTRY_KEY, k) except __HOLE__: pass TestConfigMixin.tearDown(self)
OSError
dataset/ETHPy150Open GreatFruitOmsk/nativeconfig/test/config/registry_config.py/TestRegistryConfig.tearDown
4,576
def _load_app_middleware(cls, app): app_settings = app.settings if not app_settings: return mw_classes = app_settings.__dict__.get('MIDDLEWARE_CLASSES', []) result = {'view': [], 'response': [], 'exception': []} for middleware_path in mw_classes: # This code brutally lifted from django.core.handlers try: dot = middleware_path.rindex('.') except __HOLE__: raise exceptions.ImproperlyConfigured, _('%(module)s isn\'t a middleware module.') % {'module': middleware_path} mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:] try: mod = __import__(mw_module, {}, {}, ['']) except ImportError, e: raise exceptions.ImproperlyConfigured, _('Error importing middleware %(module)s: "%(error)s".') % {'module': mw_module, 'error': e} try: mw_class = getattr(mod, mw_classname) except AttributeError: raise exceptions.ImproperlyConfigured, _('Middleware module "%(module)s" does not define a "%(class)s" class.') % {'module': mw_module, 'class':mw_classname} try: mw_instance = mw_class() except exceptions.MiddlewareNotUsed: continue # End brutal code lift # We need to make sure we don't have a process_request function because we don't know what # application will handle the request at the point process_request is called if hasattr(mw_instance, 'process_request'): raise exceptions.ImproperlyConfigured, \ _('AppSpecificMiddleware module "%(module)s" has a process_request function' + \ ' which is impossible.') % {'module': middleware_path} if hasattr(mw_instance, 'process_view'): result['view'].append(mw_instance.process_view) if hasattr(mw_instance, 'process_response'): result['response'].insert(0, mw_instance.process_response) if hasattr(mw_instance, 'process_exception'): result['exception'].insert(0, mw_instance.process_exception) return result
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/src/desktop/middleware.py/AppSpecificMiddleware._load_app_middleware
4,577
def clean_username(self, username, request): """ Allows the backend to clean the username, if the backend defines a clean_username method. """ backend_str = request.session[BACKEND_SESSION_KEY] backend = load_backend(backend_str) try: username = backend.clean_username(username) except __HOLE__: pass return username
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/src/desktop/middleware.py/SpnegoMiddleware.clean_username
4,578
def test_unicode(): mp.dps = 15 try: unicode = unicode except __HOLE__: unicode = str assert mpf(unicode('2.76')) == 2.76 assert mpf(unicode('inf')) == inf
NameError
dataset/ETHPy150Open fredrik-johansson/mpmath/mpmath/tests/test_convert.py/test_unicode
4,579
def compute(self): (positions, center) = self.get_positions() legacy = self.get_input("allowLegacy") use_values = True try: values = [float(x) for x in self.get_values()] except __HOLE__, e: # LEGACY SUPPORT if legacy: use_values = False else: raise ModuleError(self, "Must provide values column") if not use_values: symbol_data = positions symbol_options = {} else: symbol_options = self.get_options(self.SPECS) symbol_options["path"] = \ RawJavaScriptText("google.maps.SymbolPath.CIRCLE") min_value = min(values) max_value = max(values) # if we have black or white, we want hue to match the other side def white_or_black(c): return ((c[0] < 1e-8 and c[1] < 1e-8 and c[2] < 1e-8) or (c[0] > 1-1e-8 and c[1] > 1-1e-8 and c[2] > 1-1e-8)) start_c = symbol_options.pop("fillStartColor").tuple end_c = symbol_options.pop("fillEndColor").tuple start_wb = white_or_black(start_c) end_wb = white_or_black(end_c) start_c = list(colorsys.rgb_to_hsv(*start_c)) end_c = list(colorsys.rgb_to_hsv(*end_c)) if start_wb: start_c[0] = end_c[0] elif end_wb: end_c[0] = start_c[0] symbol_data = [] for i in xrange(len(positions)): val = values[i] if max_value - min_value < 1e-8: norm_val = 1.0 else: norm_val = (val - min_value) / (max_value - min_value) color = [] for j in xrange(len(start_c)): color.append((1.0 - norm_val) * start_c[j] + norm_val * end_c[j]) color = colorsys.hsv_to_rgb(*color) symbol_data.append([positions[i], GMapColor(255 * color[0], 255 * color[1], 255 * color[2])]) symbol_titles = self.get_titles(default_col=(3 if legacy else None)) data = {"symbol_data": symbol_data, "symbol_options": symbol_options, "symbol_titles": symbol_titles, "use_values": use_values} vis_data = GMapVisData([], self.TEMPLATE, data, center) self.set_output("self", vis_data)
ValueError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/gmaps/vis.py/GMapSymbols.compute
4,580
def match(self, url): ''' Try to find if url matches against any of the schemes within this endpoint. Args: url: The url to match against each scheme Returns: True if a matching scheme was found for the url, False otherwise ''' try: urlSchemes = self._urlSchemes.itervalues() # Python 2 except __HOLE__: urlSchemes = self._urlSchemes.values() # Python 3 for urlScheme in urlSchemes: if urlScheme.match(url): return True return False
AttributeError
dataset/ETHPy150Open abarmat/python-oembed/oembed/__init__.py/OEmbedEndpoint.match
4,581
def vote(request, poll_id): p = get_object_or_404(Poll, pk=poll_id) try: selected_choice = Choice.objects.get(poll_id=p.pk, pk=request.POST['choice']) except (__HOLE__, Choice.DoesNotExist): # Redisplay the poll voting form. return render_to_response('polls/detail.html', { 'poll': p, 'error_message': "You didn't select a choice.", }, context_instance=RequestContext(request)) else: Choice.objects.filter(pk=pk, poll_id=p.pk).update(votes=F('votes') + 1) # Always return an HttpResponseRedirect after successfully dealing # with POST data. This prevents data from being posted twice if a # user hits the Back button. return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
KeyError
dataset/ETHPy150Open disqus/sharding-example/polls/views.py/vote
4,582
def find_package(self, package): for path in self.paths(): full = os.path.join(path, package) if os.path.exists(full): return package, full if not os.path.isdir(path) and zipfile.is_zipfile(path): zip = zipfile.ZipFile(path, 'r') try: zip.read(os.path.join(package, '__init__.py')) except __HOLE__: pass else: zip.close() return package, full zip.close() ## FIXME: need special error for package.py case: raise InstallationError( 'No package with the name %s found' % package)
KeyError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/pip/commands/zip.py/ZipCommand.find_package
4,583
def make_inode_tests(fixture): """ Create a TestCase for ``INode``. :param fixture: A fixture that returns a :class:`INode` provider which will work with any arbitrary valid program with arguments. """ class INodeTests(TestCase): """Tests for :class:`INode` implementors. May be functional tests depending on the fixture. """ def test_interface(self): """ The tested object provides :class:`INode`. """ node = fixture(self) self.assertTrue(verifyObject(INode, node)) def test_run_no_fd_leakage(self): """ No file descriptors are leaked by ``run()``. """ node = fixture(self) with assertNoFDsLeaked(self): with node.run([b"cat"]): pass def test_run_exceptions_pass_through(self): """ Exceptions raised in the context manager are not swallowed. """ node = fixture(self) def run_node(): with node.run([b"cat"]): raise RuntimeError() self.assertRaises(RuntimeError, run_node) def test_run_no_fd_leakage_exceptions(self): """ No file descriptors are leaked by ``run()`` if exception is raised within the context manager. """ node = fixture(self) with assertNoFDsLeaked(self): try: with node.run([b"cat"]): raise RuntimeError() except __HOLE__: pass def test_run_writeable(self): """ The returned object from ``run()`` is writeable. """ node = fixture(self) with node.run([b"python", b"-c", b"import sys; sys.stdin.read()"]) as writer: writer.write(b"hello") writer.write(b"there") def test_get_output_no_leakage(self): """ No file descriptors are leaked by ``get_output()``. """ node = fixture(self) with assertNoFDsLeaked(self): node.get_output([b"echo", b"hello"]) def test_get_output_result_bytes(self): """ ``get_output()`` returns a result that is ``bytes``. """ node = fixture(self) result = node.get_output([b"echo", b"hello"]) self.assertIsInstance(result, bytes) return INodeTests
RuntimeError
dataset/ETHPy150Open ClusterHQ/flocker/flocker/common/test/test_ipc.py/make_inode_tests
4,584
def decode(name): try: AreaVi.ACTIVE.decode(name) except __HOLE__: set_status_msg('Failed! Charset %s' % name)
UnicodeDecodeError
dataset/ETHPy150Open iogf/vy/vyapp/plugins/codec.py/decode
4,585
def __run_test_methods(self, class_fixture_failures): """Run this class's setup fixtures / test methods / teardown fixtures. These are run in the obvious order - setup and teardown go before and after, respectively, every test method. If there was a failure in the class_setup phase, no method-level fixtures or test methods will be run, and we'll eventually skip all the way to the class_teardown phase. If a given test method is marked as disabled, neither it nor its fixtures will be run. If there is an exception during the setup phase, the test method will not be run and execution will continue with the teardown phase. """ for test_method in self.runnable_test_methods(): result = TestResult(test_method) # Sometimes, test cases want to take further action based on # results, e.g. further clean-up or reporting if a test method # fails. (Yelp's Selenium test cases do this.) If you need to # programatically inspect test results, you should use # self.results(). # NOTE: THIS IS INCORRECT -- im_self is shared among all test # methods on the TestCase instance. This is preserved for backwards # compatibility and should be removed eventually. try: # run "on-run" callbacks. e.g. print out the test method name self.fire_event(self.EVENT_ON_RUN_TEST_METHOD, result) result.start() self.__all_test_results.append(result) # if class setup failed, this test has already failed. self._stage = self.STAGE_CLASS_SETUP for exc_info in class_fixture_failures: result.end_in_failure(exc_info) if result.complete: continue # first, run setup fixtures self._stage = self.STAGE_SETUP with self.__test_fixtures.instance_context() as fixture_failures: # we haven't had any problems in class/instance setup, onward! if not fixture_failures: self._stage = self.STAGE_TEST_METHOD result.record(test_method) self._stage = self.STAGE_TEARDOWN # maybe something broke during teardown -- record it for exc_info in fixture_failures: result.end_in_failure(exc_info) # if nothing's gone wrong, it's not about to start if not result.complete: result.end_in_success() except (__HOLE__, SystemExit): result.end_in_interruption(sys.exc_info()) raise finally: self.fire_event(self.EVENT_ON_COMPLETE_TEST_METHOD, result) if not result.success: self.failure_count += 1 if self.failure_limit and self.failure_count >= self.failure_limit: break
KeyboardInterrupt
dataset/ETHPy150Open Yelp/Testify/testify/test_case.py/TestCase.__run_test_methods
4,586
def FileEntryExistsByPathSpec(self, path_spec): """Determines if a file entry for a path specification exists. Args: path_spec: a path specification (instance of PathSpec). Returns: Boolean indicating if the file entry exists. """ location = getattr(path_spec, u'location', None) if location is None: return False is_device = False if platform.system() == u'Windows': # Windows does not support running os.path.exists on device files # so we use libsmdev to do the check. try: is_device = pysmdev.check_device(location) except __HOLE__ as exception: # Since pysmdev will raise IOError when it has no access to the device # we check if the exception message contains ' access denied ' and # return true. # Note that exception.message no longer works in Python 3. exception_string = str(exception) if not isinstance(exception_string, py2to3.UNICODE_TYPE): exception_string = py2to3.UNICODE_TYPE( exception_string, errors=u'replace') if u' access denied ' in exception_string: is_device = True if not is_device and not os.path.exists(location): return False return True
IOError
dataset/ETHPy150Open log2timeline/dfvfs/dfvfs/vfs/os_file_system.py/OSFileSystem.FileEntryExistsByPathSpec
4,587
@internationalizeDocstring def connect(self, irc, msg, args, opts, network, server, password): """[--nossl] <network> [<host[:port]>] [<password>] Connects to another network (which will be represented by the name provided in <network>) at <host:port>. If port is not provided, it defaults to 6697, the default port for IRC with SSL. If password is provided, it will be sent to the server in a PASS command. If --nossl is provided, an SSL connection will not be attempted, and the port will default to 6667. """ if '.' in network: irc.error("Network names cannot have a '.' in them. " "Remember, this is the network name, not the actual " "server you plan to connect to.", Raise=True) try: otherIrc = self._getIrc(network) irc.error(_('I\'m already connected to %s.') % network) return # We've gotta return here. This is ugly code, but I'm not # quite sure what to do about it. except callbacks.Error: pass ssl = True for (opt, arg) in opts: if opt == 'nossl': ssl = False if server: if ':' in server: (server, port) = server.split(':') port = int(port) elif ssl: port = 6697 else: port = 6667 serverPort = (server, port) else: try: serverPort = conf.supybot.networks.get(network).servers()[0] except (registry.NonExistentRegistryEntry, __HOLE__): irc.error(_('A server must be provided if the network is not ' 'already registered.')) return Owner = irc.getCallback('Owner') newIrc = Owner._connect(network, serverPort=serverPort, password=password, ssl=ssl) conf.supybot.networks().add(network) assert newIrc.callbacks is irc.callbacks, 'callbacks list is different' irc.replySuccess(_('Connection to %s initiated.') % network)
IndexError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Network/plugin.py/Network.connect
4,588
def _setup_joins_with_translation(self, names, opts, alias, dupe_multis, allow_many=True, allow_explicit_fk=False, can_reuse=None, negate=False, process_extras=True): """ This is based on a full copy of Query.setup_joins because currently I see no way to handle it differently. TO DO: there might actually be a way, by splitting a single multi-name setup_joins call into separate calls. Check it. -- marcin@elksoft.pl Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are joining to), 'alias' is the alias for the table we are joining to. If dupe_multis is True, any many-to-many or many-to-one joins will always create a new alias (necessary for disjunctive filters). Returns the final field involved in the join, the target database column (used for any 'where' constraint), the final 'opts' value and the list of tables joined. """ joins = [alias] last = [0] dupe_set = set() exclusions = set() extra_filters = [] for pos, name in enumerate(names): try: exclusions.add(int_alias) except __HOLE__: pass exclusions.add(alias) last.append(len(joins)) if name == 'pk': name = opts.pk.name try: field, model, direct, m2m = opts.get_field_by_name(name) except FieldDoesNotExist: for f in opts.fields: if allow_explicit_fk and name == f.attname: # XXX: A hack to allow foo_id to work in values() for # backwards compatibility purposes. If we dropped that # feature, this could be removed. field, model, direct, m2m = opts.get_field_by_name(f.name) break else: names = opts.get_all_field_names() + self.aggregate_select.keys() raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) if not allow_many and (m2m or not direct): for alias in joins: self.unref_alias(alias) raise MultiJoin(pos + 1) #=================================================================== # Django Multilingual NG Specific Code START #=================================================================== if hasattr(opts, 'translation_model'): translation_opts = opts.translation_model._meta if model == opts.translation_model: language_code = translation_opts.translated_fields[name][1] if language_code is None: language_code = get_default_language() #TODO: check alias master_table_name = opts.db_table trans_table_alias = get_translation_table_alias( model._meta.db_table, language_code) new_table = (master_table_name + "__" + trans_table_alias) qn = self.get_compiler(DEFAULT_DB_ALIAS).quote_name_unless_alias qn2 = self.get_compiler(DEFAULT_DB_ALIAS).connection.ops.quote_name trans_join = ("JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_code = '%s'))" % (qn2(model._meta.db_table), qn2(new_table), qn2(new_table), qn(master_table_name), qn2(model._meta.pk.column), qn2(new_table), language_code)) self.extra_join[new_table] = trans_join target = field continue #=================================================================== # Django Multilingual NG Specific Code END #=================================================================== elif model: # The field lives on a base class of the current model. 
# Skip the chain of proxy to the concrete proxied model proxied_model = get_proxied_model(opts) for int_model in opts.get_base_chain(model): if int_model is proxied_model: opts = int_model._meta else: lhs_col = opts.parents[int_model].column dedupe = lhs_col in opts.duplicate_targets if dedupe: exclusions.update(self.dupe_avoidance.get( (id(opts), lhs_col), ())) dupe_set.add((opts, lhs_col)) opts = int_model._meta alias = self.join((alias, opts.db_table, lhs_col, opts.pk.column), exclusions=exclusions) joins.append(alias) exclusions.add(alias) for (dupe_opts, dupe_col) in dupe_set: self.update_dupe_avoidance(dupe_opts, dupe_col, alias) cached_data = opts._join_cache.get(name) orig_opts = opts dupe_col = direct and field.column or field.field.column dedupe = dupe_col in opts.duplicate_targets if dupe_set or dedupe: if dedupe: dupe_set.add((opts, dupe_col)) exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col), ())) if process_extras and hasattr(field, 'extra_filters'): extra_filters.extend(field.extra_filters(names, pos, negate)) if direct: if m2m: # Many-to-many field defined on the current model. if cached_data: (table1, from_col1, to_col1, table2, from_col2, to_col2, opts, target) = cached_data else: table1 = field.m2m_db_table() from_col1 = opts.pk.column to_col1 = field.m2m_column_name() opts = field.rel.to._meta table2 = opts.db_table from_col2 = field.m2m_reverse_name() to_col2 = opts.pk.column target = opts.pk orig_opts._join_cache[name] = (table1, from_col1, to_col1, table2, from_col2, to_col2, opts, target) int_alias = self.join((alias, table1, from_col1, to_col1), dupe_multis, exclusions, nullable=True, reuse=can_reuse) if int_alias == table2 and from_col2 == to_col2: joins.append(int_alias) alias = int_alias else: alias = self.join( (int_alias, table2, from_col2, to_col2), dupe_multis, exclusions, nullable=True, reuse=can_reuse) joins.extend([int_alias, alias]) elif field.rel: # One-to-one or many-to-one field if cached_data: (table, from_col, to_col, opts, target) = cached_data else: opts = field.rel.to._meta target = field.rel.get_related_field() table = opts.db_table from_col = field.column to_col = target.column orig_opts._join_cache[name] = (table, from_col, to_col, opts, target) alias = self.join((alias, table, from_col, to_col), exclusions=exclusions, nullable=field.null) joins.append(alias) else: # Non-relation fields. target = field break else: orig_field = field field = field.field if m2m: # Many-to-many field defined on the target model. 
if cached_data: (table1, from_col1, to_col1, table2, from_col2, to_col2, opts, target) = cached_data else: table1 = field.m2m_db_table() from_col1 = opts.pk.column to_col1 = field.m2m_reverse_name() opts = orig_field.opts table2 = opts.db_table from_col2 = field.m2m_column_name() to_col2 = opts.pk.column target = opts.pk orig_opts._join_cache[name] = (table1, from_col1, to_col1, table2, from_col2, to_col2, opts, target) int_alias = self.join((alias, table1, from_col1, to_col1), dupe_multis, exclusions, nullable=True, reuse=can_reuse) alias = self.join((int_alias, table2, from_col2, to_col2), dupe_multis, exclusions, nullable=True, reuse=can_reuse) joins.extend([int_alias, alias]) else: # One-to-many field (ForeignKey defined on the target model) if cached_data: (table, from_col, to_col, opts, target) = cached_data else: local_field = opts.get_field_by_name( field.rel.field_name)[0] opts = orig_field.opts table = opts.db_table from_col = local_field.column to_col = field.column target = opts.pk orig_opts._join_cache[name] = (table, from_col, to_col, opts, target) alias = self.join((alias, table, from_col, to_col), dupe_multis, exclusions, nullable=True, reuse=can_reuse) joins.append(alias) for (dupe_opts, dupe_col) in dupe_set: try: self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias) except NameError: self.update_dupe_avoidance(dupe_opts, dupe_col, alias) if pos != len(names) - 1: if pos == len(names) - 2: raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1])) else: raise FieldError("Join on field %r not permitted." % name) return field, target, opts, joins, last, extra_filters
NameError
dataset/ETHPy150Open hzlf/openbroadcast/website/multilingual/query.py/MultilingualQuery._setup_joins_with_translation
4,589
def test_calling_user_fails_when_attribute_error_is_raised(self): """ This proves that when an AttributeError is raised inside of the request.user property, that we can handle this and report the true, underlying error. """ class AuthRaisesAttributeError(object): def authenticate(self, request): import rest_framework rest_framework.MISSPELLED_NAME_THAT_DOESNT_EXIST self.request = Request(factory.get('/'), authenticators=(AuthRaisesAttributeError(),)) SessionMiddleware().process_request(self.request) login(self.request, self.user) try: self.request.user except __HOLE__ as error: assert str(error) in ( "'module' object has no attribute 'MISSPELLED_NAME_THAT_DOESNT_EXIST'", # Python < 3.5 "module 'rest_framework' has no attribute 'MISSPELLED_NAME_THAT_DOESNT_EXIST'", # Python >= 3.5 ) else: assert False, 'AttributeError not raised'
AttributeError
dataset/ETHPy150Open tomchristie/django-rest-framework/tests/test_request.py/TestUserSetter.test_calling_user_fails_when_attribute_error_is_raised
4,590
def nativeAdjacencyMatrix(self): """ :returns: the adjacency matrix in the native sparse format. """ try: self.W.eliminate_zeros() except __HOLE__: pass A = self.W/self.W return A
AttributeError
dataset/ETHPy150Open charanpald/APGL/apgl/graph/SparseGraph.py/SparseGraph.nativeAdjacencyMatrix
4,591
def get_urls(self): original_urls = super(AdminViews, self).get_urls() added_urls = [] for link in self.admin_views: if hasattr(self, link[1]): view_func = getattr(self, link[1]) if len(link) == 3: # View requires permission view_func = permission_required(link[2], raise_exception=True)(view_func) added_urls.extend( patterns('', url(regex=r'%s' % link[1], name=link[1], view=self.admin_site.admin_view(view_func) ) ) ) self.local_view_names.append(link[0]) try: model_name = self.model._meta.model_name except __HOLE__: model_name = self.model._meta.module_name # removed as of Django 1.8 # Build URL from known info info = self.model._meta.app_label, model_name self.output_urls.append(( 'view', link[0], "%s/%s/%s/%s" % (ADMIN_URL_PREFIX, info[0], info[1], link[1]), link[2] if len(link) == 3 else None, ) ) else: self.direct_links.append(link) self.output_urls.append(('url', link[0], link[1], link[2] if len(link) == 3 else None)) return added_urls + original_urls
AttributeError
dataset/ETHPy150Open frankwiles/django-admin-views/admin_views/admin.py/AdminViews.get_urls
4,592
def call_pydb(self, args): """Invoke pydb with the supplied parameters.""" try: import pydb except __HOLE__: raise ImportError("pydb doesn't seem to be installed.") if not hasattr(pydb.pydb, "runv"): raise ImportError("You need pydb version 1.19 or later installed.") argl = arg_split(args) # print argl # dbg if len(inspect.getargspec(pydb.runv)[0]) == 2: pdb = debugger.Pdb(color_scheme=self.colors) ip.history_saving_wrapper( lambda : pydb.runv(argl, pdb) )() else: ip.history_saving_wrapper( lambda : pydb.runv(argl) )()
ImportError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/quarantine/ipy_pydb.py/call_pydb
4,593
def read_images(path, fileNameFilter=FileNameFilter("None"), sz=None): """Reads the images in a given folder, resizes images on the fly if size is given. Args: path: Path to a folder with subfolders representing the subjects (persons). sz: A tuple with the size Resizes Returns: A list [X,y] X: The images, which is a Python list of numpy arrays. y: The corresponding labels (the unique number of the subject, person) in a Python list. """ c = 0 X,y = [], [] for dirname, dirnames, filenames in os.walk(path): for subdirname in dirnames: subject_path = os.path.join(dirname, subdirname) for filename in os.listdir(subject_path): if fileNameFilter(filename): try: im = Image.open(os.path.join(subject_path, filename)) im = im.convert("L") # resize to given size (if given) if (sz is not None): im = im.resize(sz, Image.ANTIALIAS) X.append(np.asarray(im, dtype=np.uint8)) y.append(c) except __HOLE__, (errno, strerror): print "I/O error({0}): {1}".format(errno, strerror) except: print "Unexpected error:", sys.exc_info()[0] raise c = c+1 return [X,y]
IOError
dataset/ETHPy150Open bytefish/facerec/py/apps/scripts/lpq_experiment.py/read_images
4,594
def partition_data(X, y): """ Shuffles the input data and splits it into a new set of images. This resembles the experimental setup used in the paper on the Local Phase Quantization descriptor in: "Recognition of Blurred Faces Using Local Phase Quantization", Timo Ahonen, Esa Rahtu, Ville Ojansivu, Janne Heikkila What it does is to build a subset for each class, so it has 1 image for training and the rest for testing. The original dataset is shuffled for each call, hence you always get a new partitioning. """ Xs,ys = shuffle_array(X,y) # Maps index to class: mapping = {} for i in xrange(len(y)): yi = ys[i] try: mapping[yi].append(i) except __HOLE__: mapping[yi] = [i] # Get one image for each subject: Xtrain, ytrain = [], [] Xtest, ytest = [], [] # Finally build partition: for key, indices in mapping.iteritems(): # Add images: Xtrain.extend([ Xs[i] for i in indices[:1] ]) ytrain.extend([ ys[i] for i in indices[:1] ]) Xtest.extend([ Xs[i] for i in indices[1:20]]) ytest.extend([ ys[i] for i in indices[1:20]]) # Return shuffled partitions: return Xtrain, ytrain, Xtest, ytest
KeyError
dataset/ETHPy150Open bytefish/facerec/py/apps/scripts/lpq_experiment.py/partition_data
4,595
def __get__(self, inst, owner): now = time.time() try: value, last_update = inst._cache[self.__name__] if self.ttl > 0 and now - last_update > self.ttl: raise AttributeError except (KeyError, AttributeError): value = self.fget(inst) try: cache = inst._cache except __HOLE__: cache = inst._cache = {} cache[self.__name__] = (value, now) return value
AttributeError
dataset/ETHPy150Open ofa/connect/open_connect/connect_core/utils/third_party/cached_property.py/cached_property.__get__
4,596
@urlmatch(netloc=r'(.*\.)?api\.weixin\.qq\.com$') def wechat_api_mock(url, request): path = url.path.replace('/cgi-bin/', '').replace('/', '_') if path.startswith('_'): path = path[1:] res_file = os.path.join(_FIXTURE_PATH, '%s.json' % path) content = { 'errcode': 99999, 'errmsg': 'can not find fixture %s' % res_file, } headers = { 'Content-Type': 'application/json' } try: with open(res_file, 'rb') as f: content = json.loads(f.read().decode('utf-8')) except (IOError, __HOLE__) as e: print(e) return response(200, content, headers, request=request)
ValueError
dataset/ETHPy150Open jxtech/wechatpy/tests/test_session.py/wechat_api_mock
4,597
def __new__(cls, name, bases, attrs): document = attrs.get("document") if document: try: attrs['model'] = document except __HOLE__: attrs['model'] = property( lambda self: self.document ) return super(WrapDocument, cls).__new__(cls, name, bases, attrs)
AttributeError
dataset/ETHPy150Open MongoEngine/django-mongoengine/django_mongoengine/utils/wrappers.py/WrapDocument.__new__
4,598
def run(): start_name = sys.argv[1] if sys.argv[1:] else '' n_tables = n_read = n_no_data = n_read_errors = n_write = n_write_errors = 0 for tab in sf.introspection.table_list_cache['sobjects']: if tab['retrieveable'] and not tab['name'] in ( # These require specific filters (descried in their error messages) 'CollaborationGroupRecord', 'ContentFolderMember', 'ContentFolderItem', 'ContentDocumentLink', 'Idea', 'IdeaComment', 'UserProfileFeed', 'Vote', #'OpportunityPartner', 'Product2Feed', # TODO The "RecordType" is a very important object, but it can fail # on philchristensen's Salesforce with Travis. It should be more # investigated to which SObject is the RecordType related and enabled # again. 'RecordType', # UNKNOWN_EXCEPTION: 'TenantUsageEntitlement', ): if tab['name'] < start_name: continue [test_class] = [cls for cls in (getattr(mdl, x) for x in dir(mdl)) if isinstance(cls, type) and issubclass(cls, django.db.models.Model) and cls._meta.db_table == tab['name'] ] stdout.write('%s ' % tab['name']) obj = None try: n_read += 1 obj = test_class.objects.all()[0] except SalesforceError as e: stderr.write("\n************** %s %s\n" % (tab['name'], e)) n_read_errors += 1 except __HOLE__: n_no_data += 1 if obj: stdout.write("* ") if obj and tab['updateable'] and not tab['name'] in ( # Cannot modify managed objects 'ApexClass', 'ApexComponent', 'ApexTrigger', 'FieldPermissions', 'ObjectPermissions', 'PermissionSet', 'Scontrol', 'StaticResource', 'WebLink', # This is not writable due to 'NamespacePrefix' field 'ApexPage', # Some Leads are not writable becase they are coverted to Contact 'Lead', # Insufficient access rights on cross-reference id 'Group', 'OpportunityShare', 'Profile', # Some very old items can have empty Folder.Name, but can # not be saved again without Name. 'Folder', # Records with some values of UserShare.RowCause can not be updated. 'UserShare', # Cannot directly insert FeedItem with type TrackedChange 'FeedItem', ): stdout.write('(write) ') try: n_write += 1 obj.save(force_update=True) except SalesforceError as e: stderr.write("\n************** %s %s\n" % (tab['name'], e)) n_write_errors += 1 else: # object 'Topic' doesn't have the attribute 'last_modified_date' # in recently created SFDC databases (proably version 34.0+) if hasattr(obj, 'last_modified_date'): assert test_class.objects.get(pk=obj.pk).last_modified_date > obj.last_modified_date stdout.write('\n') n_tables = len(sf.introspection.table_list_cache['sobjects']) print('Result: {n_tables} tables, {n_read} reads tried, {n_no_data} no data, ' '{n_read_errors} read errors, {n_write} writes tried, {n_write_errors} write errors' .format(n_tables=n_tables, n_read=n_read, n_no_data=n_no_data, n_read_errors=n_read_errors, n_write=n_write, n_write_errors=n_write_errors)) print('********* ERRORs found' if n_read_errors + n_write_errors else 'OK') return n_read_errors + n_write_errors == 0
IndexError
dataset/ETHPy150Open django-salesforce/django-salesforce/tests/inspectdb/slow_test.py/run
4,599
def enforce(self, rule, target, creds, do_raise=False, exc=None, *args, **kwargs): """Checks authorization of a rule against the target and credentials. :param rule: A string or BaseCheck instance specifying the rule to evaluate. :param target: As much information about the object being operated on as possible, as a dictionary. :param creds: As much information about the user performing the action as possible, as a dictionary. :param do_raise: Whether to raise an exception or not if check fails. :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to check() (both positional and keyword arguments) will be passed to the exception class. If not specified, PolicyNotAuthorized will be used. :return: Returns False if the policy does not allow the action and exc is not provided; otherwise, returns a value that evaluates to True. Note: for rules using the "case" expression, this True value will be the specified string from the expression. """ self.load_rules() # Allow the rule to be a Check tree if isinstance(rule, BaseCheck): result = rule(target, creds, self) elif not self.rules: # No rules to reference means we're going to fail closed result = False else: try: # Evaluate the rule result = self.rules[rule](target, creds, self) except __HOLE__: LOG.debug("Rule [%s] doesn't exist" % rule) # If the rule doesn't exist, fail closed result = False # If it is False, raise the exception if requested if do_raise and not result: if exc: raise exc(*args, **kwargs) raise PolicyNotAuthorized(rule) return result
KeyError
dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/openstack/common/policy.py/Enforcer.enforce