Dataset columns:
  _id: string (lengths 2 to 7)
  title: string (lengths 1 to 88)
  partition: string (3 classes)
  text: string (lengths 75 to 19.8k)
  language: string (1 class)
  meta_information: dict
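Each record that follows pairs a function identifier and title with the raw source code in `text`, plus `partition`, `language`, and `meta_information` fields. As a rough illustration of how these columns fit together, the snippet below loads records of this shape and tallies them per partition; it is a minimal sketch assuming the rows are stored one JSON object per line, and the file name `records.jsonl` is hypothetical, not part of this dataset's documentation.

import json

# Hypothetical input: one JSON object per line with the columns described
# above (_id, title, partition, text, language, meta_information).
def load_records(path="records.jsonl"):
    with open(path) as f:
        return [json.loads(line) for line in f]

def summarize(records):
    # Count rows per partition and find the longest `text` field,
    # mirroring the string-length statistics listed in the schema.
    by_partition = {}
    longest = None
    for rec in records:
        by_partition[rec["partition"]] = by_partition.get(rec["partition"], 0) + 1
        if longest is None or len(rec["text"]) > len(longest["text"]):
            longest = rec
    return by_partition, longest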
q279900
StdInSocketChannel.input
test
def input(self, string):
    """Send a string of raw input to the kernel."""
    content = dict(value=string)
    msg = self.session.msg('input_reply', content)
    self._queue_send(msg)
python
{ "resource": "" }
q279901
KernelManager.start_channels
test
def start_channels(self, shell=True, sub=True, stdin=True, hb=True): """Starts the channels for this kernel. This will create the channels if they do not exist and then start them. If port numbers of 0 are being used (random ports) then you must first call :method:`start_kernel`. If the channels have been stopped and you call this, :class:`RuntimeError` will be raised. """ if shell: self.shell_channel.start() if sub: self.sub_channel.start() if stdin: self.stdin_channel.start() self.shell_channel.allow_stdin = True else: self.shell_channel.allow_stdin = False if hb: self.hb_channel.start()
python
{ "resource": "" }
q279902
KernelManager.stop_channels
test
def stop_channels(self):
    """Stops all the running channels for this kernel.
    """
    if self.shell_channel.is_alive():
        self.shell_channel.stop()
    if self.sub_channel.is_alive():
        self.sub_channel.stop()
    if self.stdin_channel.is_alive():
        self.stdin_channel.stop()
    if self.hb_channel.is_alive():
        self.hb_channel.stop()
python
{ "resource": "" }
q279903
KernelManager.channels_running
test
def channels_running(self):
    """Are any of the channels created and running?"""
    return (self.shell_channel.is_alive() or self.sub_channel.is_alive() or
            self.stdin_channel.is_alive() or self.hb_channel.is_alive())
python
{ "resource": "" }
q279904
KernelManager.load_connection_file
test
def load_connection_file(self):
    """load connection info from JSON dict in self.connection_file"""
    with open(self.connection_file) as f:
        cfg = json.loads(f.read())
    self.ip = cfg['ip']
    self.shell_port = cfg['shell_port']
    self.stdin_port = cfg['stdin_port']
    self.iopub_port = cfg['iopub_port']
    self.hb_port = cfg['hb_port']
    self.session.key = str_to_bytes(cfg['key'])
python
{ "resource": "" }
q279905
KernelManager.write_connection_file
test
def write_connection_file(self): """write connection info to JSON dict in self.connection_file""" if self._connection_file_written: return self.connection_file,cfg = write_connection_file(self.connection_file, ip=self.ip, key=self.session.key, stdin_port=self.stdin_port, iopub_port=self.iopub_port, shell_port=self.shell_port, hb_port=self.hb_port) # write_connection_file also sets default ports: self.shell_port = cfg['shell_port'] self.stdin_port = cfg['stdin_port'] self.iopub_port = cfg['iopub_port'] self.hb_port = cfg['hb_port'] self._connection_file_written = True
python
{ "resource": "" }
q279906
KernelManager.start_kernel
test
def start_kernel(self, **kw): """Starts a kernel process and configures the manager to use it. If random ports (port=0) are being used, this method must be called before the channels are created. Parameters: ----------- launcher : callable, optional (default None) A custom function for launching the kernel process (generally a wrapper around ``entry_point.base_launch_kernel``). In most cases, it should not be necessary to use this parameter. **kw : optional See respective options for IPython and Python kernels. """ if self.ip not in LOCAL_IPS: raise RuntimeError("Can only launch a kernel on a local interface. " "Make sure that the '*_address' attributes are " "configured properly. " "Currently valid addresses are: %s"%LOCAL_IPS ) # write connection file / get default ports self.write_connection_file() self._launch_args = kw.copy() launch_kernel = kw.pop('launcher', None) if launch_kernel is None: from ipkernel import launch_kernel self.kernel = launch_kernel(fname=self.connection_file, **kw)
python
{ "resource": "" }
q279907
KernelManager.shutdown_kernel
test
def shutdown_kernel(self, restart=False): """ Attempts to the stop the kernel process cleanly. If the kernel cannot be stopped, it is killed, if possible. """ # FIXME: Shutdown does not work on Windows due to ZMQ errors! if sys.platform == 'win32': self.kill_kernel() return # Pause the heart beat channel if it exists. if self._hb_channel is not None: self._hb_channel.pause() # Don't send any additional kernel kill messages immediately, to give # the kernel a chance to properly execute shutdown actions. Wait for at # most 1s, checking every 0.1s. self.shell_channel.shutdown(restart=restart) for i in range(10): if self.is_alive: time.sleep(0.1) else: break else: # OK, we've waited long enough. if self.has_kernel: self.kill_kernel() if not restart and self._connection_file_written: # cleanup connection files on full shutdown of kernel we started self._connection_file_written = False try: os.remove(self.connection_file) except IOError: pass
python
{ "resource": "" }
q279908
KernelManager.restart_kernel
test
def restart_kernel(self, now=False, **kw): """Restarts a kernel with the arguments that were used to launch it. If the old kernel was launched with random ports, the same ports will be used for the new kernel. Parameters ---------- now : bool, optional If True, the kernel is forcefully restarted *immediately*, without having a chance to do any cleanup action. Otherwise the kernel is given 1s to clean up before a forceful restart is issued. In all cases the kernel is restarted, the only difference is whether it is given a chance to perform a clean shutdown or not. **kw : optional Any options specified here will replace those used to launch the kernel. """ if self._launch_args is None: raise RuntimeError("Cannot restart the kernel. " "No previous call to 'start_kernel'.") else: # Stop currently running kernel. if self.has_kernel: if now: self.kill_kernel() else: self.shutdown_kernel(restart=True) # Start new kernel. self._launch_args.update(kw) self.start_kernel(**self._launch_args) # FIXME: Messages get dropped in Windows due to probable ZMQ bug # unless there is some delay here. if sys.platform == 'win32': time.sleep(0.2)
python
{ "resource": "" }
q279909
KernelManager.kill_kernel
test
def kill_kernel(self): """ Kill the running kernel. """ if self.has_kernel: # Pause the heart beat channel if it exists. if self._hb_channel is not None: self._hb_channel.pause() # Attempt to kill the kernel. try: self.kernel.kill() except OSError, e: # In Windows, we will get an Access Denied error if the process # has already terminated. Ignore it. if sys.platform == 'win32': if e.winerror != 5: raise # On Unix, we may get an ESRCH error if the process has already # terminated. Ignore it. else: from errno import ESRCH if e.errno != ESRCH: raise self.kernel = None else: raise RuntimeError("Cannot kill kernel. No kernel is running!")
python
{ "resource": "" }
q279910
KernelManager.interrupt_kernel
test
def interrupt_kernel(self):
    """ Interrupts the kernel.

    Unlike ``signal_kernel``, this operation is well supported on all
    platforms.
    """
    if self.has_kernel:
        if sys.platform == 'win32':
            from parentpoller import ParentPollerWindows as Poller
            Poller.send_interrupt(self.kernel.win32_interrupt_event)
        else:
            self.kernel.send_signal(signal.SIGINT)
    else:
        raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
python
{ "resource": "" }
q279911
KernelManager.signal_kernel
test
def signal_kernel(self, signum):
    """ Sends a signal to the kernel.

    Note that since only SIGTERM is supported on Windows, this function
    is only useful on Unix systems.
    """
    if self.has_kernel:
        self.kernel.send_signal(signum)
    else:
        raise RuntimeError("Cannot signal kernel. No kernel is running!")
python
{ "resource": "" }
q279912
KernelManager.is_alive
test
def is_alive(self): """Is the kernel process still running?""" if self.has_kernel: if self.kernel.poll() is None: return True else: return False elif self._hb_channel is not None: # We didn't start the kernel with this KernelManager so we # use the heartbeat. return self._hb_channel.is_beating() else: # no heartbeat and not local, we can't tell if it's running, # so naively return True return True
python
{ "resource": "" }
q279913
KernelManager.shell_channel
test
def shell_channel(self):
    """Get the REQ socket channel object to make requests of the kernel."""
    if self._shell_channel is None:
        self._shell_channel = self.shell_channel_class(self.context,
                                                       self.session,
                                                       (self.ip, self.shell_port))
    return self._shell_channel
python
{ "resource": "" }
q279914
KernelManager.sub_channel
test
def sub_channel(self):
    """Get the SUB socket channel object."""
    if self._sub_channel is None:
        self._sub_channel = self.sub_channel_class(self.context,
                                                   self.session,
                                                   (self.ip, self.iopub_port))
    return self._sub_channel
python
{ "resource": "" }
q279915
KernelManager.hb_channel
test
def hb_channel(self):
    """Get the heartbeat socket channel object to check that the kernel
    is alive."""
    if self._hb_channel is None:
        self._hb_channel = self.hb_channel_class(self.context,
                                                 self.session,
                                                 (self.ip, self.hb_port))
    return self._hb_channel
python
{ "resource": "" }
q279916
bind_kernel
test
def bind_kernel(**kwargs): """Bind an Engine's Kernel to be used as a full IPython kernel. This allows a running Engine to be used simultaneously as a full IPython kernel with the QtConsole or other frontends. This function returns immediately. """ from IPython.zmq.ipkernel import IPKernelApp from IPython.parallel.apps.ipengineapp import IPEngineApp # first check for IPKernelApp, in which case this should be a no-op # because there is already a bound kernel if IPKernelApp.initialized() and isinstance(IPKernelApp._instance, IPKernelApp): return if IPEngineApp.initialized(): try: app = IPEngineApp.instance() except MultipleInstanceError: pass else: return app.bind_kernel(**kwargs) raise RuntimeError("bind_kernel be called from an IPEngineApp instance")
python
{ "resource": "" }
q279917
ExtensionDebugger.debug
test
def debug(self, level, message):
    """
    Emit a debugging message depending on the debugging level.

    :param level: The debugging level.
    :param message: The message to emit.
    """
    if self._debug >= level:
        print(message, file=sys.stderr)
python
{ "resource": "" }
q279918
ExtensionSet._get_extension_classes
test
def _get_extension_classes(cls): """ Retrieve the extension classes in priority order. :returns: A list of extension classes, in proper priority order. """ if cls._extension_classes is None: exts = {} # Iterate over the entrypoints for ext in entry.points[NAMESPACE_EXTENSIONS]: exts.setdefault(ext.priority, []) exts[ext.priority].append(ext) # Save the list of extension classes cls._extension_classes = list(utils.iter_prio_dict(exts)) return cls._extension_classes
python
{ "resource": "" }
q279919
ExtensionSet.pre_step
test
def pre_step(self, ctxt, step, idx): """ Called prior to executing a step. :param ctxt: An instance of ``timid.context.Context``. :param step: An instance of ``timid.steps.Step`` describing the step to be executed. :param idx: The index of the step in the list of steps. :returns: A ``True`` value if the step is to be skipped, ``False`` otherwise. """ debugger = ExtensionDebugger('pre_step') for ext in self.exts: with debugger(ext): if ext.pre_step(ctxt, step, idx): # Step must be skipped debugger.debug(3, 'Skipping step %d' % idx) return True return False
python
{ "resource": "" }
q279920
ExtensionSet.post_step
test
def post_step(self, ctxt, step, idx, result): """ Called after executing a step. :param ctxt: An instance of ``timid.context.Context``. :param step: An instance of ``timid.steps.Step`` describing the step that was executed. :param idx: The index of the step in the list of steps. :param result: An instance of ``timid.steps.StepResult`` describing the result of executing the step. May be altered by the extension, e.g., to set the ``ignore`` attribute. :returns: The ``result`` parameter, for convenience. """ debugger = ExtensionDebugger('post_step') for ext in self.exts: with debugger(ext): ext.post_step(ctxt, step, idx, result) # Convenience return return result
python
{ "resource": "" }
q279921
ExtensionSet.finalize
test
def finalize(self, ctxt, result): """ Called at the end of processing. This call allows extensions to emit any additional data, such as timing information, prior to ``timid``'s exit. Extensions may also alter the return value. :param ctxt: An instance of ``timid.context.Context``. :param result: The return value of the basic ``timid`` call, or an ``Exception`` instance if an exception was raised. Without the extension, this would be passed directly to ``sys.exit()``. :returns: The final result. """ debugger = ExtensionDebugger('finalize') for ext in self.exts: with debugger(ext): result = ext.finalize(ctxt, result) return result
python
{ "resource": "" }
q279922
walk_egg
test
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory"""
    walker = os.walk(egg_dir)
    base,dirs,files = walker.next()
    if 'EGG-INFO' in dirs:
        dirs.remove('EGG-INFO')
    yield base,dirs,files
    for bdf in walker:
        yield bdf
python
{ "resource": "" }
q279923
scan_module
test
def scan_module(egg_dir, base, name, stubs): """Check whether module possibly uses unsafe-for-zipfile stuff""" filename = os.path.join(base,name) if filename[:-1] in stubs: return True # Extension module pkg = base[len(egg_dir)+1:].replace(os.sep,'.') module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0] if sys.version_info < (3, 3): skip = 8 # skip magic & date else: skip = 12 # skip magic & date & file size f = open(filename,'rb'); f.read(skip) code = marshal.load(f); f.close() safe = True symbols = dict.fromkeys(iter_symbols(code)) for bad in ['__file__', '__path__']: if bad in symbols: log.warn("%s: module references %s", module, bad) safe = False if 'inspect' in symbols: for bad in [ 'getsource', 'getabsfile', 'getsourcefile', 'getfile' 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo', 'getinnerframes', 'getouterframes', 'stack', 'trace' ]: if bad in symbols: log.warn("%s: module MAY be using inspect.%s", module, bad) safe = False if '__name__' in symbols and '__main__' in symbols and '.' not in module: if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5 log.warn("%s: top-level module may be 'python -m' script", module) safe = False return safe
python
{ "resource": "" }
q279924
launch_new_instance
test
def launch_new_instance(): """Create and run the IPython controller""" if sys.platform == 'win32': # make sure we don't get called from a multiprocessing subprocess # this can result in infinite Controllers being started on Windows # which doesn't have a proper fork, so multiprocessing is wonky # this only comes up when IPython has been installed using vanilla # setuptools, and *not* distribute. import multiprocessing p = multiprocessing.current_process() # the main process has name 'MainProcess' # subprocesses will have names like 'Process-1' if p.name != 'MainProcess': # we are a subprocess, don't start another Controller! return app = IPControllerApp.instance() app.initialize() app.start()
python
{ "resource": "" }
q279925
IPControllerApp.save_connection_dict
test
def save_connection_dict(self, fname, cdict): """save a connection dict to json file.""" c = self.config url = cdict['url'] location = cdict['location'] if not location: try: proto,ip,port = split_url(url) except AssertionError: pass else: try: location = socket.gethostbyname_ex(socket.gethostname())[2][-1] except (socket.gaierror, IndexError): self.log.warn("Could not identify this machine's IP, assuming 127.0.0.1." " You may need to specify '--location=<external_ip_address>' to help" " IPython decide when to connect via loopback.") location = '127.0.0.1' cdict['location'] = location fname = os.path.join(self.profile_dir.security_dir, fname) self.log.info("writing connection info to %s", fname) with open(fname, 'w') as f: f.write(json.dumps(cdict, indent=2)) os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR)
python
{ "resource": "" }
q279926
IPControllerApp.load_config_from_json
test
def load_config_from_json(self): """load config from existing json connector files.""" c = self.config self.log.debug("loading config from JSON") # load from engine config fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file) self.log.info("loading connection info from %s", fname) with open(fname) as f: cfg = json.loads(f.read()) key = cfg['exec_key'] # json gives unicode, Session.key wants bytes c.Session.key = key.encode('ascii') xport,addr = cfg['url'].split('://') c.HubFactory.engine_transport = xport ip,ports = addr.split(':') c.HubFactory.engine_ip = ip c.HubFactory.regport = int(ports) self.location = cfg['location'] if not self.engine_ssh_server: self.engine_ssh_server = cfg['ssh'] # load client config fname = os.path.join(self.profile_dir.security_dir, self.client_json_file) self.log.info("loading connection info from %s", fname) with open(fname) as f: cfg = json.loads(f.read()) assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys" xport,addr = cfg['url'].split('://') c.HubFactory.client_transport = xport ip,ports = addr.split(':') c.HubFactory.client_ip = ip if not self.ssh_server: self.ssh_server = cfg['ssh'] assert int(ports) == c.HubFactory.regport, "regport mismatch"
python
{ "resource": "" }
q279927
IPControllerApp.load_secondary_config
test
def load_secondary_config(self): """secondary config, loading from JSON and setting defaults""" if self.reuse_files: try: self.load_config_from_json() except (AssertionError,IOError) as e: self.log.error("Could not load config from JSON: %s" % e) else: # successfully loaded config from JSON, and reuse=True # no need to wite back the same file self.write_connection_files = False # switch Session.key default to secure default_secure(self.config) self.log.debug("Config changed") self.log.debug(repr(self.config))
python
{ "resource": "" }
q279928
ParallelMagics.parallel_execute
test
def parallel_execute(self, cell, block=None, groupby='type', save_name=None): """implementation used by %px and %%parallel""" # defaults: block = self.view.block if block is None else block base = "Parallel" if block else "Async parallel" targets = self.view.targets if isinstance(targets, list) and len(targets) > 10: str_targets = str(targets[:4])[:-1] + ', ..., ' + str(targets[-4:])[1:] else: str_targets = str(targets) if self.verbose: print base + " execution on engine(s): %s" % str_targets result = self.view.execute(cell, silent=False, block=False) self.last_result = result if save_name: self.shell.user_ns[save_name] = result if block: result.get() result.display_outputs(groupby) else: # return AsyncResult only on non-blocking submission return result
python
{ "resource": "" }
q279929
ParallelMagics._enable_autopx
test
def _enable_autopx(self):
    """Enable %autopx mode by saving the original run_cell and installing
    pxrun_cell.
    """
    # override run_cell
    self._original_run_cell = self.shell.run_cell
    self.shell.run_cell = self.pxrun_cell
    self._autopx = True
    print "%autopx enabled"
python
{ "resource": "" }
q279930
ParallelMagics._disable_autopx
test
def _disable_autopx(self):
    """Disable %autopx by restoring the original InteractiveShell.run_cell.
    """
    if self._autopx:
        self.shell.run_cell = self._original_run_cell
        self._autopx = False
        print "%autopx disabled"
python
{ "resource": "" }
q279931
ParallelMagics.pxrun_cell
test
def pxrun_cell(self, raw_cell, store_history=False, silent=False): """drop-in replacement for InteractiveShell.run_cell. This executes code remotely, instead of in the local namespace. See InteractiveShell.run_cell for details. """ if (not raw_cell) or raw_cell.isspace(): return ipself = self.shell with ipself.builtin_trap: cell = ipself.prefilter_manager.prefilter_lines(raw_cell) # Store raw and processed history if store_history: ipself.history_manager.store_inputs(ipself.execution_count, cell, raw_cell) # ipself.logger.log(cell, raw_cell) cell_name = ipself.compile.cache(cell, ipself.execution_count) try: ast.parse(cell, filename=cell_name) except (OverflowError, SyntaxError, ValueError, TypeError, MemoryError): # Case 1 ipself.showsyntaxerror() ipself.execution_count += 1 return None except NameError: # ignore name errors, because we don't know the remote keys pass if store_history: # Write output to the database. Does nothing unless # history output logging is enabled. ipself.history_manager.store_output(ipself.execution_count) # Each cell is a *single* input, regardless of how many lines it has ipself.execution_count += 1 if re.search(r'get_ipython\(\)\.magic\(u?["\']%?autopx', cell): self._disable_autopx() return False else: try: result = self.view.execute(cell, silent=False, block=False) except: ipself.showtraceback() return True else: if self.view.block: try: result.get() except: self.shell.showtraceback() return True else: with ipself.builtin_trap: result.display_outputs() return False
python
{ "resource": "" }
q279932
run_heartbeat
test
def run_heartbeat(message):
    """Internal ``CLOCK_CHANNEL`` consumer to process task runs"""
    then = arrow.get(message['time'])
    now = arrow.get()
    if (now - then) > timezone.timedelta(seconds=(TICK_FREQ+1)):
        pass  # discard old ticks
    else:
        Task.run_tasks()
python
{ "resource": "" }
q279933
run_task
test
def run_task(message):
    """Internal ``RUN_TASK`` consumer to run the task's callable"""
    task = Task.objects.get(pk=message['id'])
    if task.allow_overlap:
        task.run(message)
    else:
        if not task.running:
            task.running = True
            task.save()
            try:
                task.run(message)
            finally:
                task.running = False
                task.save()
python
{ "resource": "" }
q279934
remove_task
test
def remove_task(message):
    """Internal ``KILL_TASK`` consumer to remove retired tasks"""
    task = Task.objects.get(pk=message['id'])
    task.delete()
python
{ "resource": "" }
q279935
patch_protocol_for_agent
test
def patch_protocol_for_agent(protocol): """ Patch the protocol's makeConnection and connectionLost methods to make the protocol and its transport behave more like what `Agent` expects. While `Agent` is the driving force behind this, other clients and servers will no doubt have similar requirements. """ old_makeConnection = protocol.makeConnection old_connectionLost = protocol.connectionLost def new_makeConnection(transport): patch_transport_fake_push_producer(transport) patch_transport_abortConnection(transport, protocol) return old_makeConnection(transport) def new_connectionLost(reason): # Replace ConnectionDone with ConnectionAborted if we aborted. if protocol._fake_connection_aborted and reason.check(ConnectionDone): reason = Failure(ConnectionAborted()) return old_connectionLost(reason) protocol.makeConnection = new_makeConnection protocol.connectionLost = new_connectionLost protocol._fake_connection_aborted = False
python
{ "resource": "" }
q279936
patch_if_missing
test
def patch_if_missing(obj, name, method):
    """
    Patch a method onto an object if it isn't already there.
    """
    setattr(obj, name, getattr(obj, name, method))
python
{ "resource": "" }
q279937
FakeConnection.accept_connection
test
def accept_connection(self):
    """
    Accept a pending connection.
    """
    assert self.pending, "Connection is not pending."
    self.server_protocol = self.server.server_factory.buildProtocol(None)
    self._accept_d.callback(
        FakeServerProtocolWrapper(self, self.server_protocol))
    return self.await_connected()
python
{ "resource": "" }
q279938
FakeConnection.reject_connection
test
def reject_connection(self, reason=None):
    """
    Reject a pending connection.
    """
    assert self.pending, "Connection is not pending."
    if reason is None:
        reason = ConnectionRefusedError()
    self._accept_d.errback(reason)
python
{ "resource": "" }
q279939
FakeHttpServer.get_agent
test
def get_agent(self, reactor=None, contextFactory=None):
    """
    Returns an IAgent that makes requests to this fake server.
    """
    return ProxyAgentWithContext(
        self.endpoint, reactor=reactor, contextFactory=contextFactory)
python
{ "resource": "" }
q279940
SaveHookMixin.form_valid
test
def form_valid(self, form):
    """
    Calls pre and post save hooks.
    """
    self.object = form.save(commit=False)
    # Invoke pre_save hook, and allow it to abort the saving
    # process and do a redirect.
    response = self.pre_save(self.object)
    if response:
        return response
    self.object.save()
    form.save_m2m()
    self.post_save(self.object)
    return HttpResponseRedirect(self.get_success_url())
python
{ "resource": "" }
q279941
SaveHookMixin.delete
test
def delete(self, request, *args, **kwargs):
    """
    Calls pre and post delete hooks for DelteViews.
    """
    self.object = self.get_object()
    success_url = self.get_success_url()
    self.pre_delete(self.object)
    self.object.delete()
    self.post_delete(self.object)
    return HttpResponseRedirect(success_url)
python
{ "resource": "" }
q279942
UserViewMixin.pre_save
test
def pre_save(self, instance):
    super(UserViewMixin, self).pre_save(instance)
    """
    Use SaveHookMixin pre_save to set the user.
    """
    if self.request.user.is_authenticated():
        for field in self.user_field:
            setattr(instance, field, self.request.user)
python
{ "resource": "" }
q279943
SummaryReporter.report
test
def report(self, morfs, outfile=None): """Writes a report summarizing coverage statistics per module. `outfile` is a file object to write the summary to. """ self.find_code_units(morfs) # Prepare the formatting strings max_name = max([len(cu.name) for cu in self.code_units] + [5]) fmt_name = "%%- %ds " % max_name fmt_err = "%s %s: %s\n" header = (fmt_name % "Name") + " Stmts Miss" fmt_coverage = fmt_name + "%6d %6d" if self.branches: header += " Branch BrMiss" fmt_coverage += " %6d %6d" width100 = Numbers.pc_str_width() header += "%*s" % (width100+4, "Cover") fmt_coverage += "%%%ds%%%%" % (width100+3,) if self.config.show_missing: header += " Missing" fmt_coverage += " %s" rule = "-" * len(header) + "\n" header += "\n" fmt_coverage += "\n" if not outfile: outfile = sys.stdout # Write the header outfile.write(header) outfile.write(rule) total = Numbers() for cu in self.code_units: try: analysis = self.coverage._analyze(cu) nums = analysis.numbers args = (cu.name, nums.n_statements, nums.n_missing) if self.branches: args += (nums.n_branches, nums.n_missing_branches) args += (nums.pc_covered_str,) if self.config.show_missing: args += (analysis.missing_formatted(),) outfile.write(fmt_coverage % args) total += nums except KeyboardInterrupt: # pragma: not covered raise except: report_it = not self.config.ignore_errors if report_it: typ, msg = sys.exc_info()[:2] if typ is NotPython and not cu.should_be_python(): report_it = False if report_it: outfile.write(fmt_err % (cu.name, typ.__name__, msg)) if total.n_files > 1: outfile.write(rule) args = ("TOTAL", total.n_statements, total.n_missing) if self.branches: args += (total.n_branches, total.n_missing_branches) args += (total.pc_covered_str,) if self.config.show_missing: args += ("",) outfile.write(fmt_coverage % args) return total.pc_covered
python
{ "resource": "" }
q279944
ModuleReloader.check
test
def check(self, check_all=False): """Check whether some modules need to be reloaded.""" if not self.enabled and not check_all: return if check_all or self.check_all: modules = sys.modules.keys() else: modules = self.modules.keys() for modname in modules: m = sys.modules.get(modname, None) if modname in self.skip_modules: continue if not hasattr(m, '__file__'): continue if m.__name__ == '__main__': # we cannot reload(__main__) continue filename = m.__file__ path, ext = os.path.splitext(filename) if ext.lower() == '.py': ext = PY_COMPILED_EXT pyc_filename = pyfile.cache_from_source(filename) py_filename = filename else: pyc_filename = filename try: py_filename = pyfile.source_from_cache(filename) except ValueError: continue try: pymtime = os.stat(py_filename).st_mtime if pymtime <= os.stat(pyc_filename).st_mtime: continue if self.failed.get(py_filename, None) == pymtime: continue except OSError: continue try: superreload(m, reload, self.old_objects) if py_filename in self.failed: del self.failed[py_filename] except: print >> sys.stderr, "[autoreload of %s failed: %s]" % ( modname, traceback.format_exc(1)) self.failed[py_filename] = pymtime
python
{ "resource": "" }
q279945
editor
test
def editor(self, filename, linenum=None, wait=True): """Open the default editor at the given filename and linenumber. This is IPython's default editor hook, you can use it as an example to write your own modified one. To set your own editor function as the new editor hook, call ip.set_hook('editor',yourfunc).""" # IPython configures a default editor at startup by reading $EDITOR from # the environment, and falling back on vi (unix) or notepad (win32). editor = self.editor # marker for at which line to open the file (for existing objects) if linenum is None or editor=='notepad': linemark = '' else: linemark = '+%d' % int(linenum) # Enclose in quotes if necessary and legal if ' ' in editor and os.path.isfile(editor) and editor[0] != '"': editor = '"%s"' % editor # Call the actual editor proc = subprocess.Popen('%s %s %s' % (editor, linemark, filename), shell=True) if wait and proc.wait() != 0: raise TryNext()
python
{ "resource": "" }
q279946
fix_error_editor
test
def fix_error_editor(self,filename,linenum,column,msg): """Open the editor at the given filename, linenumber, column and show an error message. This is used for correcting syntax errors. The current implementation only has special support for the VIM editor, and falls back on the 'editor' hook if VIM is not used. Call ip.set_hook('fix_error_editor',youfunc) to use your own function, """ def vim_quickfix_file(): t = tempfile.NamedTemporaryFile() t.write('%s:%d:%d:%s\n' % (filename,linenum,column,msg)) t.flush() return t if os.path.basename(self.editor) != 'vim': self.hooks.editor(filename,linenum) return t = vim_quickfix_file() try: if os.system('vim --cmd "set errorformat=%f:%l:%c:%m" -q ' + t.name): raise TryNext() finally: t.close()
python
{ "resource": "" }
q279947
clipboard_get
test
def clipboard_get(self): """ Get text from the clipboard. """ from IPython.lib.clipboard import ( osx_clipboard_get, tkinter_clipboard_get, win32_clipboard_get ) if sys.platform == 'win32': chain = [win32_clipboard_get, tkinter_clipboard_get] elif sys.platform == 'darwin': chain = [osx_clipboard_get, tkinter_clipboard_get] else: chain = [tkinter_clipboard_get] dispatcher = CommandChainDispatcher() for func in chain: dispatcher.add(func) text = dispatcher() return text
python
{ "resource": "" }
q279948
CommandChainDispatcher.add
test
def add(self, func, priority=0):
    """ Add a func to the cmd chain with given priority """
    self.chain.append((priority, func))
    self.chain.sort(key=lambda x: x[0])
python
{ "resource": "" }
q279949
get_metadata
test
def get_metadata(path_or_module, metadata_version=None): """ Try to create a Distribution 'path_or_module'. o 'path_or_module' may be a module object. o If a string, 'path_or_module' may point to an sdist file, a bdist file, an installed package, or a working checkout (if it contains PKG-INFO). o Return None if 'path_or_module' can't be parsed. """ if isinstance(path_or_module, ModuleType): try: return Installed(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass try: __import__(path_or_module) except ImportError: pass else: try: return Installed(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass if os.path.isfile(path_or_module): try: return SDist(path_or_module, metadata_version) except (ValueError, IOError): pass try: return BDist(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass try: return Wheel(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass if os.path.isdir(path_or_module): try: return Develop(path_or_module, metadata_version) except (ValueError, IOError): #pragma NO COVER pass
python
{ "resource": "" }
q279950
Pdb.configure
test
def configure(self, options, conf):
    """Configure which kinds of exceptions trigger plugin.
    """
    self.conf = conf
    self.enabled = options.debugErrors or options.debugFailures
    self.enabled_for_errors = options.debugErrors
    self.enabled_for_failures = options.debugFailures
python
{ "resource": "" }
q279951
import_item
test
def import_item(name): """Import and return bar given the string foo.bar.""" package = '.'.join(name.split('.')[0:-1]) obj = name.split('.')[-1] # Note: the original code for this was the following. We've left it # visible for now in case the new implementation shows any problems down # the road, to make it easier on anyone looking for a problem. This code # should be removed once we're comfortable we didn't break anything. ## execString = 'from %s import %s' % (package, obj) ## try: ## exec execString ## except SyntaxError: ## raise ImportError("Invalid class specification: %s" % name) ## exec 'temp = %s' % obj ## return temp if package: module = __import__(package,fromlist=[obj]) try: pak = module.__dict__[obj] except KeyError: raise ImportError('No module named %s' % obj) return pak else: return __import__(obj)
python
{ "resource": "" }
q279952
try_passwordless_ssh
test
def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Attempt to make an ssh connection without a password.
    This is mainly used for requiring password input only once
    when many tunnels may be connected to the same server.

    If paramiko is None, the default for the platform is chosen.
    """
    if paramiko is None:
        paramiko = sys.platform == 'win32'
    if not paramiko:
        f = _try_passwordless_openssh
    else:
        f = _try_passwordless_paramiko
    return f(server, keyfile)
python
{ "resource": "" }
q279953
_try_passwordless_openssh
test
def _try_passwordless_openssh(server, keyfile): """Try passwordless login with shell ssh command.""" if pexpect is None: raise ImportError("pexpect unavailable, use paramiko") cmd = 'ssh -f '+ server if keyfile: cmd += ' -i ' + keyfile cmd += ' exit' p = pexpect.spawn(cmd) while True: try: p.expect('[Pp]assword:', timeout=.1) except pexpect.TIMEOUT: continue except pexpect.EOF: return True else: return False
python
{ "resource": "" }
q279954
_try_passwordless_paramiko
test
def _try_passwordless_paramiko(server, keyfile): """Try passwordless login with paramiko.""" if paramiko is None: msg = "Paramiko unavaliable, " if sys.platform == 'win32': msg += "Paramiko is required for ssh tunneled connections on Windows." else: msg += "use OpenSSH." raise ImportError(msg) username, server, port = _split_server(server) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) try: client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True) except paramiko.AuthenticationException: return False else: client.close() return True
python
{ "resource": "" }
q279955
tunnel_connection
test
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60): """Connect a socket to an address via an ssh tunnel. This is a wrapper for socket.connect(addr), when addr is not accessible from the local machine. It simply creates an ssh tunnel using the remaining args, and calls socket.connect('tcp://localhost:lport') where lport is the randomly selected local port of the tunnel. """ new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout) socket.connect(new_url) return tunnel
python
{ "resource": "" }
q279956
open_tunnel
test
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60): """Open a tunneled connection from a 0MQ url. For use inside tunnel_connection. Returns ------- (url, tunnel): The 0MQ url that has been forwarded, and the tunnel object """ lport = select_random_ports(1)[0] transport, addr = addr.split('://') ip,rport = addr.split(':') rport = int(rport) if paramiko is None: paramiko = sys.platform == 'win32' if paramiko: tunnelf = paramiko_tunnel else: tunnelf = openssh_tunnel tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout) return 'tcp://127.0.0.1:%i'%lport, tunnel
python
{ "resource": "" }
q279957
Client._stop_scheduling_tasks
test
def _stop_scheduling_tasks(self):
    """Stop scheduling tasks because an engine has been unregistered
    from a pure ZMQ scheduler.
    """
    self._task_socket.close()
    self._task_socket = None
    msg = "An engine has been unregistered, and we are using pure " +\
          "ZMQ task scheduling. Task farming will be disabled."
    if self.outstanding:
        msg += " If you were running tasks when this happened, " +\
               "some `outstanding` msg_ids may never resolve."
    warnings.warn(msg, RuntimeWarning)
python
{ "resource": "" }
q279958
Client._unwrap_exception
test
def _unwrap_exception(self, content):
    """unwrap exception, and remap engine_id to int."""
    e = error.unwrap_exception(content)
    # print e.traceback
    if e.engine_info:
        e_uuid = e.engine_info['engine_uuid']
        eid = self._engines[e_uuid]
        e.engine_info['engine_id'] = eid
    return e
python
{ "resource": "" }
q279959
Client._register_engine
test
def _register_engine(self, msg):
    """Register a new engine, and update our connection info."""
    content = msg['content']
    eid = content['id']
    d = {eid : content['queue']}
    self._update_engines(d)
python
{ "resource": "" }
q279960
Client._unregister_engine
test
def _unregister_engine(self, msg):
    """Unregister an engine that has died."""
    content = msg['content']
    eid = int(content['id'])
    if eid in self._ids:
        self._ids.remove(eid)
        uuid = self._engines.pop(eid)
        self._handle_stranded_msgs(eid, uuid)
    if self._task_socket and self._task_scheme == 'pure':
        self._stop_scheduling_tasks()
python
{ "resource": "" }
q279961
Client._handle_execute_reply
test
def _handle_execute_reply(self, msg): """Save the reply to an execute_request into our results. execute messages are never actually used. apply is used instead. """ parent = msg['parent_header'] msg_id = parent['msg_id'] if msg_id not in self.outstanding: if msg_id in self.history: print ("got stale result: %s"%msg_id) else: print ("got unknown result: %s"%msg_id) else: self.outstanding.remove(msg_id) content = msg['content'] header = msg['header'] # construct metadata: md = self.metadata[msg_id] md.update(self._extract_metadata(header, parent, content)) # is this redundant? self.metadata[msg_id] = md e_outstanding = self._outstanding_dict[md['engine_uuid']] if msg_id in e_outstanding: e_outstanding.remove(msg_id) # construct result: if content['status'] == 'ok': self.results[msg_id] = ExecuteReply(msg_id, content, md) elif content['status'] == 'aborted': self.results[msg_id] = error.TaskAborted(msg_id) elif content['status'] == 'resubmitted': # TODO: handle resubmission pass else: self.results[msg_id] = self._unwrap_exception(content)
python
{ "resource": "" }
q279962
Client._flush_notifications
test
def _flush_notifications(self): """Flush notifications of engine registrations waiting in ZMQ queue.""" idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK) while msg is not None: if self.debug: pprint(msg) msg_type = msg['header']['msg_type'] handler = self._notification_handlers.get(msg_type, None) if handler is None: raise Exception("Unhandled message type: %s"%msg.msg_type) else: handler(msg) idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
python
{ "resource": "" }
q279963
Client._flush_results
test
def _flush_results(self, sock): """Flush task or queue results waiting in ZMQ queue.""" idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) while msg is not None: if self.debug: pprint(msg) msg_type = msg['header']['msg_type'] handler = self._queue_handlers.get(msg_type, None) if handler is None: raise Exception("Unhandled message type: %s"%msg.msg_type) else: handler(msg) idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
python
{ "resource": "" }
q279964
Client._flush_control
test
def _flush_control(self, sock):
    """Flush replies from the control channel waiting
    in the ZMQ queue.

    Currently: ignore them."""
    if self._ignored_control_replies <= 0:
        return
    idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
    while msg is not None:
        self._ignored_control_replies -= 1
        if self.debug:
            pprint(msg)
        idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
python
{ "resource": "" }
q279965
Client._flush_ignored_control
test
def _flush_ignored_control(self):
    """flush ignored control replies"""
    while self._ignored_control_replies > 0:
        self.session.recv(self._control_socket)
        self._ignored_control_replies -= 1
python
{ "resource": "" }
q279966
Client._flush_iopub
test
def _flush_iopub(self, sock): """Flush replies from the iopub channel waiting in the ZMQ queue. """ idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) while msg is not None: if self.debug: pprint(msg) parent = msg['parent_header'] # ignore IOPub messages with no parent. # Caused by print statements or warnings from before the first execution. if not parent: continue msg_id = parent['msg_id'] content = msg['content'] header = msg['header'] msg_type = msg['header']['msg_type'] # init metadata: md = self.metadata[msg_id] if msg_type == 'stream': name = content['name'] s = md[name] or '' md[name] = s + content['data'] elif msg_type == 'pyerr': md.update({'pyerr' : self._unwrap_exception(content)}) elif msg_type == 'pyin': md.update({'pyin' : content['code']}) elif msg_type == 'display_data': md['outputs'].append(content) elif msg_type == 'pyout': md['pyout'] = content elif msg_type == 'status': # idle message comes after all outputs if content['execution_state'] == 'idle': md['outputs_ready'] = True else: # unhandled msg_type (status, etc.) pass # reduntant? self.metadata[msg_id] = md idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
python
{ "resource": "" }
q279967
Client._spin_every
test
def _spin_every(self, interval=1):
    """target func for use in spin_thread"""
    while True:
        if self._stop_spinning.is_set():
            return
        time.sleep(interval)
        self.spin()
python
{ "resource": "" }
q279968
Client.stop_spin_thread
test
def stop_spin_thread(self):
    """stop background spin_thread, if any"""
    if self._spin_thread is not None:
        self._stop_spinning.set()
        self._spin_thread.join()
        self._spin_thread = None
python
{ "resource": "" }
q279969
Client.spin
test
def spin(self):
    """Flush any registration notifications and execution results
    waiting in the ZMQ queue.
    """
    if self._notification_socket:
        self._flush_notifications()
    if self._iopub_socket:
        self._flush_iopub(self._iopub_socket)
    if self._mux_socket:
        self._flush_results(self._mux_socket)
    if self._task_socket:
        self._flush_results(self._task_socket)
    if self._control_socket:
        self._flush_control(self._control_socket)
    if self._query_socket:
        self._flush_ignored_hub_replies()
python
{ "resource": "" }
q279970
Client.wait
test
def wait(self, jobs=None, timeout=-1): """waits on one or more `jobs`, for up to `timeout` seconds. Parameters ---------- jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects ints are indices to self.history strs are msg_ids default: wait on all outstanding messages timeout : float a time in seconds, after which to give up. default is -1, which means no timeout Returns ------- True : when all msg_ids are done False : timeout reached, some msg_ids still outstanding """ tic = time.time() if jobs is None: theids = self.outstanding else: if isinstance(jobs, (int, basestring, AsyncResult)): jobs = [jobs] theids = set() for job in jobs: if isinstance(job, int): # index access job = self.history[job] elif isinstance(job, AsyncResult): map(theids.add, job.msg_ids) continue theids.add(job) if not theids.intersection(self.outstanding): return True self.spin() while theids.intersection(self.outstanding): if timeout >= 0 and ( time.time()-tic ) > timeout: break time.sleep(1e-3) self.spin() return len(theids.intersection(self.outstanding)) == 0
python
{ "resource": "" }
q279971
Client.send_apply_request
test
def send_apply_request(self, socket, f, args=None, kwargs=None, subheader=None, track=False, ident=None): """construct and send an apply message via a socket. This is the principal method with which all engine execution is performed by views. """ if self._closed: raise RuntimeError("Client cannot be used after its sockets have been closed") # defaults: args = args if args is not None else [] kwargs = kwargs if kwargs is not None else {} subheader = subheader if subheader is not None else {} # validate arguments if not callable(f) and not isinstance(f, Reference): raise TypeError("f must be callable, not %s"%type(f)) if not isinstance(args, (tuple, list)): raise TypeError("args must be tuple or list, not %s"%type(args)) if not isinstance(kwargs, dict): raise TypeError("kwargs must be dict, not %s"%type(kwargs)) if not isinstance(subheader, dict): raise TypeError("subheader must be dict, not %s"%type(subheader)) bufs = util.pack_apply_message(f,args,kwargs) msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident, subheader=subheader, track=track) msg_id = msg['header']['msg_id'] self.outstanding.add(msg_id) if ident: # possibly routed to a specific engine if isinstance(ident, list): ident = ident[-1] if ident in self._engines.values(): # save for later, in case of engine death self._outstanding_dict[ident].add(msg_id) self.history.append(msg_id) self.metadata[msg_id]['submitted'] = datetime.now() return msg
python
{ "resource": "" }
q279972
Client.send_execute_request
test
def send_execute_request(self, socket, code, silent=True, subheader=None, ident=None): """construct and send an execute request via a socket. """ if self._closed: raise RuntimeError("Client cannot be used after its sockets have been closed") # defaults: subheader = subheader if subheader is not None else {} # validate arguments if not isinstance(code, basestring): raise TypeError("code must be text, not %s" % type(code)) if not isinstance(subheader, dict): raise TypeError("subheader must be dict, not %s" % type(subheader)) content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={}) msg = self.session.send(socket, "execute_request", content=content, ident=ident, subheader=subheader) msg_id = msg['header']['msg_id'] self.outstanding.add(msg_id) if ident: # possibly routed to a specific engine if isinstance(ident, list): ident = ident[-1] if ident in self._engines.values(): # save for later, in case of engine death self._outstanding_dict[ident].add(msg_id) self.history.append(msg_id) self.metadata[msg_id]['submitted'] = datetime.now() return msg
python
{ "resource": "" }
q279973
Client.get_result
test
def get_result(self, indices_or_msg_ids=None, block=None): """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object. If the client already has the results, no request to the Hub will be made. This is a convenient way to construct AsyncResult objects, which are wrappers that include metadata about execution, and allow for awaiting results that were not submitted by this Client. It can also be a convenient way to retrieve the metadata associated with blocking execution, since it always retrieves Examples -------- :: In [10]: r = client.apply() Parameters ---------- indices_or_msg_ids : integer history index, str msg_id, or list of either The indices or msg_ids of indices to be retrieved block : bool Whether to wait for the result to be done Returns ------- AsyncResult A single AsyncResult object will always be returned. AsyncHubResult A subclass of AsyncResult that retrieves results from the Hub """ block = self.block if block is None else block if indices_or_msg_ids is None: indices_or_msg_ids = -1 if not isinstance(indices_or_msg_ids, (list,tuple)): indices_or_msg_ids = [indices_or_msg_ids] theids = [] for id in indices_or_msg_ids: if isinstance(id, int): id = self.history[id] if not isinstance(id, basestring): raise TypeError("indices must be str or int, not %r"%id) theids.append(id) local_ids = filter(lambda msg_id: msg_id in self.history or msg_id in self.results, theids) remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids) if remote_ids: ar = AsyncHubResult(self, msg_ids=theids) else: ar = AsyncResult(self, msg_ids=theids) if block: ar.wait() return ar
python
{ "resource": "" }
q279974
Client.queue_status
test
def queue_status(self, targets='all', verbose=False): """Fetch the status of engine queues. Parameters ---------- targets : int/str/list of ints/strs the engines whose states are to be queried. default : all verbose : bool Whether to return lengths only, or lists of ids for each element """ if targets == 'all': # allow 'all' to be evaluated on the engine engine_ids = None else: engine_ids = self._build_targets(targets)[1] content = dict(targets=engine_ids, verbose=verbose) self.session.send(self._query_socket, "queue_request", content=content) idents,msg = self.session.recv(self._query_socket, 0) if self.debug: pprint(msg) content = msg['content'] status = content.pop('status') if status != 'ok': raise self._unwrap_exception(content) content = rekey(content) if isinstance(targets, int): return content[targets] else: return content
python
{ "resource": "" }
q279975
Client.purge_results
test
def purge_results(self, jobs=[], targets=[]): """Tell the Hub to forget results. Individual results can be purged by msg_id, or the entire history of specific targets can be purged. Use `purge_results('all')` to scrub everything from the Hub's db. Parameters ---------- jobs : str or list of str or AsyncResult objects the msg_ids whose results should be forgotten. targets : int/str/list of ints/strs The targets, by int_id, whose entire history is to be purged. default : None """ if not targets and not jobs: raise ValueError("Must specify at least one of `targets` and `jobs`") if targets: targets = self._build_targets(targets)[1] # construct msg_ids from jobs if jobs == 'all': msg_ids = jobs else: msg_ids = [] if isinstance(jobs, (basestring,AsyncResult)): jobs = [jobs] bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs) if bad_ids: raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0]) for j in jobs: if isinstance(j, AsyncResult): msg_ids.extend(j.msg_ids) else: msg_ids.append(j) content = dict(engine_ids=targets, msg_ids=msg_ids) self.session.send(self._query_socket, "purge_request", content=content) idents, msg = self.session.recv(self._query_socket, 0) if self.debug: pprint(msg) content = msg['content'] if content['status'] != 'ok': raise self._unwrap_exception(content)
python
{ "resource": "" }
q279976
Client.hub_history
test
def hub_history(self): """Get the Hub's history Just like the Client, the Hub has a history, which is a list of msg_ids. This will contain the history of all clients, and, depending on configuration, may contain history across multiple cluster sessions. Any msg_id returned here is a valid argument to `get_result`. Returns ------- msg_ids : list of strs list of all msg_ids, ordered by task submission time. """ self.session.send(self._query_socket, "history_request", content={}) idents, msg = self.session.recv(self._query_socket, 0) if self.debug: pprint(msg) content = msg['content'] if content['status'] != 'ok': raise self._unwrap_exception(content) else: return content['history']
python
{ "resource": "" }
q279977
Client.db_query
test
def db_query(self, query, keys=None): """Query the Hub's TaskRecord database This will return a list of task record dicts that match `query` Parameters ---------- query : mongodb query dict The search dict. See mongodb query docs for details. keys : list of strs [optional] The subset of keys to be returned. The default is to fetch everything but buffers. 'msg_id' will *always* be included. """ if isinstance(keys, basestring): keys = [keys] content = dict(query=query, keys=keys) self.session.send(self._query_socket, "db_request", content=content) idents, msg = self.session.recv(self._query_socket, 0) if self.debug: pprint(msg) content = msg['content'] if content['status'] != 'ok': raise self._unwrap_exception(content) records = content['records'] buffer_lens = content['buffer_lens'] result_buffer_lens = content['result_buffer_lens'] buffers = msg['buffers'] has_bufs = buffer_lens is not None has_rbufs = result_buffer_lens is not None for i,rec in enumerate(records): # relink buffers if has_bufs: blen = buffer_lens[i] rec['buffers'], buffers = buffers[:blen],buffers[blen:] if has_rbufs: blen = result_buffer_lens[i] rec['result_buffers'], buffers = buffers[:blen],buffers[blen:] return records
python
{ "resource": "" }
q279978
_opcode_set
test
def _opcode_set(*names):
    """Return a set of opcodes by the names in `names`."""
    s = set()
    for name in names:
        try:
            s.add(_opcode(name))
        except KeyError:
            pass
    return s
python
{ "resource": "" }
q279979
CodeParser._get_byte_parser
test
def _get_byte_parser(self):
    """Create a ByteParser on demand."""
    if not self._byte_parser:
        self._byte_parser = \
            ByteParser(text=self.text, filename=self.filename)
    return self._byte_parser
python
{ "resource": "" }
q279980
CodeParser.lines_matching
test
def lines_matching(self, *regexes): """Find the lines matching one of a list of regexes. Returns a set of line numbers, the lines that contain a match for one of the regexes in `regexes`. The entire line needn't match, just a part of it. """ regex_c = re.compile(join_regex(regexes)) matches = set() for i, ltext in enumerate(self.lines): if regex_c.search(ltext): matches.add(i+1) return matches
python
{ "resource": "" }
q279981
CodeParser._raw_parse
test
def _raw_parse(self): """Parse the source to find the interesting facts about its lines. A handful of member fields are updated. """ # Find lines which match an exclusion pattern. if self.exclude: self.excluded = self.lines_matching(self.exclude) # Tokenize, to find excluded suites, to find docstrings, and to find # multi-line statements. indent = 0 exclude_indent = 0 excluding = False prev_toktype = token.INDENT first_line = None empty = True tokgen = generate_tokens(self.text) for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: if self.show_tokens: # pragma: not covered print("%10s %5s %-20r %r" % ( tokenize.tok_name.get(toktype, toktype), nice_pair((slineno, elineno)), ttext, ltext )) if toktype == token.INDENT: indent += 1 elif toktype == token.DEDENT: indent -= 1 elif toktype == token.NAME and ttext == 'class': # Class definitions look like branches in the byte code, so # we need to exclude them. The simplest way is to note the # lines with the 'class' keyword. self.classdefs.add(slineno) elif toktype == token.OP and ttext == ':': if not excluding and elineno in self.excluded: # Start excluding a suite. We trigger off of the colon # token so that the #pragma comment will be recognized on # the same line as the colon. exclude_indent = indent excluding = True elif toktype == token.STRING and prev_toktype == token.INDENT: # Strings that are first on an indented line are docstrings. # (a trick from trace.py in the stdlib.) This works for # 99.9999% of cases. For the rest (!) see: # http://stackoverflow.com/questions/1769332/x/1769794#1769794 self.docstrings.update(range(slineno, elineno+1)) elif toktype == token.NEWLINE: if first_line is not None and elineno != first_line: # We're at the end of a line, and we've ended on a # different line than the first line of the statement, # so record a multi-line range. rng = (first_line, elineno) for l in range(first_line, elineno+1): self.multiline[l] = rng first_line = None if ttext.strip() and toktype != tokenize.COMMENT: # A non-whitespace token. empty = False if first_line is None: # The token is not whitespace, and is the first in a # statement. first_line = slineno # Check whether to end an excluded suite. if excluding and indent <= exclude_indent: excluding = False if excluding: self.excluded.add(elineno) prev_toktype = toktype # Find the starts of the executable statements. if not empty: self.statement_starts.update(self.byte_parser._find_statements())
python
{ "resource": "" }
q279982
CodeParser.first_line
test
def first_line(self, line):
    """Return the first line number of the statement including `line`."""
    rng = self.multiline.get(line)
    if rng:
        first_line = rng[0]
    else:
        first_line = line
    return first_line
python
{ "resource": "" }
q279983
CodeParser.first_lines
test
def first_lines(self, lines, *ignores): """Map the line numbers in `lines` to the correct first line of the statement. Skip any line mentioned in any of the sequences in `ignores`. Returns a set of the first lines. """ ignore = set() for ign in ignores: ignore.update(ign) lset = set() for l in lines: if l in ignore: continue new_l = self.first_line(l) if new_l not in ignore: lset.add(new_l) return lset
python
{ "resource": "" }
q279984
CodeParser.parse_source
test
def parse_source(self): """Parse source text to find executable lines, excluded lines, etc. Return values are 1) a set of executable line numbers, and 2) a set of excluded line numbers. Reported line numbers are normalized to the first line of multi-line statements. """ try: self._raw_parse() except (tokenize.TokenError, IndentationError): _, tokerr, _ = sys.exc_info() msg, lineno = tokerr.args raise NotPython( "Couldn't parse '%s' as Python source: '%s' at %s" % (self.filename, msg, lineno) ) excluded_lines = self.first_lines(self.excluded) lines = self.first_lines( self.statement_starts, excluded_lines, self.docstrings ) return lines, excluded_lines
python
{ "resource": "" }
q279985
CodeParser.arcs
test
def arcs(self): """Get information about the arcs available in the code. Returns a sorted list of line number pairs. Line numbers have been normalized to the first line of multiline statements. """ all_arcs = [] for l1, l2 in self.byte_parser._all_arcs(): fl1 = self.first_line(l1) fl2 = self.first_line(l2) if fl1 != fl2: all_arcs.append((fl1, fl2)) return sorted(all_arcs)
python
{ "resource": "" }
q279986
CodeParser.exit_counts
test
def exit_counts(self): """Get a mapping from line numbers to the count of exits from that line. Excluded lines do not appear in the mapping. """ excluded_lines = self.first_lines(self.excluded) exit_counts = {} for l1, l2 in self.arcs(): if l1 < 0: # Don't ever report -1 as a line number continue if l1 in excluded_lines: # Don't report excluded lines as line numbers. continue if l2 in excluded_lines: # Arcs to excluded lines shouldn't count. continue if l1 not in exit_counts: exit_counts[l1] = 0 exit_counts[l1] += 1 # Class definitions have one extra exit, so remove one for each: for l in self.classdefs: # Ensure key is there: classdefs can include excluded lines. if l in exit_counts: exit_counts[l] -= 1 return exit_counts
python
{ "resource": "" }
q279987
ByteParser.child_parsers
test
def child_parsers(self): """Iterate over all the code objects nested within this one. The iteration includes `self` as its first value. """ children = CodeObjects(self.code) return [ByteParser(code=c, text=self.text) for c in children]
python
{ "resource": "" }
q279988
ByteParser._bytes_lines
test
def _bytes_lines(self): """Map byte offsets to line numbers in `code`. Uses co_lnotab described in Python/compile.c to map byte offsets to line numbers. Produces a sequence: (b0, l0), (b1, l1), ... Only byte offsets that correspond to line numbers are included in the results. """ # Adapted from dis.py in the standard library. byte_increments = bytes_to_ints(self.code.co_lnotab[0::2]) line_increments = bytes_to_ints(self.code.co_lnotab[1::2]) last_line_num = None line_num = self.code.co_firstlineno byte_num = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: if line_num != last_line_num: yield (byte_num, line_num) last_line_num = line_num byte_num += byte_incr line_num += line_incr if line_num != last_line_num: yield (byte_num, line_num)
python
{ "resource": "" }
q279989
ByteParser._find_statements
test
def _find_statements(self): """Find the statements in `self.code`. Produce a sequence of line numbers that start statements. Recurses into all code objects reachable from `self.code`. """ for bp in self.child_parsers(): # Get all of the lineno information from this code. for _, l in bp._bytes_lines(): yield l
python
{ "resource": "" }
q279990
ByteParser._block_stack_repr
test
def _block_stack_repr(self, block_stack): """Get a string version of `block_stack`, for debugging.""" blocks = ", ".join( ["(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack] ) return "[" + blocks + "]"
python
{ "resource": "" }
q279991
ByteParser._split_into_chunks
test
def _split_into_chunks(self): """Split the code object into a list of `Chunk` objects. Each chunk is only entered at its first instruction, though there can be many exits from a chunk. Returns a list of `Chunk` objects. """ # The list of chunks so far, and the one we're working on. chunks = [] chunk = None # A dict mapping byte offsets of line starts to the line numbers. bytes_lines_map = dict(self._bytes_lines()) # The block stack: loops and try blocks get pushed here for the # implicit jumps that can occur. # Each entry is a tuple: (block type, destination) block_stack = [] # Some op codes are followed by branches that should be ignored. This # is a count of how many ignores are left. ignore_branch = 0 # We have to handle the last two bytecodes specially. ult = penult = None # Get a set of all of the jump-to points. jump_to = set() bytecodes = list(ByteCodes(self.code.co_code)) for bc in bytecodes: if bc.jump_to >= 0: jump_to.add(bc.jump_to) chunk_lineno = 0 # Walk the byte codes building chunks. for bc in bytecodes: # Maybe have to start a new chunk start_new_chunk = False first_chunk = False if bc.offset in bytes_lines_map: # Start a new chunk for each source line number. start_new_chunk = True chunk_lineno = bytes_lines_map[bc.offset] first_chunk = True elif bc.offset in jump_to: # To make chunks have a single entrance, we have to make a new # chunk when we get to a place some bytecode jumps to. start_new_chunk = True elif bc.op in OPS_CHUNK_BEGIN: # Jumps deserve their own unnumbered chunk. This fixes # problems with jumps to jumps getting confused. start_new_chunk = True if not chunk or start_new_chunk: if chunk: chunk.exits.add(bc.offset) chunk = Chunk(bc.offset, chunk_lineno, first_chunk) chunks.append(chunk) # Look at the opcode if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP: if ignore_branch: # Someone earlier wanted us to ignore this branch. ignore_branch -= 1 else: # The opcode has a jump, it's an exit for this chunk. chunk.exits.add(bc.jump_to) if bc.op in OPS_CODE_END: # The opcode can exit the code object. chunk.exits.add(-self.code.co_firstlineno) if bc.op in OPS_PUSH_BLOCK: # The opcode adds a block to the block_stack. block_stack.append((bc.op, bc.jump_to)) if bc.op in OPS_POP_BLOCK: # The opcode pops a block from the block stack. block_stack.pop() if bc.op in OPS_CHUNK_END: # This opcode forces the end of the chunk. if bc.op == OP_BREAK_LOOP: # A break is implicit: jump where the top of the # block_stack points. chunk.exits.add(block_stack[-1][1]) chunk = None if bc.op == OP_END_FINALLY: # For the finally clause we need to find the closest exception # block, and use its jump target as an exit. for block in reversed(block_stack): if block[0] in OPS_EXCEPT_BLOCKS: chunk.exits.add(block[1]) break if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION: # This is an except clause. We want to overlook the next # branch, so that except's don't count as branches. ignore_branch += 1 penult = ult ult = bc if chunks: # The last two bytecodes could be a dummy "return None" that # shouldn't be counted as real code. Every Python code object seems # to end with a return, and a "return None" is inserted if there # isn't an explicit return in the source. if ult and penult: if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE: if self.code.co_consts[penult.arg] is None: # This is "return None", but is it dummy? A real line # would be a last chunk all by itself. 
if chunks[-1].byte != penult.offset: ex = -self.code.co_firstlineno # Split the last chunk last_chunk = chunks[-1] last_chunk.exits.remove(ex) last_chunk.exits.add(penult.offset) chunk = Chunk( penult.offset, last_chunk.line, False ) chunk.exits.add(ex) chunks.append(chunk) # Give all the chunks a length. chunks[-1].length = bc.next_offset - chunks[-1].byte # pylint: disable=W0631,C0301 for i in range(len(chunks)-1): chunks[i].length = chunks[i+1].byte - chunks[i].byte #self.validate_chunks(chunks) return chunks
python
{ "resource": "" }
q279992
ByteParser.validate_chunks
test
def validate_chunks(self, chunks): """Validate the rule that chunks have a single entrance.""" # starts is the entrances to the chunks starts = set([ch.byte for ch in chunks]) for ch in chunks: assert all([(ex in starts or ex < 0) for ex in ch.exits])
python
{ "resource": "" }
q279993
ByteParser._arcs
test
def _arcs(self): """Find the executable arcs in the code. Yields pairs: (from,to). From and to are integer line numbers. If from is < 0, then the arc is an entrance into the code object. If to is < 0, the arc is an exit from the code object. """ chunks = self._split_into_chunks() # A map from byte offsets to chunks jumped into. byte_chunks = dict([(c.byte, c) for c in chunks]) # There's always an entrance at the first chunk. yield (-1, byte_chunks[0].line) # Traverse from the first chunk in each line, and yield arcs where # the trace function will be invoked. for chunk in chunks: if not chunk.first: continue chunks_considered = set() chunks_to_consider = [chunk] while chunks_to_consider: # Get the chunk we're considering, and make sure we don't # consider it again this_chunk = chunks_to_consider.pop() chunks_considered.add(this_chunk) # For each exit, add the line number if the trace function # would be triggered, or add the chunk to those being # considered if not. for ex in this_chunk.exits: if ex < 0: yield (chunk.line, ex) else: next_chunk = byte_chunks[ex] if next_chunk in chunks_considered: continue # The trace function is invoked if visiting the first # bytecode in a line, or if the transition is a # backward jump. backward_jump = next_chunk.byte < this_chunk.byte if next_chunk.first or backward_jump: if next_chunk.line != chunk.line: yield (chunk.line, next_chunk.line) else: chunks_to_consider.append(next_chunk)
python
{ "resource": "" }
q279994
ByteParser._all_chunks
test
def _all_chunks(self): """Returns a list of `Chunk` objects for this code and its children. See `_split_into_chunks` for details. """ chunks = [] for bp in self.child_parsers(): chunks.extend(bp._split_into_chunks()) return chunks
python
{ "resource": "" }
q279995
ByteParser._all_arcs
test
def _all_arcs(self): """Get the set of all arcs in this code object and its children. See `_arcs` for details. """ arcs = set() for bp in self.child_parsers(): arcs.update(bp._arcs()) return arcs
python
{ "resource": "" }
q279996
Coverage.options
test
def options(self, parser, env): """ Add options to command line. """ super(Coverage, self).options(parser, env) parser.add_option("--cover-package", action="append", default=env.get('NOSE_COVER_PACKAGE'), metavar="PACKAGE", dest="cover_packages", help="Restrict coverage output to selected packages " "[NOSE_COVER_PACKAGE]") parser.add_option("--cover-erase", action="store_true", default=env.get('NOSE_COVER_ERASE'), dest="cover_erase", help="Erase previously collected coverage " "statistics before run") parser.add_option("--cover-tests", action="store_true", dest="cover_tests", default=env.get('NOSE_COVER_TESTS'), help="Include test modules in coverage report " "[NOSE_COVER_TESTS]") parser.add_option("--cover-min-percentage", action="store", dest="cover_min_percentage", default=env.get('NOSE_COVER_MIN_PERCENTAGE'), help="Minimum percentage of coverage for tests " "to pass [NOSE_COVER_MIN_PERCENTAGE]") parser.add_option("--cover-inclusive", action="store_true", dest="cover_inclusive", default=env.get('NOSE_COVER_INCLUSIVE'), help="Include all python files under working " "directory in coverage report. Useful for " "discovering holes in test coverage if not all " "files are imported by the test suite. " "[NOSE_COVER_INCLUSIVE]") parser.add_option("--cover-html", action="store_true", default=env.get('NOSE_COVER_HTML'), dest='cover_html', help="Produce HTML coverage information") parser.add_option('--cover-html-dir', action='store', default=env.get('NOSE_COVER_HTML_DIR', 'cover'), dest='cover_html_dir', metavar='DIR', help='Produce HTML coverage information in dir') parser.add_option("--cover-branches", action="store_true", default=env.get('NOSE_COVER_BRANCHES'), dest="cover_branches", help="Include branch coverage in coverage report " "[NOSE_COVER_BRANCHES]") parser.add_option("--cover-xml", action="store_true", default=env.get('NOSE_COVER_XML'), dest="cover_xml", help="Produce XML coverage information") parser.add_option("--cover-xml-file", action="store", default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'), dest="cover_xml_file", metavar="FILE", help="Produce XML coverage information in file")
python
{ "resource": "" }
q279997
Coverage.begin
test
def begin(self): """ Begin recording coverage information. """ log.debug("Coverage begin") self.skipModules = list(sys.modules.keys()) if self.coverErase: log.debug("Clearing previously collected coverage statistics") self.coverInstance.combine() self.coverInstance.erase() self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]') self.coverInstance.load() self.coverInstance.start()
python
{ "resource": "" }
q279998
Coverage.report
test
def report(self, stream): """ Output code coverage report. """ log.debug("Coverage report") self.coverInstance.stop() self.coverInstance.combine() self.coverInstance.save() modules = [module for name, module in sys.modules.items() if self.wantModuleCoverage(name, module)] log.debug("Coverage report will cover modules: %s", modules) self.coverInstance.report(modules, file=stream) if self.coverHtmlDir: log.debug("Generating HTML coverage report") self.coverInstance.html_report(modules, self.coverHtmlDir) if self.coverXmlFile: log.debug("Generating XML coverage report") self.coverInstance.xml_report(modules, self.coverXmlFile) # make sure we have minimum required coverage if self.coverMinPercentage: f = StringIO.StringIO() self.coverInstance.report(modules, file=f) m = re.search(r'-------\s\w+\s+\d+\s+\d+\s+(\d+)%\s+\d*\s{0,1}$', f.getvalue()) if m: percentage = int(m.groups()[0]) if percentage < self.coverMinPercentage: log.error('TOTAL Coverage did not reach minimum ' 'required: %d%%' % self.coverMinPercentage) sys.exit(1) else: log.error("No total percentage was found in coverage output, " "something went wrong.")
python
{ "resource": "" }
q279999
Coverage.wantFile
test
def wantFile(self, file, package=None): """If inclusive coverage is enabled, return True for all source files in wanted packages. """ if self.coverInclusive: if file.endswith(".py"): if package and self.coverPackages: for want in self.coverPackages: if package.startswith(want): return True else: return True return None
python
{ "resource": "" }