docstring (string, lengths 52–499) | function (string, lengths 67–35.2k) | __index_level_0__ (int64, 52.6k–1.16M) |
---|---|---|
Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool | def _use_gl(objs):
from ..models.plots import Plot
return _any(objs, lambda obj: isinstance(obj, Plot) and obj.output_backend == "webgl") | 52,588 |
Whether a collection of Bokeh objects contains a TableWidget
Args:
objs (seq[Model or Document]) :
Returns:
bool | def _use_tables(objs):
from ..models.widgets import TableWidget
return _any(objs, lambda obj: isinstance(obj, TableWidget)) | 52,589 |
Whether a collection of Bokeh objects contains any Widget
Args:
objs (seq[Model or Document]) :
Returns:
bool | def _use_widgets(objs):
from ..models.widgets import Widget
return _any(objs, lambda obj: isinstance(obj, Widget)) | 52,590 |
Produce the value as it should be serialized.
Sometimes it is desirable for the serialized value to differ from
the value returned by ``__get__``, so that the ``__get__`` value can
appear simpler for user or developer convenience.
Args:
obj (HasProps) : the object to get the serialized attribute for
Returns:
JSON-like | def serializable_value(self, obj):
value = self.__get__(obj, obj.__class__)
return self.property.serialize_value(value) | 52,592 |
Create a PropertyDescriptor for basic Bokeh properties.
Args:
name (str) : The attribute name that this property is for
property (Property) : A basic property to create a descriptor for | def __init__(self, name, property):
super(BasicPropertyDescriptor, self).__init__(name)
self.property = property
self.__doc__ = self.property.__doc__ | 52,594 |
Implement the deleter for the Python `descriptor protocol`_.
Args:
obj (HasProps) : An instance to delete this property from | def __delete__(self, obj):
if self.name in obj._property_values:
old_value = obj._property_values[self.name]
del obj._property_values[self.name]
self.trigger_if_changed(obj, old_value)
if self.name in obj._unstable_default_values:
del obj._unstable_default_values[self.name] | 52,597 |
Get the default value that will be used for a specific instance.
Args:
obj (HasProps) : The instance to get the default value for.
Returns:
object | def instance_default(self, obj):
return self.property.themed_default(obj.__class__, self.name, obj.themed_values()) | 52,598 |
Send a change event notification if the property is set to a
value that is not equal to ``old``.
Args:
obj (HasProps)
The object the property is being set on.
old (obj) :
The previous value of the property to compare
Returns:
None | def trigger_if_changed(self, obj, old):
new_value = self.__get__(obj, obj.__class__)
if not self.property.matches(old, new_value):
self._trigger(obj, old, new_value) | 52,600 |
Internal helper for dealing with the associated units property
when setting values on |UnitsSpec| properties.
When ``value`` is a dict, this function may mutate the value of the
associated units property.
Args:
obj (HasProps) : instance to update units spec property value for
value (obj) : new value to set for the property
Returns:
copy of ``value``, with 'units' key and value removed when
applicable | def _extract_units(self, obj, value):
if isinstance(value, dict):
if 'units' in value:
value = copy(value) # so we can modify it
units = value.pop("units", None)
if units:
self.units_prop.__set__(obj, units)
return value | 52,613 |
Clamp numeric values to be non-negative and, optionally, no greater than a
given maximum.
Args:
value (float) :
A number to clamp.
maximum (float, optional) :
A max bound to clamp to. If None, there is no upper bound,
and values are only clamped to be non-negative. (default: None)
Returns:
float | def clamp(value, maximum=None):
value = max(value, 0)
if maximum is not None:
return min(value, maximum)
else:
return value | 52,650 |
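The helper above is easy to exercise directly; a minimal standalone sketch (the body is reproduced verbatim so the example is self-contained):

```python
# Minimal standalone sketch of the clamp helper above.
def clamp(value, maximum=None):
    value = max(value, 0)
    if maximum is not None:
        return min(value, maximum)
    return value

assert clamp(-0.3) == 0            # negatives clamp to zero
assert clamp(0.7) == 0.7           # in-range values pass through
assert clamp(1.4, maximum=1) == 1  # upper bound applied when given
```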
Darken this color by reducing its luminance.
Args:
amount (float) :
Amount to reduce the luminance by (the result is clamped at zero)
Returns:
Color | def darken(self, amount):
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l - amount)
return self.from_hsl(hsl) | 52,651 |
Lighten this color by increasing its luminance.
Args:
amount (float) :
Amount to increase the luminance by (the result is clamped at 1.0)
Returns:
Color | def lighten(self, amount):
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l + amount, 1)
return self.from_hsl(hsl) | 52,652 |
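A hedged usage sketch for the two methods above, assuming they are available on Bokeh's ``RGB`` color class (they are defined on the base ``Color`` class that ``RGB`` derives from):

```python
from bokeh.colors import RGB

c = RGB(100, 150, 200)
darker = c.darken(0.1)    # luminance reduced by 0.1, clamped at 0
lighter = c.lighten(0.2)  # luminance increased by 0.2, clamped at 1
```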
Set up a handler for button state changes (clicks).
Args:
handler (func) : handler function to call when button is toggled.
Returns:
None | def on_click(self, handler):
self.on_change('active', lambda attr, old, new: handler(new)) | 52,654 |
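A hedged usage sketch for this variant of ``on_click``, assuming it is the one defined on Bokeh's ``Toggle`` widget (the handler receives the new ``active`` state):

```python
from bokeh.models.widgets import Toggle

toggle = Toggle(label="Pause")

def handle_toggle(active):
    # active is the new boolean state of the 'active' property
    print("paused" if active else "running")

toggle.on_click(handle_toggle)
```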
Set up a handler for button or menu item clicks.
Args:
handler (func) : handler function to call when button is activated.
Returns:
None | def on_click(self, handler):
self.on_event(ButtonClick, handler)
self.on_event(MenuItemClick, handler) | 52,655 |
Attempt to import an optional dependency.
Silently returns None if the requested module is not available.
Args:
mod_name (str) : name of the optional module to try to import
Returns:
imported module or None, if import fails | def import_optional(mod_name):
try:
return import_module(mod_name)
except ImportError:
pass
except Exception:
msg = "Failed to import optional module `{}`".format(mod_name)
log.exception(msg) | 52,657 |
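A usage sketch, assuming ``import_optional`` is importable from its defining module (``bokeh.util.dependencies`` in Bokeh's tree):

```python
from bokeh.util.dependencies import import_optional

pd = import_optional("pandas")
if pd is not None:
    df = pd.DataFrame({"a": [1, 2, 3]})  # optional feature enabled
else:
    print("pandas not installed; DataFrame support disabled")
```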
Detect whether PhantomJS is available in PATH, at a minimum version.
Args:
version (str, optional) :
Required minimum version for PhantomJS (mostly for testing)
Returns:
str, path to PhantomJS | def detect_phantomjs(version='2.1'):
if settings.phantomjs_path() is not None:
phantomjs_path = settings.phantomjs_path()
else:
if hasattr(shutil, "which"):
phantomjs_path = shutil.which("phantomjs") or "phantomjs"
else:
# Python 2 has no shutil.which; rely on PATH resolution at invocation time
phantomjs_path = "phantomjs"
try:
proc = Popen([phantomjs_path, "--version"], stdout=PIPE, stderr=PIPE)
proc.wait()
out = proc.communicate()
if len(out[1]) > 0:
raise RuntimeError('Error encountered in PhantomJS detection: %r' % out[1].decode('utf8'))
required = V(version)
installed = V(out[0].decode('utf8'))
if installed < required:
raise RuntimeError('PhantomJS version too old. Version >= %s required, installed: %s' % (required, installed))
except OSError:
raise RuntimeError('PhantomJS is not present in PATH or BOKEH_PHANTOMJS_PATH. Try "conda install phantomjs" or \
"npm install -g phantomjs-prebuilt"')
return phantomjs_path | 52,658 |
Configure a Receiver with a specific Bokeh protocol version.
Args:
protocol (Protocol) :
A Bokeh protocol object to use to assemble collected message
fragments. | def __init__(self, protocol):
self._protocol = protocol
self._current_consumer = self._HEADER
self._message = None
self._buf_header = None | 52,659 |
Consume individual protocol message fragments.
Args:
fragment (``JSON``) :
A message fragment to assemble. When a complete message is
assembled, the receiver state will reset to begin consuming a
new message. | def consume(self, fragment):
self._current_consumer(fragment)
raise gen.Return(self._message) | 52,660 |
Return a driver function that can advance a "bounced" sequence
of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce | def bounce(sequence):
N = len(sequence)
def f(i):
div, mod = divmod(i, N)
if div % 2 == 0:
return sequence[mod]
else:
return sequence[N-mod-1]
return partial(force, sequence=_advance(f)) | 52,683 |
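The driver machinery (``force``, ``_advance``) is internal, but the index arithmetic is simple to verify standalone; a sketch:

```python
# Standalone sketch of bounce()'s index arithmetic: even passes walk the
# sequence forward, odd passes walk it backward.
def bounce_value(sequence, i):
    N = len(sequence)
    div, mod = divmod(i, N)
    return sequence[mod] if div % 2 == 0 else sequence[N - mod - 1]

seq = [0, 1, 2, 3]
print([bounce_value(seq, i) for i in range(10)])
# [0, 1, 2, 3, 3, 2, 1, 0, 0, 1]
```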
Return a driver function that can advance a sequence of cosine values.
.. code-block:: none
value = A * cos(w*i + phi) + offset
Args:
w (float) : a frequency for the cosine driver
A (float) : an amplitude for the cosine driver
phi (float) : a phase offset to start the cosine driver with
offset (float) : a global offset to add to the driver values | def cosine(w, A=1, phi=0, offset=0):
from math import cos
def f(i):
return A * cos(w*i + phi) + offset
return partial(force, sequence=_advance(f)) | 52,684 |
Return a driver function that can advance a sequence of linear values.
.. code-block:: none
value = m * i + b
Args:
m (float) : a slope for the linear driver
b (float) : an offset for the linear driver
def f(i):
return m * i + b
return partial(force, sequence=_advance(f)) | 52,685 |
Return a driver function that can advance a repeated sequence of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...]
Args:
sequence (seq) : a sequence of values for the driver to repeat
N = len(sequence)
def f(i):
return sequence[i%N]
return partial(force, sequence=_advance(f)) | 52,686 |
Return a driver function that can advance a sequence of sine values.
.. code-block:: none
value = A * sin(w*i + phi) + offset
Args:
w (float) : a frequency for the sine driver
A (float) : an amplitude for the sine driver
phi (float) : a phase offset to start the sine driver with
offset (float) : a global offset to add to the driver values | def sine(w, A=1, phi=0, offset=0):
from math import sin
def f(i):
return A * sin(w*i + phi) + offset
return partial(force, sequence=_advance(f)) | 52,687 |
Look up a Bokeh model class, given its view model name.
Args:
view_model_name (str) :
A view model name for a Bokeh model to look up
Returns:
Model: the model class corresponding to ``view_model_name``
Raises:
KeyError, if the model cannot be found
Example:
.. code-block:: python
>>> from bokeh.model import get_class
>>> get_class("Range1d")
<class 'bokeh.models.ranges.Range1d'> | def get_class(view_model_name):
# in order to look up from the model catalog that MetaModel maintains, it
# has to be created first. These imports ensure that all built-in Bokeh
# models are represented in the catalog.
from . import models; models
from .plotting import Figure; Figure
d = MetaModel.model_class_reverse_map
if view_model_name in d:
return d[view_model_name]
else:
raise KeyError("View model name '%s' not found" % view_model_name) | 52,724 |
Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
*callbacks (callable) : callback functions to register
Returns:
None
Example:
.. code-block:: python
widget.on_change('value', callback1, callback2, ..., callback_n) | def on_change(self, attr, *callbacks):
if attr not in self.properties():
raise ValueError("attempted to add a callback on nonexistent %s.%s property" % (self.__class__.__name__, attr))
super(Model, self).on_change(attr, *callbacks) | 52,734 |
Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None | def set_select(self, selector, updates):
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val) | 52,735 |
Stop the Bokeh Server.
This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
as stops the ``HTTPServer`` that this instance was configured with.
Args:
wait (bool):
Whether to wait for orderly cleanup (default: True)
Returns:
None | def stop(self, wait=True):
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop() | 52,770 |
Gets all currently active sessions for applications.
Args:
app_path (str, optional) :
The configured application path for the application to return
sessions for. If None, return active sessions for all
applications. (default: None)
Returns:
list[ServerSession] | def get_sessions(self, app_path=None):
if app_path is not None:
return self._tornado.get_sessions(app_path)
all_sessions = []
for path in self._tornado.app_paths:
all_sessions += self._tornado.get_sessions(path)
return all_sessions | 52,772 |
Execute the configured source code in a module and run any post
checks.
Args:
module (Module) : a module to execute the configured code in.
post_check(callable) : a function that can raise an exception
if expected post-conditions are not met after code execution. | def run(self, module, post_check):
try:
# Simulate the sys.path behaviour described here:
#
# https://docs.python.org/2/library/sys.html#sys.path
_cwd = os.getcwd()
_sys_path = list(sys.path)
_sys_argv = list(sys.argv)
sys.path.insert(0, os.path.dirname(self._path))
sys.argv = [os.path.basename(self._path)] + self._argv
exec(self._code, module.__dict__)
post_check()
except Exception as e:
self._failed = True
self._error_detail = traceback.format_exc()
_exc_type, _exc_value, exc_traceback = sys.exc_info()
filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt)
finally:
# undo sys.path, CWD fixups
os.chdir(_cwd)
sys.path = _sys_path
sys.argv = _sys_argv
self.ran = True | 52,779 |
Initialize the subcommand with its parser
Args:
parser (Parser) : an Argparse ``Parser`` instance to configure
with the args for this subcommand.
This method will automatically add all the arguments described in
``self.args``. Subclasses can perform any additional customizations
on ``self.parser``. | def __init__(self, parser):
self.parser = parser
args = getattr(self, 'args', ())
for arg in args:
flags = arg[0]
if not isinstance(flags, tuple):
flags = (flags,)
self.parser.add_argument(*flags, **arg[1]) | 52,780 |
Pull a document from the server, overwriting the passed-in document
Args:
document : (Document)
The document to overwrite with server content.
Returns:
None | def pull_doc(self, document):
msg = self._protocol.create('PULL-DOC-REQ')
reply = self._send_message_wait_for_reply(msg)
if reply is None:
raise RuntimeError("Connection to server was lost")
elif reply.header['msgtype'] == 'ERROR':
raise RuntimeError("Failed to pull document: " + reply.content['text'])
else:
reply.push_to_document(document) | 52,795 |
Push a document to the server, overwriting any existing server-side doc.
Args:
document : (Document)
A Document to push to the server
Returns:
The server reply | def push_doc(self, document):
msg = self._protocol.create('PUSH-DOC', document)
reply = self._send_message_wait_for_reply(msg)
if reply is None:
raise RuntimeError("Connection to server was lost")
elif reply.header['msgtype'] == 'ERROR':
raise RuntimeError("Failed to push document: " + reply.content['text'])
else:
return reply | 52,796 |
Create a ``dict`` of columns from a Pandas ``DataFrame``,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict[str, np.array] | def _data_from_df(df):
_df = df.copy()
# Flatten columns
if isinstance(df.columns, pd.MultiIndex):
try:
_df.columns = ['_'.join(col) for col in _df.columns.values]
except TypeError:
raise TypeError('Could not flatten MultiIndex columns. '
'use string column names or flatten manually')
# Transform columns CategoricalIndex in list
if isinstance(df.columns, pd.CategoricalIndex):
_df.columns = df.columns.tolist()
# Flatten index
index_name = ColumnDataSource._df_index_name(df)
if index_name == 'index':
_df.index = pd.Index(_df.index.values)
else:
_df.index = pd.Index(_df.index.values, name=index_name)
_df.reset_index(inplace=True)
tmp_data = {c: v.values for c, v in _df.iteritems()}
new_data = {}
for k, v in tmp_data.items():
new_data[k] = v
return new_data | 52,829 |
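A hedged usage sketch: ``ColumnDataSource`` accepts a ``DataFrame`` directly and converts it through a helper like the one above, flattening the index into a regular column:

```python
import pandas as pd
from bokeh.models import ColumnDataSource

df = pd.DataFrame({"x": [1, 2, 3], "y": [4.0, 5.0, 6.0]})
source = ColumnDataSource(df)
print(sorted(source.data.keys()))  # ['index', 'x', 'y']
```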
Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used | def add(self, data, name=None):
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name | 52,831 |
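A usage sketch for ``add``: omitting ``name`` generates the next free ``"Series N"`` name, and whichever name was used is returned:

```python
from bokeh.models import ColumnDataSource

source = ColumnDataSource(data={})
auto_name = source.add([1, 2, 3])   # -> "Series 0" (auto-generated)
source.add([4, 5, 6], name="ys")    # explicit column name
print(auto_name, sorted(source.data.keys()))
```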
Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued. | def remove(self, name):
try:
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name) | 52,832 |
Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes.
Args:
propname (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__`` | def accumulate_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename] | 52,878 |
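A simplified standalone sketch of the MRO walk above (the ``HasProps`` subclass check and per-class caching are omitted; the class names here are illustrative):

```python
import inspect

class Base:
    __properties__ = {"width", "height"}

class Child(Base):
    __properties__ = {"color"}  # shadows Base's set, but both are visited

def accumulate(cls, propname):
    # Union the named set attribute across the class and all its bases.
    s = set()
    for c in inspect.getmro(cls):
        if hasattr(c, propname):
            s.update(getattr(c, propname))
    return s

print(sorted(accumulate(Child, "__properties__")))
# ['color', 'height', 'width']
```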
Traverse the class hierarchy and accumulate the special dicts
``MetaHasProps`` stores on classes.
Args:
propname (str) : name of the special attribute to collect.
Typically meaningful values are: ``__dataspecs__``,
``__overridden_defaults__`` | def accumulate_dict_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
d = dict()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
for k,v in base.items():
if k not in d:
d[k] = v
setattr(cls, cachename, d)
return cls.__dict__[cachename] | 52,879 |
Intercept attribute setting on HasProps in order to special case
a few situations:
* short circuit all property machinery for ``_private`` attributes
* suggest similar attribute names on attribute errors
Args:
name (str) : the name of the attribute to set on this object
value (obj) : the value to set
Returns:
None | def __setattr__(self, name, value):
# self.properties() below can be expensive so avoid it
# if we're just setting a private underscore field
if name.startswith("_"):
super(HasProps, self).__setattr__(name, value)
return
props = sorted(self.properties())
descriptor = getattr(self.__class__, name, None)
if name in props or (descriptor is not None and descriptor.fset is not None):
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches))) | 52,883 |
Structural equality of models.
Args:
other (HasProps) : the other instance to compare to
Returns:
True, if properties are structurally equal, otherwise False | def equals(self, other):
# NOTE: don't try to use this to implement __eq__. Because then
# you will be tempted to implement __hash__, which would interfere
# with mutability of models. However, not implementing __hash__
# will make bokeh unusable in Python 3, where proper implementation
# of __hash__ is required when implementing __eq__.
if not isinstance(other, self.__class__):
return False
else:
return self.properties_with_values() == other.properties_with_values() | 52,884 |
Collect the names of properties on this class.
This method *optionally* traverses the class hierarchy and includes
properties defined on any parent classes.
Args:
with_bases (bool, optional) :
Whether to include properties defined on parent classes in
the results. (default: True)
Returns:
set[str] : property names | def properties(cls, with_bases=True):
if with_bases:
return accumulate_from_superclasses(cls, "__properties__")
else:
return set(cls.__properties__) | 52,887 |
Query the property values of |HasProps| instances with a
predicate.
Args:
query (callable) :
A callable that accepts property descriptors and returns True
or False
include_defaults (bool, optional) :
Whether to include properties that have not been explicitly
set by a user (default: True)
Returns:
dict : mapping of property names and values for matching properties | def query_properties_with_values(self, query, include_defaults=True):
themed_keys = set()
result = dict()
if include_defaults:
keys = self.properties()
else:
# TODO (bev) For now, include unstable default values. Things rely on Instances
# always getting serialized, even defaults, and adding unstable defaults here
# accomplishes that. Unmodified defaults for property value containers will be
# weeded out below.
keys = set(self._property_values.keys()) | set(self._unstable_default_values.keys())
if self.themed_values():
themed_keys = set(self.themed_values().keys())
keys |= themed_keys
for key in keys:
descriptor = self.lookup(key)
if not query(descriptor):
continue
value = descriptor.serializable_value(self)
if not include_defaults and key not in themed_keys:
if isinstance(value, PropertyValueContainer) and key in self._unstable_default_values:
continue
result[key] = value
return result | 52,889 |
Apply a set of theme values which will be used rather than
defaults, but will not override application-set values.
The passed-in dictionary may be kept around as-is and shared with
other instances to save memory (so neither the caller nor the
|HasProps| instance should modify it).
Args:
property_values (dict) : theme values to use in place of defaults
Returns:
None | def apply_theme(self, property_values):
old_dict = self.themed_values()
# if the same theme is set again, it should reuse the same dict
if old_dict is property_values:
return
removed = set()
# we're doing a little song-and-dance to avoid storing __themed_values__ or
# an empty dict, if there's no theme that applies to this HasProps instance.
if old_dict is not None:
removed.update(set(old_dict.keys()))
added = set(property_values.keys())
old_values = dict()
for k in added.union(removed):
old_values[k] = getattr(self, k)
if len(property_values) > 0:
setattr(self, '__themed_values__', property_values)
elif hasattr(self, '__themed_values__'):
delattr(self, '__themed_values__')
# Property container values might be cached even if unmodified. Invalidate
# any cached values that are not modified at this point.
for k, v in old_values.items():
if k in self._unstable_themed_values:
del self._unstable_themed_values[k]
# Emit any change notifications that result
for k, v in old_values.items():
descriptor = self.lookup(k)
descriptor.trigger_if_changed(self, v) | 52,890 |
Indent all the lines in a given block of text by a specified amount.
Args:
text (str) :
The text to indent
n (int, optional) :
The amount to indent each line by (default: 2)
ch (char, optional) :
What character to fill the indentation with (default: " ") | def indent(text, n=2, ch=" "):
padding = ch * n
return "\n".join(padding+line for line in text.split("\n")) | 52,892 |
Initialize a new message from header, metadata, and content
dictionaries.
To assemble a message from existing JSON fragments, use the
``assemble`` method.
To create new messages with automatically generated headers,
use subclass ``create`` methods.
Args:
header (JSON-like) :
metadata (JSON-like) :
content (JSON-like) : | def __init__(self, header, metadata, content):
self.header = header
self.metadata = metadata
self.content = content
self._buffers = [] | 52,923 |
Creates a new message, assembled from JSON fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
Message subclass
Raises:
MessageError | def assemble(cls, header_json, metadata_json, content_json):
try:
header = json_decode(header_json)
except ValueError:
raise MessageError("header could not be decoded")
try:
metadata = json_decode(metadata_json)
except ValueError:
raise MessageError("metadata could not be decoded")
try:
content = json_decode(content_json)
except ValueError:
raise MessageError("content could not be decoded")
msg = cls(header, metadata, content)
msg._header_json = header_json
msg._metadata_json = metadata_json
msg._content_json = content_json
return msg | 52,924 |
Associate a buffer header and payload with this message.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
MessageError | def add_buffer(self, buf_header, buf_payload):
if 'num_buffers' in self._header:
self._header['num_buffers'] += 1
else:
self._header['num_buffers'] = 1
self._header_json = None
self._buffers.append((buf_header, buf_payload)) | 52,925 |
Add a buffer header and payload that we read from the socket.
This differs from add_buffer() because we're validating vs.
the header's num_buffers, instead of filling in the header.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
ProtocolError | def assemble_buffer(self, buf_header, buf_payload):
if self.header.get('num_buffers', 0) <= len(self._buffers):
raise ProtocolError("too many buffers received expecting " + str(self.header['num_buffers']))
self._buffers.append((buf_header, buf_payload)) | 52,926 |
Write any buffer headers and payloads to the given connection.
Args:
conn (object) :
May be any object with a ``write_message`` method. Typically,
a Tornado ``WSHandler`` or ``WebSocketClientConnection``
locked (bool) : whether to acquire the connection's write lock around each ``write_message`` call (default: True)
Returns:
int : number of bytes sent | def write_buffers(self, conn, locked=True):
if conn is None:
raise ValueError("Cannot write_buffers to connection None")
sent = 0
for header, payload in self._buffers:
yield conn.write_message(header, locked=locked)
yield conn.write_message(payload, binary=True, locked=locked)
sent += (len(header) + len(payload))
raise gen.Return(sent) | 52,927 |
Return a message header fragment dict.
Args:
request_id (str or None) :
Message ID of the message this message replies to
Returns:
dict : a message header | def create_header(cls, request_id=None):
header = {
'msgid' : bkserial.make_id(),
'msgtype' : cls.msgtype
}
if request_id is not None:
header['reqid'] = request_id
return header | 52,928 |
Send the message on the given connection.
Args:
conn (WebSocketHandler) : a WebSocketHandler to send messages
Returns:
int : number of bytes sent | def send(self, conn):
if conn is None:
raise ValueError("Cannot send to connection None")
with (yield conn.write_lock.acquire()):
sent = 0
yield conn.write_message(self.header_json, locked=False)
sent += len(self.header_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#yield gen.sleep(0.1)
yield conn.write_message(self.metadata_json, locked=False)
sent += len(self.metadata_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#yield gen.sleep(0.1)
yield conn.write_message(self.content_json, locked=False)
sent += len(self.content_json)
sent += yield self.write_buffers(conn, locked=False)
raise gen.Return(sent) | 52,929 |
Given a JSON representation of all the models in a graph, return a
dict of new model objects.
Args:
references_json (``JSON``)
JSON specifying new Bokeh models to create
Returns:
dict[str, Model] | def instantiate_references_json(references_json):
# Create all instances, but without setting their props
references = {}
for obj in references_json:
obj_id = obj['id']
obj_type = obj.get('subtype', obj['type'])
cls = get_class(obj_type)
instance = cls.__new__(cls, id=obj_id)
if instance is None:
raise RuntimeError('Error loading model from JSON (type: %s, id: %s)' % (obj_type, obj_id))
references[instance.id] = instance
return references | 52,958 |
Given a list of all models in a graph, return JSON representing
them and their properties.
Args:
references (seq[Model]) :
A list of models to convert to JSON
Returns:
list | def references_json(references):
references_json = []
for r in references:
ref = r.ref
ref['attributes'] = r._to_json_like(include_defaults=False)
references_json.append(ref)
return references_json | 52,959 |
Create a new base event.
Args:
model (Model) : a Bokeh model to register event callbacks on | def __init__(self, model):
self._model_id = None
if model is not None:
self._model_id = model.id | 52,964 |
Create a ``DataSpec`` dict that applies a client-side ``Dodge``
transformation to a ``ColumnDataSource`` column.
Args:
field_name (str) : a field name to configure ``DataSpec`` with
value (float) : the fixed offset to add to column data
range (Range, optional) : a range to use for computing synthetic
coordinates when necessary, e.g. a ``FactorRange`` when the
column data is categorical (default: None)
Returns:
dict | def dodge(field_name, value, range=None):
return field(field_name, Dodge(value=value, range=range)) | 52,977 |
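A hedged usage sketch of ``dodge`` for placing grouped bars side by side within each categorical factor (``dodge`` lives in ``bokeh.transform`` in Bokeh's tree):

```python
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.transform import dodge

fruits = ["apples", "pears"]
source = ColumnDataSource(data=dict(fruit=fruits, y2015=[2, 3], y2016=[5, 4]))

p = figure(x_range=fruits)
# Offset each year's bars by a fixed amount within the same factor.
p.vbar(x=dodge("fruit", -0.2, range=p.x_range), top="y2015", width=0.3, source=source)
p.vbar(x=dodge("fruit", 0.2, range=p.x_range), top="y2016", width=0.3, source=source)
```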
Create a new Message instance for the given type.
Args:
msgtype (str) : | def create(self, msgtype, *args, **kwargs):
if msgtype not in self._messages:
raise ProtocolError("Unknown message type %r for protocol version %s" % (msgtype, self._version))
return self._messages[msgtype].create(*args, **kwargs) | 53,018 |
Create a Message instance assembled from json fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
message | def assemble(self, header_json, metadata_json, content_json):
header = json_decode(header_json)
if 'msgtype' not in header:
log.error("Bad header with no msgtype was: %r", header)
raise ProtocolError("No 'msgtype' in header")
return self._messages[header['msgtype']].assemble(
header_json, metadata_json, content_json
) | 53,019 |
Query this document for objects that match the given selector.
Args:
selector (JSON-like query dictionary) : you can query by type or by
name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``
Returns:
seq[Model] | def select(self, selector):
if self._is_single_string_selector(selector, 'name'):
# special-case optimization for by-name query
return self._all_models_by_name.get_all(selector['name'])
else:
return find(self._all_models.values(), selector) | 53,041 |
Query this document for objects that match the given selector.
Raises an error if more than one object is found. Returns
single matching object, or None if nothing is found
Args:
selector (JSON-like query dictionary) : you can query by type or by
name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``
Returns:
Model or None | def select_one(self, selector):
result = list(self.select(selector))
if len(result) > 1:
raise ValueError("Found more than one model matching %s: %r" % (selector, result))
if len(result) == 0:
return None
return result[0] | 53,042 |
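A hedged usage sketch of querying a document by name with ``select_one``:

```python
from bokeh.io import curdoc
from bokeh.plotting import figure

p = figure(name="myplot")
curdoc().add_root(p)

found = curdoc().select_one({"name": "myplot"})
assert found is p
assert curdoc().select_one({"name": "nope"}) is None
```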
Convert the document to a JSON string.
Args:
indent (int or None, optional) : number of spaces to indent, or
None to suppress all newlines and indentation (default: None)
Returns:
str | def to_json_string(self, indent=None):
root_ids = []
for r in self._roots:
root_ids.append(r.id)
root_references = self._all_models.values()
json = {
'title' : self.title,
'roots' : {
'root_ids' : root_ids,
'references' : references_json(root_references)
},
'version' : __version__
}
return serialize_json(json, indent=indent) | 53,043 |
Move all data in this doc to the dest_doc, leaving this doc empty.
Args:
dest_doc (Document) :
The Bokeh document to populate with data from this one
Returns:
None | def _destructively_move(self, dest_doc):
if dest_doc is self:
raise RuntimeError("Attempted to overwrite a document with itself")
dest_doc.clear()
# we have to remove ALL roots before adding any
# to the new doc or else models referenced from multiple
# roots could be in both docs at once, which isn't allowed.
roots = []
self._push_all_models_freeze()
try:
while self.roots:
r = next(iter(self.roots))
self.remove_root(r)
roots.append(r)
finally:
self._pop_all_models_freeze()
for r in roots:
if r.document is not None:
raise RuntimeError("Somehow we didn't detach %r" % (r))
if len(self._all_models) != 0:
raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))
for r in roots:
dest_doc.add_root(r)
dest_doc.title = self.title | 53,046 |
Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
*callbacks (callable) : one or more callback functions to register
Returns:
None | def on_change(self, attr, *callbacks):
if len(callbacks) == 0:
raise ValueError("on_change takes an attribute name and one or more callbacks, got only one parameter")
_callbacks = self._callbacks.setdefault(attr, [])
for callback in callbacks:
if callback in _callbacks:
continue
_check_callback(callback, ('attr', 'old', 'new'))
_callbacks.append(callback) | 53,065 |
Trigger callbacks for ``attr`` on this object.
Args:
attr (str) :
old (object) :
new (object) :
Returns:
None | def trigger(self, attr, old, new, hint=None, setter=None):
def invoke():
callbacks = self._callbacks.get(attr)
if callbacks:
for callback in callbacks:
callback(attr, old, new)
if hasattr(self, '_document') and self._document is not None:
self._document._notify_change(self, attr, old, new, hint, setter, invoke)
else:
invoke() | 53,067 |
Convert an ``ws(s)`` URL for a Bokeh server into the appropriate
``http(s)`` URL for the websocket endpoint.
Args:
url (str):
An ``ws(s)`` URL ending in ``/ws``
Returns:
str:
The corresponding ``http(s)`` URL.
Raises:
ValueError:
If the input URL is not of the proper form. | def server_url_for_websocket_url(url):
if url.startswith("ws:"):
reprotocoled = "http" + url[2:]
elif url.startswith("wss:"):
reprotocoled = "https" + url[3:]
else:
raise ValueError("URL has non-websocket protocol " + url)
if not reprotocoled.endswith("/ws"):
raise ValueError("websocket URL does not end in /ws")
return reprotocoled[:-2] | 53,140 |
Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into
the appropriate ``ws(s)`` URL
Args:
url (str):
An ``http(s)`` URL
Returns:
str:
The corresponding ``ws(s)`` URL ending in ``/ws``
Raises:
ValueError:
If the input URL is not of the proper form. | def websocket_url_for_server_url(url):
if url.startswith("http:"):
reprotocoled = "ws" + url[4:]
elif url.startswith("https:"):
reprotocoled = "wss" + url[5:]
else:
raise ValueError("URL has unknown protocol " + url)
if reprotocoled.endswith("/"):
return reprotocoled + "ws"
else:
return reprotocoled + "/ws" | 53,141 |
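A usage sketch of the two inverse conversions above; the import path below is an assumption (in Bokeh's tree these helpers live in a client utility module):

```python
# Assumed import path -- adjust to wherever these helpers are defined.
from bokeh.client.util import (server_url_for_websocket_url,
                               websocket_url_for_server_url)

print(server_url_for_websocket_url("ws://localhost:5006/ws"))
# http://localhost:5006/
print(websocket_url_for_server_url("https://example.com/app"))
# wss://example.com/app/ws
```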
Delegate a received message to the appropriate handler.
Args:
message (Message) :
The received message that needs to be handled
connection (ServerConnection) :
The connection that received this message
Raises:
ProtocolError | def handle(self, message, connection):
handler = self._handlers.get((message.msgtype, message.revision))
if handler is None:
handler = self._handlers.get(message.msgtype)
if handler is None:
raise ProtocolError("%s not expected on server" % message)
try:
work = yield handler(message, connection)
except Exception as e:
log.error("error handling message %r: %r", message, e)
log.debug(" message header %r content %r", message.header, message.content, exc_info=1)
work = connection.error(message, repr(e))
raise gen.Return(work) | 53,156 |
Set up the namespace browser with the provided settings.
Args:
dataframe_format (string): default floating-point format for
DataFrame editor | def setup(self, check_all=None, exclude_private=None,
exclude_uppercase=None, exclude_capitalized=None,
exclude_unsupported=None, excluded_names=None,
minmax=None, dataframe_format=None):
assert self.shellwidget is not None
self.check_all = check_all
self.exclude_private = exclude_private
self.exclude_uppercase = exclude_uppercase
self.exclude_capitalized = exclude_capitalized
self.exclude_unsupported = exclude_unsupported
self.excluded_names = excluded_names
self.minmax = minmax
self.dataframe_format = dataframe_format
if self.editor is not None:
self.editor.setup_menu(minmax)
self.editor.set_dataframe_format(dataframe_format)
self.exclude_private_action.setChecked(exclude_private)
self.exclude_uppercase_action.setChecked(exclude_uppercase)
self.exclude_capitalized_action.setChecked(exclude_capitalized)
self.exclude_unsupported_action.setChecked(exclude_unsupported)
self.refresh_table()
return
self.editor = RemoteCollectionsEditorTableView(
self,
data=None,
minmax=minmax,
shellwidget=self.shellwidget,
dataframe_format=dataframe_format)
self.editor.sig_option_changed.connect(self.sig_option_changed.emit)
self.editor.sig_files_dropped.connect(self.import_data)
self.editor.sig_free_memory.connect(self.sig_free_memory.emit)
self.setup_option_actions(exclude_private, exclude_uppercase,
exclude_capitalized, exclude_unsupported)
# Setup toolbar layout.
self.tools_layout = QHBoxLayout()
toolbar = self.setup_toolbar()
for widget in toolbar:
self.tools_layout.addWidget(widget)
self.tools_layout.addStretch()
self.setup_options_button()
# Setup layout.
layout = create_plugin_layout(self.tools_layout, self.editor)
self.setLayout(layout)
self.sig_option_changed.connect(self.option_changed) | 53,403 |
Return the self.data index position for the filename.
Args:
filename: Name of the file to search for in self.data.
Returns:
The self.data index for the filename. Returns None
if the filename is not found in self.data. | def has_filename(self, filename):
fixpath = lambda path: osp.normcase(osp.realpath(path))
for index, finfo in enumerate(self.data):
if fixpath(filename) == fixpath(finfo.filename):
return index
return None | 53,641 |
Return whether filename is in the editor stack.
Args:
filename: Name of the file to search for. If filename is None,
then checks if any file is open.
Returns:
True: If filename is None and a file is open.
False: If filename is None and no files are open.
None: If filename is not None and the file isn't found.
integer: Index of file name in editor stack. | def is_file_opened(self, filename=None):
if filename is None:
# Is there any file opened?
return len(self.data) > 0
else:
return self.has_filename(filename) | 53,643 |
Low-level function for writing text of editor to file.
Args:
fileinfo: FileInfo object associated to editor to be saved
filename: str with filename to save to
This is a low-level function that only saves the text to file in the
correct encoding without doing any error handling. | def _write_to_file(self, fileinfo, filename):
txt = to_text_string(fileinfo.editor.get_text_with_eol())
fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding) | 53,655 |
Select a name to save a file.
Args:
original_filename: Used in the dialog to display the current file
path and name.
Returns:
Normalized path for the selected file name or None if no name was
selected. | def select_savename(self, original_filename):
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
# Don't use filters on KDE, to avoid making the dialog incredibly
# slow
# Fixes issue 4156
if is_kde_desktop() and not is_anaconda():
filters = ''
selectedfilter = ''
else:
filters = self.edit_filters
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(original_filename)[1])
self.redirect_stdio.emit(False)
filename, _selfilter = getsavefilename(self, _("Save file"),
original_filename,
filters=filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails)
self.redirect_stdio.emit(True)
if filename:
return osp.normpath(filename)
return None | 53,658 |
Create a decoration and add it to the editor.
Args:
start (int) start line of the decoration
end (int) end line of the decoration | def _decorate_block(self, start, end):
color = self._get_scope_highlight_color()
draw_order = DRAW_ORDERS.get('codefolding')
d = TextDecoration(self.editor.document(), start_line=start,
end_line=end+1, draw_order=draw_order)
d.set_background(color)
d.set_full_width(True, clear=False)
self.editor.decorations.add(d)
self._scope_decos.append(d) | 53,737 |
Add new history tab.
Args:
filename (str): file to be loaded in a new tab. | def add_history(self, filename, color_scheme, font, wrap):
filename = encoding.to_unicode_from_fs(filename)
if filename in self.filenames:
return
editor = codeeditor.CodeEditor(self)
if osp.splitext(filename)[1] == '.py':
language = 'py'
else:
language = 'bat'
editor.setup_editor(linenumbers=False,
language=language,
scrollflagarea=False,
show_class_func_dropdown=False)
editor.focus_changed.connect(lambda: self.focus_changed.emit())
editor.setReadOnly(True)
editor.set_font(font, color_scheme)
editor.toggle_wrap_mode(wrap)
text, _ = encoding.read(filename)
editor.set_text(text)
editor.set_cursor_position('eof')
self.editors.append(editor)
self.filenames.append(filename)
index = self.tabwidget.addTab(editor, osp.basename(filename))
self.find_widget.set_editor(editor)
self.tabwidget.setTabToolTip(index, filename)
self.tabwidget.setCurrentIndex(index) | 53,754 |
Append an entry to history filename.
Args:
filename (str): file to be updated in a new tab.
command (str): line to be added.
go_to_eof (bool): scroll to the end of file. | def append_to_history(self, filename, command, go_to_eof):
if not is_text_string(filename): # filename is a QString
filename = to_text_string(filename.toUtf8(), 'utf-8')
command = to_text_string(command)
index = self.filenames.index(filename)
self.editors[index].append(command)
if go_to_eof:
self.editors[index].set_cursor_position('eof')
self.tabwidget.setCurrentIndex(index) | 53,755 |
Show files in external file explorer
Args:
fnames (list): Names of files to show. | def show_in_external_file_explorer(fnames=None):
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
for fname in fnames:
open_file_in_external_explorer(fname) | 53,821 |
Draw the given breakpoint pixmap.
Args:
top (int): top of the line to draw the breakpoint icon.
painter (QPainter)
icon_name (str): key of the icon to draw (see: self.icons) | def _draw_breakpoint_icon(self, top, painter, icon_name):
rect = QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
try:
icon = self.icons[icon_name]
except KeyError as e:
debug_print("Breakpoint icon doen't exist, {}".format(e))
else:
icon.paint(painter, rect) | 54,087 |
Change visibility and connect/disconnect signal.
Args:
state (bool): Activate/deactivate. | def on_state_changed(self, state):
if state:
self.editor.sig_breakpoints_changed.connect(self.repaint)
self.editor.sig_debug_stop.connect(self.set_current_line_arrow)
self.editor.sig_debug_stop[()].connect(self.stop_clean)
self.editor.sig_debug_start.connect(self.start_clean)
else:
self.editor.sig_breakpoints_changed.disconnect(self.repaint)
self.editor.sig_debug_stop.disconnect(self.set_current_line_arrow)
self.editor.sig_debug_stop[()].disconnect(self.stop_clean)
self.editor.sig_debug_start.disconnect(self.start_clean) | 54,091 |
Set extra selections for a key.
Also assign draw orders to leave current_cell and current_line
in the background (so they do not cover other decorations)
NOTE: This will remove previous decorations added to the same key.
Args:
key (str) name of the extra selections group.
extra_selections (list of sourcecode.api.TextDecoration). | def set_extra_selections(self, key, extra_selections):
# use draw orders to highlight current_cell and current_line first
draw_order = DRAW_ORDERS.get(key)
if draw_order is None:
draw_order = DRAW_ORDERS.get('on_top')
for selection in extra_selections:
selection.draw_order = draw_order
self.clear_extra_selections(key)
self.extra_selections_dict[key] = extra_selections | 54,195 |
Remove decorations added through set_extra_selections.
Args:
key (str) name of the extra selections group. | def clear_extra_selections(self, key):
for decoration in self.extra_selections_dict.get(key, []):
self.decorations.remove(decoration)
self.extra_selections_dict[key] = [] | 54,197 |
Constructor.
Autosave is disabled after construction and needs to be enabled
explicitly if required.
Args:
editor (Editor): editor plugin. | def __init__(self, editor):
self.editor = editor
self.timer = QTimer(self.editor)
self.timer.setSingleShot(True)
self.timer.timeout.connect(self.do_autosave)
self._enabled = False # Can't use setter here
self._interval = self.DEFAULT_AUTOSAVE_INTERVAL | 54,256 |
Create unique autosave file name for specified file name.
Args:
filename (str): original file name
autosave_dir (str): directory in which autosave files are stored | def create_unique_autosave_filename(self, filename, autosave_dir):
basename = osp.basename(filename)
autosave_filename = osp.join(autosave_dir, basename)
if autosave_filename in self.name_mapping.values():
counter = 0
root, ext = osp.splitext(basename)
while autosave_filename in self.name_mapping.values():
counter += 1
autosave_basename = '{}-{}{}'.format(root, counter, ext)
autosave_filename = osp.join(autosave_dir, autosave_basename)
return autosave_filename | 54,261 |
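A standalone sketch of the uniquifying loop above: if the base name is taken, a counter is appended before the extension until a free name is found:

```python
import os.path as osp

def unique_basename(basename, taken):
    # Mirrors the counter loop in create_unique_autosave_filename.
    candidate = basename
    counter = 0
    root, ext = osp.splitext(basename)
    while candidate in taken:
        counter += 1
        candidate = '{}-{}{}'.format(root, counter, ext)
    return candidate

print(unique_basename("script.py", {"script.py", "script-1.py"}))
# script-2.py
```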
Get name of autosave file for specified file name.
This function uses the dict in `self.name_mapping`. If `filename` is
in the mapping, then return the corresponding autosave file name.
Otherwise, construct a unique file name and update the mapping.
Args:
filename (str): original file name | def get_autosave_filename(self, filename):
try:
autosave_filename = self.name_mapping[filename]
except KeyError:
autosave_dir = get_conf_path('autosave')
if not osp.isdir(autosave_dir):
try:
os.mkdir(autosave_dir)
except EnvironmentError as error:
action = _('Error while creating autosave directory')
msgbox = AutosaveErrorDialog(action, error)
msgbox.exec_if_enabled()
autosave_filename = self.create_unique_autosave_filename(
filename, autosave_dir)
self.name_mapping[filename] = autosave_filename
self.stack.sig_option_changed.emit(
'autosave_mapping', self.name_mapping)
logger.debug('New autosave file name')
return autosave_filename | 54,263 |
Autosave a file.
Do nothing if the `changed_since_autosave` flag is not set or the file
is newly created (and thus not named by the user). Otherwise, save a
copy of the file with the name given by `self.get_autosave_filename()`
and clear the `changed_since_autosave` flag. Errors raised when saving
are silently ignored.
Args:
index (int): index into self.stack.data | def autosave(self, index):
finfo = self.stack.data[index]
document = finfo.editor.document()
if not document.changed_since_autosave or finfo.newly_created:
return
autosave_filename = self.get_autosave_filename(finfo.filename)
logger.debug('Autosaving %s to %s', finfo.filename, autosave_filename)
try:
self.stack._write_to_file(finfo, autosave_filename)
document.changed_since_autosave = False
except EnvironmentError as error:
action = (_('Error while autosaving {} to {}')
.format(finfo.filename, autosave_filename))
msgbox = AutosaveErrorDialog(action, error)
msgbox.exec_if_enabled() | 54,264 |
Constructor.
Args:
action (str): what Spyder was trying to do when the error occurred
error (Exception): the error that occurred | def __init__(self, action, error):
logger.error(action, exc_info=error)
QDialog.__init__(self)
self.setWindowTitle(_('Autosave error'))
self.setModal(True)
layout = QVBoxLayout()
header = _('Error message:')
txt = '<br>{}<br><br>{}<br>{!s}'.format(action, header, error)
layout.addWidget(QLabel(txt))
layout.addSpacing(15)
txt = _("Hide all future autosave-related errors during this session")
self.dismiss_box = QCheckBox(txt)
layout.addWidget(self.dismiss_box)
layout.addSpacing(15)
button_box = QDialogButtonBox(QDialogButtonBox.Ok)
button_box.accepted.connect(self.accept)
layout.addWidget(button_box)
self.setLayout(layout) | 54,418 |
Return a string formatted delta for the values in x.
Args:
x: 2-item list of integers (representing number of calls) or
2-item list of floats (representing seconds of runtime).
Returns:
A list with [formatted x[0], [formatted delta, color]], where
color reflects whether x[0] is lower, greater, or the same as
x[1]. | def color_string(self, x):
diff_str = ""
color = "black"
if len(x) == 2 and self.compare_file is not None:
difference = x[0] - x[1]
if difference:
color, sign = ('green', '-') if difference < 0 else ('red', '+')
diff_str = '{}{}'.format(sign, self.format_measure(difference))
return [self.format_measure(x[0]), [diff_str, color]] | 54,707 |
Helper function to create a checkable action.
Args:
text (str): Text to be displayed in the action.
conf_name (str): configuration setting associated with the action
editorstack_method (str): name of the EditorStack method that will be
used to apply the change in each editorstack. | def _create_checkable_action(self, text, conf_name, editorstack_method):
def toggle(checked):
self.switch_to_plugin()
self._toggle_checkable_action(checked, editorstack_method,
conf_name)
action = create_action(self, text, toggled=toggle)
action.setChecked(CONF.get('editor', conf_name))
return action | 54,842 |
Handle the toggle of a checkable action.
Update editorstacks and the configuration.
Args:
checked (bool): State of the action.
editorstack_method (str): name of the EditorStack method that will be
used to apply the change in each editorstack.
conf_name (str): configuration setting associated with the action. | def _toggle_checkable_action(self, checked, editorstack_method, conf_name):
if self.editorstacks:
for editorstack in self.editorstacks:
try:
editorstack.__getattribute__(editorstack_method)(checked)
except AttributeError as e:
logger.error(e, exc_info=True)
# Run code analysis when `set_pep8_enabled` is toggled
if editorstack_method == 'set_pep8_enabled':
# TODO: Connect this to the LSP
#for finfo in editorstack.data:
# finfo.run_code_analysis(
# self.get_option('code_analysis/pyflakes'),
# checked)
pass
CONF.set('editor', conf_name, checked) | 54,843 |
Add text decorations on a CodeEditor instance.
Don't add duplicated decorations, and order decorations according to
draw_order and the size of the selection.
Args:
decorations (sourcecode.api.TextDecoration) (could be a list)
Returns:
int: Amount of decorations added. | def add(self, decorations):
added = 0
if isinstance(decorations, list):
not_repeated = set(decorations) - set(self._decorations)
self._decorations.extend(list(not_repeated))
added = len(not_repeated)
elif decorations not in self._decorations:
self._decorations.append(decorations)
added = 1
if added > 0:
self._order_decorations()
self.update()
return added | 55,059 |
Set format to use in DataframeEditor.
Args:
new_format (string): e.g. "%.3f" | def set_dataframe_format(self, new_format):
self.sig_option_changed.emit('dataframe_format', new_format)
self.model.dataframe_format = new_format | 55,131 |
Allows you to make a direct REST call if you know the path
Arguments:
:param path: The path of the request. Example: sobjects/User/ABC123/password
:param params: dict of parameters to pass to the path | def restful(self, path, params):
url = self._get_norm_base_url() + path
response = requests.get(url, headers=self._get_rest_headers(), params=params)
if response.status_code != 200:
raise Exception(response)
json_result = response.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result | 56,326 |
Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle".
Arguments:
kind -- either map, combiner, or reduce | def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception):
try:
# Set up logging.
logging.basicConfig(level=logging.WARN)
kind = args is not None and args[1] or sys.argv[1]
Runner().run(kind, stdin=stdin, stdout=stdout)
except Exception as exc:
# Dump encoded data that we will try to fetch using mechanize
print_exception(exc)
raise | 56,799 |
Adds an event to the event file.
Args:
event: An `Event` protocol buffer. | def add_event(self, event):
if not isinstance(event, event_pb2.Event):
raise TypeError("Expected an event_pb2.Event proto, "
" but got %s" % type(event))
self._async_writer.write(event.SerializeToString()) | 57,262 |
Creates an _AsyncWriterThread.
Args:
queue: A Queue from which to dequeue data.
record_writer: An instance of a record writer.
flush_secs: How often, in seconds, to flush the
pending file to disk. | def __init__(self, queue, record_writer, flush_secs):
threading.Thread.__init__(self)
self.daemon = True
self._queue = queue
self._record_writer = record_writer
self._flush_secs = flush_secs
# The first data will be flushed immediately.
self._next_flush_time = 0
self._has_pending_data = False
self._shutdown_signal = object() | 57,267 |
Constructor of RunStates.
Args:
breakpoints_func: A callable of the signature:
def breakpoints_func():
which returns all the currently activated breakpoints. | def __init__(self, breakpoints_func=None):
# Maps from run key to debug_graphs_helper.DebugGraphWrapper instance.
self._run_key_to_original_graphs = dict()
self._run_key_to_debug_graphs = dict()
if breakpoints_func:
assert callable(breakpoints_func)
self._breakpoints_func = breakpoints_func | 57,271 |
Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops. | def add_graph(self, run_key, device_name, graph_def, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
if not run_key in graph_dict:
graph_dict[run_key] = dict() # Mapping device_name to GraphDef.
graph_dict[run_key][tf.compat.as_str(device_name)] = (
debug_graphs_helper.DebugGraphWrapper(graph_def)) | 57,272 |
Get the runtime GraphDef protos associated with a run key.
Args:
run_key: A Session.run key.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos. | def get_graphs(self, run_key, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
graph_wrappers = graph_dict.get(run_key, {})
graph_defs = dict()
for device_name, wrapper in graph_wrappers.items():
graph_defs[device_name] = wrapper.graph_def
return graph_defs | 57,273 |
Get the runtime GraphDef proto associated with a run key and a device.
Args:
run_key: A Session.run key.
device_name: Name of the device in question.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `GraphDef` proto. | def get_graph(self, run_key, device_name, debug=False):
return self.get_graphs(run_key, debug=debug).get(device_name, None) | 57,274 |
Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details. | def on_core_metadata_event(self, event):
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if not self._graph_defs:
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
# Wait for acknowledgement from client. Blocks until an item is got.
logger.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
logger.info('on_core_metadata_event() client ack received (meta).') | 57,277 |
Records the summary values based on an updated message from the debugger.
Logs an error message if writing the event to disk fails.
Args:
event: The Event proto to be processed. | def on_value_event(self, event):
if not event.summary.value:
logger.info('The summary of the event lacks a value.')
return None
# The node name property in the event proto is actually a watch key, which
# is a concatenation of several pieces of data.
watch_key = event.summary.value[0].node_name
tensor_value = debug_data.load_tensor_from_event(event)
device_name = _extract_device_name_from_event(event)
node_name, output_slot, debug_op = (
event.summary.value[0].node_name.split(':'))
maybe_base_expanded_node_name = (
self._run_states.get_maybe_base_expanded_node_name(node_name,
self._run_key,
device_name))
self._tensor_store.add(watch_key, tensor_value)
self._outgoing_channel.put(_comm_tensor_data(
device_name, node_name, maybe_base_expanded_node_name, output_slot,
debug_op, tensor_value, event.wall_time))
logger.info('on_value_event(): waiting for client ack (tensors)...')
self._incoming_channel.get()
logger.info('on_value_event(): client ack received (tensor).')
# Determine if the particular debug watch key is in the current list of
# breakpoints. If it is, send an EventReply() to unblock the debug op.
if self._is_debug_node_in_breakpoints(event.summary.value[0].node_name):
logger.info('Sending empty EventReply for breakpoint: %s',
event.summary.value[0].node_name)
# TODO(cais): Support receiving and sending tensor value from front-end.
return debug_service_pb2.EventReply()
return None | 57,280 |
Get the traceback of an op in the latest version of the TF graph.
Args:
op_name: Name of the op.
Returns:
Creation traceback of the op, in the form of a list of 2-tuples:
(file_path, lineno)
Raises:
ValueError: If the op with the given name cannot be found in the latest
version of the graph that this SourceManager instance has received, or
if this SourceManager instance has not received any graph traceback yet. | def get_op_traceback(self, op_name):
if not self._graph_traceback:
raise ValueError('No graph traceback has been received yet.')
for op_log_entry in self._graph_traceback.log_entries:
if op_log_entry.name == op_name:
return self._code_def_to_traceback_list(op_log_entry.code_def)
raise ValueError(
'No op named "%s" can be found in the graph of the latest version '
' (%d).' % (op_name, self._graph_version)) | 57,285 |
Receives health pills from a debugger and writes them to disk.
Args:
receive_port: The port at which to receive health pills from the
TensorFlow debugger.
| def __init__(self, receive_port):
super(InteractiveDebuggerDataServer, self).__init__(
receive_port, InteractiveDebuggerDataStreamHandler)
self._incoming_channel = queue.Queue()
self._outgoing_channel = comm_channel_lib.CommChannel()
self._run_states = RunStates(breakpoints_func=lambda: self.breakpoints)
self._tensor_store = tensor_store_lib.TensorStore()
self._source_manager = SourceManager()
curried_handler_constructor = functools.partial(
InteractiveDebuggerDataStreamHandler,
self._incoming_channel, self._outgoing_channel, self._run_states,
self._tensor_store)
grpc_debug_server.EventListenerBaseServicer.__init__(
self, receive_port, curried_handler_constructor) | 57,288 |