input (string, lengths 11-7.65k) | target (string, lengths 22-8.26k) |
---|---|
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_single_quote_when_single_quoted(self):
encoded = cypher_repr(u"'", quote=u"'")
assert encoded == u"'\\''" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_list(self):
encoded = cypher_repr([1, 2.0, u"three"])
assert encoded == u"[1, 2.0, 'three']" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_empty_list(self):
encoded = cypher_repr([])
assert encoded == u"[]" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_map(self):
encoded = cypher_repr(OrderedDict([("one", 1), ("two", 2.0), ("number three", u"three")]))
assert encoded == u"{one: 1, two: 2.0, `number three`: 'three'}" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_empty_map(self):
encoded = cypher_repr({})
assert encoded == u"{}" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_empty_node(self):
a = Node()
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"({})" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_node_with_property(self):
a = Node(name="Alice")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"({name: 'Alice'})" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_node_with_label(self):
a = Node("Person")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"(:Person {})" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_should_encode_node_with_label_and_property(self):
a = Node("Person", name="Alice")
encoded = cypher_repr(a, node_template="{labels} {properties}")
assert encoded == u"(:Person {name: 'Alice'})" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_can_encode_relationship(self):
a = Node(name="Alice")
b = Node(name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_can_encode_relationship_with_names(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_can_encode_relationship_with_alternative_names(self):
a = Node("Person", nom=u"Aimée")
b = Node("Person", nom=u"Baptiste")
ab = KNOWS_FR(a, b)
encoded = cypher_repr(ab, related_node_template=u"{property.nom}")
self.assertEqual(u"(Aimée)-[:CONNAÎT {}]->(Baptiste)", encoded) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def test_can_encode_relationship_with_properties(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b, since=1999)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {since: 1999}]->(Bob)", encoded) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __getattr__(cls, name):
return MagicMock() |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def combinationSum(self, candidates, target):
candidates.sort()
self.result = []
self.dfs(candidates,target,0,[])
return self.result |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def dfs(self,candidates,target,start,reslist):
if target == 0:
    self.result.append(reslist)
    return
for i in range(start, len(candidates)):
    if candidates[i] > target:
        break  # candidates were sorted by combinationSum above, so later values cannot fit either
    self.dfs(candidates, target - candidates[i], i, reslist + [candidates[i]]) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def combinationSum(self, candidates, target):
self.result = []
self.dfs(candidates,0,target,[])
return self.result |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def dfs(self,can,cursum,target,res):
if cursum > target: return
if cursum == target:
self.result.append(res)
return
for i in range(len(can)):
    if not res or res[-1] <= can[i]:
        self.dfs(can, cursum+can[i], target, res+[can[i]]) |
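A quick sanity check for the backtracking pair above, assuming both methods sit on a LeetCode-style `Solution` class (the class name is an assumption, not shown in the row):

sol = Solution()
print(sol.combinationSum([2, 3, 6, 7], 7))  # expected: [[2, 2, 3], [7]]

Each path extends only in non-decreasing order (res[-1] <= can[i]), which is what prevents the same combination from being emitted in several orders.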
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def gpio_init(pin, output):
try:
with open(f"/sys/class/gpio/gpio{pin}/direction", 'wb') as f:
f.write(b"out" if output else b"in")
except Exception as e:
print(f"Failed to set gpio {pin} direction: {e}") |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def batch_norm(inputs, training, data_format, name=''):
"""Performs a batch normalization using a standard set of parameters."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
return tf.compat.v1.layers.batch_normalization(
inputs=inputs,
axis=1 if data_format == 'channels_first' else 3,
momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON,
center=True,
scale=True,
training=training,
fused=True,
name=name) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, user_names_str):
if not hasattr(context, 'users'):
context.users = {}
user_names = [name.strip() for name in re.split('and|,', user_names_str)]
for user_name in user_names:
token = 'fake_token_' + user_name
user_id = context.helpers.create_test_user(user_name, token)
context.users[user_name] = {'token': token, 'id': user_id} |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs |
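As a concrete check of the padding arithmetic in fixed_padding, the split is pure integer math and can be verified without TensorFlow:

for kernel_size in (1, 3, 5, 7):
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    print(kernel_size, (pad_beg, pad_end))  # 1 -> (0, 0), 3 -> (1, 1), 5 -> (2, 2), 7 -> (3, 3)

Even kernel sizes split unevenly (kernel_size=4 gives (1, 2)), placing the extra row/column on the trailing edge.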
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, user_name):
context.token = context.users[user_name]['token'] |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
name):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.compat.v1.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
reuse=tf.AUTO_REUSE,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
data_format=data_format,
name=name) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, user_name, count):
context.helpers.load_postcards(user_name, count) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _building_block_v2(inputs, filters, training, projection_shortcut, strides,
data_format, name):
"""A single block for ResNet v2, without a bottleneck.
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference mode.
Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
name: Block name.
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
first_name = name + 'first'
inputs = batch_norm(
inputs, training, data_format, name=first_name + 'batch_norm')
inputs = tf.nn.relu(inputs, name=first_name + 'relu')
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs, name=first_name + 'proj')
second_name = name + 'second'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
data_format=data_format,
name=second_name + 'input')
inputs = batch_norm(
inputs, training, data_format, name=second_name + 'batch_norm')
inputs = tf.nn.relu(inputs, name=second_name + 'relu')
third_name = name + 'third'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
data_format=data_format,
name=third_name + 'input')
return inputs + shortcut |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, rel_url):
context.request = LazyRequest(
'GET', context.helpers.url(rel_url), context.token) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def projection_shortcut(inputs, name):
return conv2d_fixed_padding(
inputs=inputs,
filters=filters_out,
kernel_size=1,
strides=strides,
data_format=data_format,
name=name) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, rel_url):
context.request = LazyRequest(
'POST', context.helpers.url(rel_url), context.token) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self,
resnet_size,
bottleneck,
num_classes,
num_filters,
kernel_size,
conv_stride,
first_pool_size,
first_pool_stride,
block_sizes,
block_strides,
resnet_version=DEFAULT_VERSION,
data_format=None,
dtype=DEFAULT_DTYPE):
"""Creates a model for classifying an image.
Args:
resnet_size: A single integer for the size of the ResNet model.
bottleneck: Use regular blocks or bottleneck blocks.
num_classes: The number of classes used as labels.
num_filters: The number of filters to use for the first block layer of the
model. This number is then doubled for each subsequent block layer.
kernel_size: The kernel size to use for convolution.
conv_stride: stride size for the initial convolutional layer
first_pool_size: Pool size to be used for the first pooling layer. If
none, the first pooling layer is skipped.
first_pool_stride: stride size for the first pooling layer. Not used if
first_pool_size is None.
block_sizes: A list containing n values, where n is the number of sets of
block layers desired. Each value should be the number of blocks in the
i-th set.
block_strides: List of integers representing the desired stride size for
each of the sets of block layers. Should be same length as block_sizes.
resnet_version: Integer representing which version of the ResNet network
to use. See README for details. Valid values: [1, 2]
data_format: Input format ('channels_last', 'channels_first', or None). If
set to None, the format is dependent on whether a GPU is available.
dtype: The TensorFlow dtype to use for calculations. If not specified
tf.float32 is used.
Raises:
ValueError: if invalid version is selected.
"""
self.resnet_size = resnet_size
if not data_format:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
self.resnet_version = resnet_version
if resnet_version not in (1, 2):
raise ValueError(
'Resnet version should be 1 or 2. See README for citations.')
self.bottleneck = bottleneck
self.block_fn = _building_block_v2
if dtype not in ALLOWED_TYPES:
raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))
self.data_format = data_format
self.num_classes = num_classes
self.num_filters = num_filters
self.kernel_size = kernel_size
self.conv_stride = conv_stride
self.first_pool_size = first_pool_size
self.first_pool_stride = first_pool_stride
self.block_sizes = block_sizes
self.block_strides = block_strides
self.dtype = dtype
self.pre_activation = resnet_version == 2 |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, name, field):
context.request.add_file(context.helpers.file_path(name), field) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _custom_dtype_getter(self, # pylint: disable=keyword-arg-before-vararg
getter,
name,
shape=None,
dtype=DEFAULT_DTYPE,
*args,
**kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
if dtype in CASTABLE_TYPES:
var = getter(name, shape, tf.float32, *args, **kwargs)
return tf.cast(var, dtype=dtype, name=name + '_cast')
else:
return getter(name, shape, dtype, *args, **kwargs) |
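A minimal sketch of how a custom getter like this is wired up in TF1-style code; graph mode and the fp16_getter name are assumptions for illustration:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # custom getters are a graph-mode (tf.compat.v1) feature

def fp16_getter(getter, name, shape=None, dtype=tf.float16, *args, **kwargs):
    # create the master variable in fp32, then cast to the requested low precision
    var = getter(name, shape, tf.float32, *args, **kwargs)
    return tf.cast(var, dtype=dtype, name=name + '_cast')

with tf.compat.v1.variable_scope('model', custom_getter=fp16_getter):
    w = tf.compat.v1.get_variable('w', shape=[3, 3], dtype=tf.float16)
# w behaves as fp16 in the graph while gradients accumulate into the fp32 master variable.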
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context):
data = json.loads(context.text)
receiver_name = re.match(r"\<(\w+)'s id\>", data['receiver']).group(1)
data['receiver'] = context.users[receiver_name]['id']
context.request.add_data(data) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _model_variable_scope(self):
"""Returns a variable scope that the model should be created under.
If self.dtype is a castable type, model variable will be created in fp32
then cast to self.dtype before being used.
Returns:
A variable scope for the model.
"""
return tf.compat.v1.variable_scope(
'resnet_model',
custom_getter=self._custom_dtype_getter,
reuse=tf.AUTO_REUSE) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, state, code):
context.response = context.request.send()
context.response.status_code.should.equal(int(code)) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, self.num_classes].
"""
with self._model_variable_scope():
if self.data_format == 'channels_first':
# Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
# This provides a large performance boost on GPU. See
# https://www.tensorflow.org/performance/performance_guide#data_formats
inputs = tf.transpose(a=inputs, perm=[0, 3, 1, 2])
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=self.num_filters,
kernel_size=self.kernel_size,
strides=self.conv_stride,
data_format=self.data_format,
name='initial_input')
inputs = tf.identity(inputs, 'initial_conv')
# We do not include batch normalization or activation functions in V2
# for the initial conv1 because the first ResNet unit will perform these
# for both the shortcut and non-shortcut paths as part of the first
# block's projection. Cf. Appendix of [2].
if self.resnet_version == 1:
inputs = batch_norm(inputs, training, self.data_format)
inputs = tf.nn.relu(inputs)
if self.first_pool_size:
inputs = tf.compat.v1.layers.max_pooling2d(
inputs=inputs,
pool_size=self.first_pool_size,
strides=self.first_pool_stride,
padding='SAME',
data_format=self.data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
for i, num_blocks in enumerate(self.block_sizes):
# We now have 4 block layers, but the last does not
# double the number of filters.
# We also skip the projection shortcut in the first block layer.
num_filters = self.num_filters * min((2**i), 4)
shortcut = i != 0
inputs = block_layer(
inputs=inputs,
filters=num_filters,
bottleneck=self.bottleneck,
block_fn=self.block_fn,
blocks=num_blocks,
strides=self.block_strides[i],
training=training,
name='block_layer{}'.format(i + 1),
data_format=self.data_format,
shortcut=shortcut)
# Skip the last BN+relu.
# Only apply the BN and ReLU for model that does pre_activation in each
# building/bottleneck block, eg resnet V2.
# if self.pre_activation:
# inputs = batch_norm(inputs, training, self.data_format,
# name='pre_act'+'batch_norm')
# inputs = tf.nn.relu(inputs,name='pre_act'+'relu')
# The current top layer has shape
# `batch_size x pool_size x pool_size x final_size`.
# ResNet does an Average Pooling layer over pool_size,
# but that is the same as doing a reduce_mean. We do a reduce_mean
# here because it performs better than AveragePooling2D.
# Also perform max-pooling, and concat results.
axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
avg_pooled = tf.reduce_mean(input_tensor=inputs, axis=axes, keepdims=True)
avg_pooled = tf.squeeze(avg_pooled, axes)
max_pooled = tf.reduce_max(input_tensor=inputs, axis=axes, keepdims=True)
max_pooled = tf.squeeze(max_pooled, axes)
inputs = tf.concat([avg_pooled, max_pooled], axis=1)
inputs = tf.identity(inputs, 'final_pooling')
inputs = tf.compat.v1.layers.dense(
inputs=inputs, units=self.num_classes, reuse=tf.AUTO_REUSE)
inputs = tf.identity(inputs, 'final_dense')
return inputs |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def step_impl(context, count):
cnt = len(context.response.json())
cnt.should.equal(int(count)) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label |
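The label list follows Universal Dependencies ('nsubj:pass', 'nmod:poss'), so this reads like one of spaCy's per-language syntax_iterators. A hedged usage sketch, assuming a UD-trained pipeline such as fr_core_news_sm is installed:

import spacy

nlp = spacy.load("fr_core_news_sm")  # assumption: any pipeline whose language registers this iterator
doc = nlp("Le petit chat dort sur le canapé.")
for chunk in doc.noun_chunks:  # doc.noun_chunks drives the registered noun_chunks iterator
    print(chunk.text)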
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self, leaf):
self.leaf = leaf
self.lchild = None
self.rchild = None |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def get_leafs(self):
if self.lchild is None and self.rchild is None:
return [self.leaf]
else:
return self.lchild.get_leafs()+self.rchild.get_leafs() |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def get_level(self, level, queue):
if queue is None:
    queue = []
if level == 1:
    queue.append(self)  # lists use append, not push
else:
    if self.lchild is not None:
        self.lchild.get_level(level-1, queue)
    if self.rchild is not None:
        self.rchild.get_level(level-1, queue)
return queue |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def paint(self, c):
self.leaf.paint(c)
if self.lchild != None:
self.lchild.paint(c)
if self.rchild != None:
self.rchild.paint(c) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
self.center = (self.x+int(self.w/2),self.y+int(self.h/2))
self.distance_from_center = sqrt((self.center[0]-MAP_WIDTH/2)**2 + (self.center[1]-MAP_HEIGHT/2)**2) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def paint(self, c):
c.stroke_rectangle(self.x, self.y, self.w, self.h) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def draw_path(self,c,container):
c.path(self.center[0],self.center[1],container.center[0],container.center[1]) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self, w, h, color = "empty"):
self.board = zeros((h,w), dtype=uint8)
self.w = w
self.h = h
self.set_brush(color) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def set_brush(self, code):
self.color = self.brushes[code] |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def stroke_rectangle(self, x, y, w, h):
self.line(x,y,w,True)
self.line(x,y+h-1,w,True)
self.line(x,y,h,False)
self.line(x+w-1,y,h,False) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def filled_rectangle(self, x, y, w, h):
self.board[y:y+h,x:x+w] = self.color |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def line(self, x, y, length, horizontal):
if horizontal:
self.board[y,x:x+length] = self.color
else:
self.board[y:y+length,x] = self.color |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def path(self,x1,y1,x2,y2):
self.board[y1:y2+1,x1:x2+1] = self.color |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def circle(self,x,y,r):
for x_offset in range(-r,r+1):
for y_offset in range(-r,r+1):
if sqrt(x_offset**2+y_offset**2) < r:
    # numpy arrays index as (row, col) = (y, x), matching the other draw methods
    self.board[y+y_offset, x+x_offset] = self.color |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def draw(self):
im = Image.fromarray(self.board)
im.save(MAP_NAME) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __str__(self):
return str(self.board) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self, container):
self.x = container.x+randint(1, floor(container.w/3))
self.y = container.y+randint(1, floor(container.h/3))
self.w = container.w-(self.x-container.x)
self.h = container.h-(self.y-container.y)
self.w -= randint(0,floor(self.w/3))
self.h -= randint(0,floor(self.h/3))
self.environment = int(min(4,10*(container.distance_from_center/MAP_WIDTH)+random()*2-1))
roll = random()*0.9+(2*container.distance_from_center/MAP_WIDTH)*0.1
self.biome = next(n for n,b in enumerate(self.biomes_CDF) if roll<b) |
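The last line is inverse-CDF sampling: biomes_CDF holds cumulative probabilities and the first bucket whose cumulative value exceeds the roll is chosen (in the row above, the roll is additionally biased outward by distance_from_center). A standalone illustration with made-up weights:

from random import random

biomes_CDF = [0.5, 0.8, 0.95, 1.0]  # cumulative weights for four hypothetical biomes
roll = random()
biome = next(n for n, b in enumerate(biomes_CDF) if roll < b)
print(roll, '->', biome)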
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def paint(self,c):
c.filled_rectangle(self.x, self.y,self.w, self.h) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _split_vertical(container):
r1 = None
r2 = None
min_w = int(W_RATIO*container.h)+1
if container.w < 2*min_w:
return None
r1 = Container(container.x,container.y,randint(min_w, container.w-min_w),container.h)
r2 = Container(container.x+r1.w,container.y,container.w-r1.w,container.h)
return [r1, r2] |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _split_horizontal(container):
r1 = None
r2 = None
min_h = int(H_RATIO*container.w)+1
if container.h < 2*min_h:
return None
r1 = Container(container.x,container.y,container.w,randint(min_h, container.h-min_h))
r2 = Container(container.x,container.y+r1.h,container.w,container.h-r1.h)
return [r1, r2] |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def split_container(container, iter):
root = Tree(container)
if iter != 0:
sr = random_split(container)
if sr!=None:
root.lchild = split_container(sr[0], iter-1)
root.rchild = split_container(sr[1], iter-1)
return root |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def draw_paths(c, tree):
if tree.lchild == None or tree.rchild == None:
return
tree.lchild.leaf.draw_path(c, tree.rchild.leaf)
draw_paths(c, tree.lchild)
draw_paths(c, tree.rchild) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def init(num_players):
global MAP_WIDTH,MAP_HEIGHT,N_ITERATIONS,H_RATIO,W_RATIO,MIN_ROOM_SIDE,CENTER_HUB_HOLE,CENTER_HUB_RADIO,MAP_NAME
MAP_WIDTH=int(500*sqrt(num_players))
MAP_HEIGHT=MAP_WIDTH
N_ITERATIONS=int(log(MAP_WIDTH*100,2))  # integer depth, so split_container's iter != 0 check can reach zero
H_RATIO=0.49
W_RATIO=H_RATIO
MIN_ROOM_SIDE = 32
CENTER_HUB_HOLE = 32
CENTER_HUB_RADIO = CENTER_HUB_HOLE-MIN_ROOM_SIDE/2
MAP_NAME="result%s.png"%MAP_WIDTH |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def get_attn(attn_type):
if isinstance(attn_type, torch.nn.Module):
return attn_type
module_cls = None
if attn_type is not None:
if isinstance(attn_type, str):
attn_type = attn_type.lower()
# Lightweight attention modules (channel and/or coarse spatial).
# Typically added to existing network architecture blocks in addition to existing convolutions.
if attn_type == 'se':
module_cls = SEModule
elif attn_type == 'ese':
module_cls = EffectiveSEModule
elif attn_type == 'eca':
module_cls = EcaModule
elif attn_type == 'ecam':
module_cls = partial(EcaModule, use_mlp=True)
elif attn_type == 'ceca':
module_cls = CecaModule
elif attn_type == 'ge':
module_cls = GatherExcite
elif attn_type == 'gc':
module_cls = GlobalContext
elif attn_type == 'gca':
module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
elif attn_type == 'cbam':
module_cls = CbamModule
elif attn_type == 'lcbam':
module_cls = LightCbamModule
# Attention / attention-like modules w/ significant params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'sk':
module_cls = SelectiveKernel
elif attn_type == 'splat':
module_cls = SplitAttn
# Self-attention / attention-like modules w/ significant compute and/or params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'lambda':
return LambdaLayer
elif attn_type == 'bottleneck':
return BottleneckAttn
elif attn_type == 'halo':
return HaloAttn
elif attn_type == 'nl':
module_cls = NonLocalAttn
elif attn_type == 'bat':
module_cls = BatNonLocalAttn
# Woops!
else:
assert False, "Invalid attn module (%s)" % attn_type
elif isinstance(attn_type, bool):
if attn_type:
module_cls = SEModule
else:
module_cls = attn_type
return module_cls |
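Typical use of this factory: resolve the string to a class, then instantiate it with a channel count. A hedged sketch assuming timm (the import path has moved between timm versions):

import torch
from timm.models.layers import get_attn  # in newer timm this lives under timm.layers

attn = get_attn('se')(64)          # SEModule over 64 channels
x = torch.randn(1, 64, 32, 32)
print(attn(x).shape)               # torch.Size([1, 64, 32, 32]); SE only reweights channels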
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'description': image.description,
'is_public': image.is_public,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def start(self, action_name: str) -> None:
"""Defines how to start recording an action.""" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def install_secret_key(app, filename='secret_key'):
"""Configure the SECRET_KEY from a file
in the instance directory.
If the file does not exist, print instructions
to create it from a shell with a random key,
then exit.
"""
filename = os.path.join(app.instance_path, filename)
try:
with open(filename, 'rb') as key_file:
    app.config['SECRET_KEY'] = key_file.read()
except IOError:
print('Error: No secret key. Create it with:')
full_path = os.path.dirname(filename)
if not os.path.isdir(full_path):
print('mkdir -p {filename}'.format(filename=full_path))
print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))
sys.exit(1) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def stop(self, action_name: str) -> None:
"""Defines how to record the duration once an action is complete.""" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def not_found(error):
return render_template('404.html'), 404 |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def summary(self) -> str:
"""Create profiler summary in text format.""" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def after_request(response):
response.headers.add('X-Test', 'This is only test.')
response.headers.add('Access-Control-Allow-Origin', '*') # TODO: set to real origin
return response |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def setup(self, **kwargs: Any) -> None:
"""Execute arbitrary pre-profiling set-up steps as defined by subclass.""" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def teardown(self, **kwargs: Any) -> None:
"""Execute arbitrary post-profiling tear-down steps as defined by subclass.""" |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(
self,
dirpath: Optional[Union[str, Path]] = None,
filename: Optional[str] = None,
) -> None:
self.dirpath = dirpath
self.filename = filename
self._output_file: Optional[TextIO] = None
self._write_stream: Optional[Callable] = None
self._local_rank: Optional[int] = None
self._log_dir: Optional[str] = None
self._stage: Optional[str] = None |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def profile(self, action_name: str) -> Generator:
"""
Yields a context manager to encapsulate the scope of a profiled action.
Example::
with self.profile('load training data'):
# load training data code
The profiler will start once you've entered the context and will automatically
stop once you exit the code block.
"""
try:
self.start(action_name)
yield action_name
finally:
self.stop(action_name) |
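A minimal concrete profiler honoring the start/stop/profile contract above; this is a sketch, not the library's actual SimpleProfiler:

import time
from contextlib import contextmanager

class WallClockProfiler:
    def __init__(self):
        self._starts, self.totals = {}, {}

    def start(self, action_name):
        self._starts[action_name] = time.monotonic()

    def stop(self, action_name):
        elapsed = time.monotonic() - self._starts.pop(action_name)
        self.totals[action_name] = self.totals.get(action_name, 0.0) + elapsed

    @contextmanager
    def profile(self, action_name):
        try:
            self.start(action_name)
            yield action_name
        finally:
            self.stop(action_name)

profiler = WallClockProfiler()
with profiler.profile('load training data'):
    time.sleep(0.01)
print(profiler.totals)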
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def profile_iterable(self, iterable: Iterable, action_name: str) -> Generator:
iterator = iter(iterable)
while True:
try:
self.start(action_name)
value = next(iterator)
self.stop(action_name)
yield value
except StopIteration:
self.stop(action_name)
break |
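Usage note: profile_iterable times each next() call on the wrapped iterable, so something like `for batch in self.profile_iterable(loader, "get_batch")` records one start/stop pair per item; the extra stop() in the StopIteration branch closes the timer opened for the final, empty fetch.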
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _rank_zero_info(self, *args, **kwargs) -> None:
if self._local_rank in (None, 0):
log.info(*args, **kwargs) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _prepare_filename(
self, action_name: Optional[str] = None, extension: str = ".txt", split_token: str = "-"
) -> str:
args = []
if self._stage is not None:
args.append(self._stage)
if self.filename:
args.append(self.filename)
if self._local_rank is not None:
args.append(str(self._local_rank))
if action_name is not None:
args.append(action_name)
filename = split_token.join(args) + extension
return filename |
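Worked example of the joining logic: with _stage='fit', filename='perf', _local_rank=0 and action_name='training_step', the parts join to 'fit-perf-0-training_step.txt'; any piece that is None or empty is simply skipped.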
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _prepare_streams(self) -> None:
if self._write_stream is not None:
return
if self.filename:
filepath = os.path.join(self.dirpath, self._prepare_filename())
fs = get_filesystem(filepath)
file = fs.open(filepath, "a")
self._output_file = file
self._write_stream = file.write
else:
self._write_stream = self._rank_zero_info |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def describe(self) -> None:
"""Logs a profile report after the conclusion of run."""
# there are pickling issues with open file handles in Python 3.6
# so to avoid them, we open and close the files within this function
# by calling `_prepare_streams` and `teardown`
self._prepare_streams()
summary = self.summary()
if summary:
self._write_stream(summary)
if self._output_file is not None:
self._output_file.flush()
self.teardown(stage=self._stage) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def _stats_to_str(self, stats: Dict[str, str]) -> str:
stage = f"{self._stage.upper()} " if self._stage is not None else ""
output = [stage + "Profiler Report"]
for action, value in stats.items():
header = f"Profile stats for: {action}"
if self._local_rank is not None:
header += f" rank: {self._local_rank}"
output.append(header)
output.append(value)
return os.linesep.join(output) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def setup(
self, stage: Optional[str] = None, local_rank: Optional[int] = None, log_dir: Optional[str] = None
) -> None:
"""Execute arbitrary pre-profiling set-up steps."""
self._stage = stage
self._local_rank = local_rank
self._log_dir = log_dir
self.dirpath = self.dirpath or log_dir |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def teardown(self, stage: Optional[str] = None) -> None:
"""
Execute arbitrary post-profiling tear-down steps.
Closes the currently open file and stream.
"""
self._write_stream = None
if self._output_file is not None:
self._output_file.close()
self._output_file = None # can't pickle TextIOWrapper |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __del__(self) -> None:
self.teardown(stage=self._stage) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def start(self, action_name: str) -> None:
raise NotImplementedError |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def stop(self, action_name: str) -> None:
raise NotImplementedError |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def summary(self) -> str:
raise NotImplementedError |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def local_rank(self) -> int:
return 0 if self._local_rank is None else self._local_rank |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def start(self, action_name: str) -> None:
pass |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def stop(self, action_name: str) -> None:
pass |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self, model, data):
# try and import pytorch
global torch
if torch is None:
import torch
if version.parse(torch.__version__) < version.parse("0.4"):
warnings.warn("Your PyTorch version is older than 0.4 and not supported.")
# check if we have multiple inputs
self.multi_input = isinstance(data, list)
if not self.multi_input:
    data = [data]
self.data = data
self.layer = None
self.input_handle = None
self.interim = False
self.interim_inputs_shape = None
self.expected_value = None # to keep the DeepExplainer base happy
if type(model) == tuple:
self.interim = True
model, layer = model
model = model.eval()
self.layer = layer
self.add_target_handle(self.layer)
# if we are taking an interim layer, the 'data' is going to be the input
# of the interim layer; we will capture this using a forward hook
with torch.no_grad():
_ = model(*data)
interim_inputs = self.layer.target_input
if type(interim_inputs) is tuple:
# this should always be true, but just to be safe
self.interim_inputs_shape = [i.shape for i in interim_inputs]
else:
self.interim_inputs_shape = [interim_inputs.shape]
self.target_handle.remove()
del self.layer.target_input
self.model = model.eval()
self.multi_output = False
self.num_outputs = 1
with torch.no_grad():
outputs = model(*data)
# also get the device everything is running on
self.device = outputs.device
if outputs.shape[1] > 1:
self.multi_output = True
self.num_outputs = outputs.shape[1]
self.expected_value = outputs.mean(0).cpu().numpy() |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def __init__(self, reddit, term, config, oauth, url=None, submission=None):
super(SubmissionPage, self).__init__(reddit, term, config, oauth)
self.controller = SubmissionController(self, keymap=config.keymap)
if url:
self.content = SubmissionContent.from_url(
reddit, url, term.loader,
max_comment_cols=config['max_comment_cols'])
else:
self.content = SubmissionContent(
submission, term.loader,
max_comment_cols=config['max_comment_cols'])
# Start at the submission post, which is indexed as -1
self.nav = Navigator(self.content.get, page_index=-1)
self.selected_subreddit = None |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def add_target_handle(self, layer):
input_handle = layer.register_forward_hook(get_target_input)
self.target_handle = input_handle |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def toggle_comment(self):
"Toggle the selected comment tree between visible and hidden"
current_index = self.nav.absolute_index
self.content.toggle(current_index)
# This logic handles a display edge case after a comment toggle. We
# want to make sure that when we re-draw the page, the cursor stays at
# its current absolute position on the screen. In order to do this,
# apply a fixed offset if, while inverted, we either try to hide the
# bottom comment or toggle any of the middle comments.
if self.nav.inverted:
data = self.content.get(current_index)
if data['hidden'] or self.nav.cursor_index != 0:
window = self._subwindows[-1][0]
n_rows, _ = window.getmaxyx()
self.nav.flip(len(self._subwindows) - 1)
self.nav.top_item_height = n_rows |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def add_handles(self, model, forward_handle, backward_handle):
"""
Add handles to all non-container layers in the model.
Recursively for non-container layers
"""
handles_list = []
model_children = list(model.children())
if model_children:
for child in model_children:
handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
else: # leaves
handles_list.append(model.register_forward_hook(forward_handle))
handles_list.append(model.register_backward_hook(backward_handle))
return handles_list |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def exit_submission(self):
"Close the submission and return to the subreddit page"
self.active = False |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def remove_attributes(self, model):
"""
Removes the x and y attributes which were added by the forward handles
Recursively searches for non-container layers
"""
for child in model.children():
if 'nn.modules.container' in str(type(child)):
self.remove_attributes(child)
else:
try:
del child.x
except AttributeError:
pass
try:
del child.y
except AttributeError:
pass |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def refresh_content(self, order=None, name=None):
"Re-download comments and reset the page index"
order = order or self.content.order
url = name or self.content.name
with self.term.loader('Refreshing page'):
self.content = SubmissionContent.from_url(
self.reddit, url, self.term.loader, order=order,
max_comment_cols=self.config['max_comment_cols'])
if not self.term.loader.exception:
self.nav = Navigator(self.content.get, page_index=-1) |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def gradient(self, idx, inputs):
self.model.zero_grad()
X = [x.requires_grad_() for x in inputs]
outputs = self.model(*X)
selected = [val for val in outputs[:, idx]]
grads = []
if self.interim:
interim_inputs = self.layer.target_input
for idx, input in enumerate(interim_inputs):
grad = torch.autograd.grad(selected, input,
retain_graph=True if idx + 1 < len(interim_inputs) else None,
allow_unused=True)[0]
if grad is not None:
grad = grad.cpu().numpy()
else:
grad = torch.zeros_like(X[idx]).cpu().numpy()
grads.append(grad)
del self.layer.target_input
return grads, [i.detach().cpu().numpy() for i in interim_inputs]
else:
for idx, x in enumerate(X):
grad = torch.autograd.grad(selected, x,
retain_graph=True if idx + 1 < len(X) else None,
allow_unused=True)[0]
if grad is not None:
grad = grad.cpu().numpy()
else:
grad = torch.zeros_like(X[idx]).cpu().numpy()
grads.append(grad)
return grads |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def prompt_subreddit(self):
"Open a prompt to navigate to a different subreddit"
name = self.term.prompt_input('Enter page: /')
if name is not None:
with self.term.loader('Loading page'):
content = SubredditContent.from_name(
self.reddit, name, self.term.loader)
if not self.term.loader.exception:
self.selected_subreddit = content
self.active = False |
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=False):
# X ~ self.model_input
# X_data ~ self.data
# check if we have multiple inputs
if not self.multi_input:
assert type(X) != list, "Expected a single tensor model input!"
X = [X]
else:
assert type(X) == list, "Expected a list of model inputs!"
X = [x.detach().to(self.device) for x in X]
if ranked_outputs is not None and self.multi_output:
with torch.no_grad():
model_output_values = self.model(*X)
# rank and determine the model outputs that we will explain
if output_rank_order == "max":
_, model_output_ranks = torch.sort(model_output_values, descending=True)
elif output_rank_order == "min":
_, model_output_ranks = torch.sort(model_output_values, descending=False)
elif output_rank_order == "max_abs":
_, model_output_ranks = torch.sort(torch.abs(model_output_values), descending=True)
else:
assert False, "output_rank_order must be max, min, or max_abs!"
model_output_ranks = model_output_ranks[:, :ranked_outputs]
else:
model_output_ranks = (torch.ones((X[0].shape[0], self.num_outputs)).int() *
torch.arange(0, self.num_outputs).int())
# add the gradient handles
handles = self.add_handles(self.model, add_interim_values, deeplift_grad)
if self.interim:
self.add_target_handle(self.layer)
# compute the attributions
output_phis = []
for i in range(model_output_ranks.shape[1]):
phis = []
if self.interim:
for k in range(len(self.interim_inputs_shape)):
phis.append(np.zeros((X[0].shape[0], ) + self.interim_inputs_shape[k][1: ]))
else:
for k in range(len(X)):
phis.append(np.zeros(X[k].shape))
for j in range(X[0].shape[0]):
# tile the inputs to line up with the background data samples
tiled_X = [X[l][j:j + 1].repeat(
(self.data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape) - 1)])) for l
in range(len(X))]
joint_x = [torch.cat((tiled_X[l], self.data[l]), dim=0) for l in range(len(X))]
# run attribution computation graph
feature_ind = model_output_ranks[j, i]
sample_phis = self.gradient(feature_ind, joint_x)
# assign the attributions to the right part of the output arrays
if self.interim:
sample_phis, output = sample_phis
x, data = [], []
for k in range(len(output)):
x_temp, data_temp = np.split(output[k], 2)
x.append(x_temp)
data.append(data_temp)
for l in range(len(self.interim_inputs_shape)):
phis[l][j] = (sample_phis[l][self.data[l].shape[0]:] * (x[l] - data[l])).mean(0)
else:
for l in range(len(X)):
phis[l][j] = (torch.from_numpy(sample_phis[l][self.data[l].shape[0]:]).to(self.device) * (X[l][j: j + 1] - self.data[l])).cpu().detach().numpy().mean(0)
output_phis.append(phis[0] if not self.multi_input else phis)
# cleanup; remove all gradient handles
for handle in handles:
handle.remove()
self.remove_attributes(self.model)
if self.interim:
self.target_handle.remove()
if not self.multi_output:
return output_phis[0]
elif ranked_outputs is not None:
return output_phis, model_output_ranks
else:
return output_phis |
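End to end, this explainer is normally reached through shap's public wrapper; a hedged sketch with a toy PyTorch model (shapes and the background set are made up, and exact return types vary across shap versions):

import torch
import shap

model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 3))
background = torch.randn(50, 4)   # samples the attributions are averaged against
test_x = torch.randn(5, 4)

explainer = shap.DeepExplainer(model, background)  # dispatches to the PyTorch implementation above
shap_values = explainer.shap_values(test_x)        # one (5, 4) attribution array per output class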
def emit(self, level, message):
raise NotImplementedError('Please implement an emit method') | def open_link(self):
"Open the selected item with the webbrowser"
data = self.get_selected_item()
url = data.get('permalink')
if url:
self.term.open_browser(url)
else:
self.term.flash() |