repo_name | ref | path | copies | content
---|---|---|---|---
vmax-feihu/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/auth/urls.py | 104 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
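# For example, a project could mount them under a prefix like this
# (an illustrative sketch; the '^accounts/' prefix is an assumption):
#
#     from django.conf.urls import include, patterns, url
#     urlpatterns = patterns('',
#         url(r'^accounts/', include('django.contrib.auth.urls')),
#     )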
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'^password_change/$', 'django.contrib.auth.views.password_change', name='password_change'),
url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done', name='password_change_done'),
url(r'^password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'),
url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),
# Support old style base36 password reset links; remove in Django 1.7
url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'django.contrib.auth.views.password_reset_confirm_uidb36'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'django.contrib.auth.views.password_reset_confirm',
name='password_reset_confirm'),
url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', name='password_reset_complete'),
)
|
lukeiwanski/tensorflow | refs/heads/master | tensorflow/python/ops/array_ops.py | 6 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors.
See the @{$python/array_ops} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("identity")
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.executing_eagerly():
input = ops.convert_to_tensor(input)
in_device = input.device
# TODO(ashankar): Does 'identity' need to invoke execution callbacks?
context_device = context.context().device_name
if not context_device:
context_device = "/job:localhost/replica:0/task:0/device:CPU:0"
if context_device != in_device:
return input._copy() # pylint: disable=protected-access
return input
else:
return gen_array_ops.identity(input, name=name)
# pylint: disable=redefined-builtin,protected-access
@tf_export("expand_dims")
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor`.
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if both `dim` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated(
"2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops.list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@tf_export("setdiff1d")
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
"""Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y)
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Returns the broadcasted static shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
@tf_export("shape")
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
return gen_array_ops.shape_n(input, out_type=out_type, name=name)
@tf_export("size")
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`,
of type `out_type` (defaults to `tf.int32`).
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type
of the operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type
of the operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if context.executing_eagerly() and not isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
input = ops.convert_to_tensor(input)
np_out_type = out_type.as_numpy_dtype
num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)  # pylint: disable=protected-access
return ops.convert_to_tensor(num_elements, dtype=out_type)
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
currently only basic indexing is supported. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# skip every other row and reverse the order of the columns
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
                                    #    [[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
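# Example (illustrative): for `foo[1:, ..., tf.newaxis]` the loop below
# produces begin=[1, 0, 0], end=[0, 0, 0], strides=[1, 1, 1],
# begin_mask=0b000, end_mask=0b001, ellipsis_mask=0b010, new_axis_mask=0b100.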
for s in slice_spec:
if isinstance(s, _BaseSlice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
# stack possibly involves no tensors, so we must use op_scope to pick the correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via @{tf.Tensor.__getitem__} and @{tf.Variable.__getitem__}.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1. `begin[i]`, `end[i]` and
`strides[i]` must imply a slice of size 1 in the dimension. For example in
Python one might do `foo[:, 3, :]` which would result in
`shrink_axis_mask` equal to 2.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
if not context.executing_eagerly():
# TODO(apassos) In eager mode assignment will be done by overriding
# __setitem__ instead.
op.assign = assign
return op
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See @{tf.Tensor.__getitem__} for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
As an operator. The operator also has a `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
return _slice_helper(var._AsTensor(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
available; in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is:
    tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops.parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.stack([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access
if value_shape is not None:
expanded_num_dims = len(value_shape) + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
expanded_num_dims))
return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
if context.executing_eagerly():
# NOTE: Fast path when all the items are tensors, this doesn't do any type
# checking.
if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
return gen_array_ops.pack(list_or_tuple, name=name)
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops.pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack.
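For example (an illustrative sketch mirroring the `stack` example above):
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.unstack(tf.stack([x, y, z]))  # [x, y, z]: three tensors of shape [2]
```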
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
dimension. Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
@tf_export("concat")
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
As in Python, the `axis` can also be negative. A negative `axis`
is interpreted as counting from the end of the rank, i.e. as the
`axis + rank(values)`-th dimension.
For example:
```python
t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
tf.concat([t1, t2], -1)
```
would produce:
```python
[[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing
for axis is 0-based. A positive axis in the range
`[0, rank(values))` refers to the `axis`-th dimension, and a negative axis
refers to the `axis + rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_is_compatible_with(
tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export("boolean_mask")
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from.
By default, axis is 0 which will mask from the first dimension. Otherwise
K + axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate([first_dim])
.concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
@tf_export("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = setdiff1d(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once the API deprecation
# period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once the API deprecation
# period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer type, `num_split`, then splits `value`
along dimension `axis` into `num_split` smaller tensors.
Requires that `num_split` evenly divides `value.shape[axis]`.
If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor
`size_splits`, and `value` is split into `len(size_splits)` pieces. The shape
of the `i`-th piece has the same size as the `value` except along dimension
`axis` where the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of
splits along split_dim or a 1-D integer `Tensor` containing
the sizes of each output tensor along split_dim. If a scalar then it must
evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
split dimension must match that of the `value`.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if size_splits._rank() == 0 and size_splits.dtype.is_integer:
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
@tf_export("transpose")
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
transpose_fn = (
gen_array_ops.conjugate_transpose
if (conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
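# For example (illustrative), a rank-3 input yields perm = [2, 1, 0],
# i.e. all dimensions reversed.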
ret = transpose_fn(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if not context.executing_eagerly():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = transpose_fn(a, perm, name=name)
return ret
# pylint: disable=invalid-name
@tf_export("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.matrix_transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `matrix_transpose` returns a new
tensor with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.matrix_transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
[a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
try:
if np.prod(shape) < 1000:
return constant(value, shape=shape, dtype=dtype, name=name)
except TypeError:
# Happens when shape is a Tensor, list with Tensor elements, etc.
pass
return None
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(zero, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export("zeros_like")
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.executing_eagerly():
if dtype is not None and dtype != tensor.dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
with ops.device(tensor.device):
return gen_array_ops.zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like, as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined() and
tensor.dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops.zeros_like(tensor, name=name)
@tf_export("ones_like")
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if not context.executing_eagerly():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(one, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export("placeholder")
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
@tf_export("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64, shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad")
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) has passed
# remove the "Pad" fallback here.
if constant_values != 0:
result = gen_array_ops.pad_v2(
tensor, paddings, constant_values, name=name)
else:
result = gen_array_ops.pad(tensor, paddings, name=name)
elif mode == "REFLECT":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if not context.executing_eagerly():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and not result.shape.is_fully_defined()
and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any((x is None for x in padding)):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
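  With `indexing='ij'` the first two outputs are transposed relative to the
  above (illustrative values):

  ```python
  X, Y = tf.meshgrid(x, y, indexing='ij')
  # X = [[1, 1, 1],
  #      [2, 2, 2],
  #      [3, 3, 3]]
  # Y = [[4, 5, 6],
  #      [4, 5, 6],
  #      [4, 5, 6]]
  ```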
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
    TypeError: When an unsupported keyword argument is passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
  unknown = None  # A None result means the dimension size is statically unknown.
  use_full_range = None  # A slice bound of None means use the full valid range.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
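# A worked example of _compute_size_of_strided_dim (illustrative, not part of
# the API): for a dimension of known size 10 and the spec slice(1, 7, 2),
# begin=1 and end=7 give interval_length=6, so the dimension contributes
# 6 // 2 = 3 elements. A shrink axis always contributes 1, and an unknown
# size or stride yields None.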
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      the length of `truth`.
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
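  For example (illustrative values):

  ```python
  paddings, crops = tf.required_space_to_batch_paddings(
      input_shape=[3, 5], block_shape=[2, 4])
  # paddings == [[0, 1], [0, 3]]  # padded sizes 4 and 8 are divisible
  # crops == [[0, 1], [0, 3]]     # by the corresponding block_shape
  ```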
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
  Raises:
    ValueError: if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape()[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack(
[[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
return result_paddings, result_crops
@tf_export("space_to_batch")
def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("batch_to_space")
def batch_to_space(input, crops, block_size, name=None): # pylint: disable=redefined-builtin
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("one_hot")
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.
  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
  If `indices` is a scalar the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
  If `dtype` is not provided, it will attempt to assume the data type of
  `on_value` or `off_value`, if one or both are passed in. If none of
  `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to
  `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
    TypeError: If dtype of either `on_value` or `off_value` doesn't match
      `dtype`
    TypeError: If dtypes of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot",
[indices, depth, on_value, off_value, axis,
dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists
else None)
off_dtype = (ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists
else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
name)
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative type for the cast. Whenever maxlen fits into tf.int32, so
    # do the lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
@tf_export("squeeze")
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "squeeze_dims", squeeze_dims)
if np.isscalar(axis):
axis = [axis]
return gen_array_ops.squeeze(input, axis, name)
@tf_export("where")
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
  elements. Keep in mind, the shape of the output tensor can vary depending on
  how many true values there are in the input. Indices are output in row-major
  order.
  If both `x` and `y` are non-None, they must have the same shape.
  The `condition` tensor must be a scalar if `x` and `y` are scalar.
  If `x` and `y` are vectors or higher rank, then `condition` must be either a
  vector with size matching the first dimension of `x`, or must have the same
  shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
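  For example:

  ```python
  # Coordinates of the true elements:
  tf.where([True, False, False, True])  # [[0], [3]]

  # Element-wise selection between `x` and `y`:
  tf.where([True, False], x=[1, 2], y=[100, 200])  # [1, 200]
  ```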
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
    name: A name for the operation (optional).
Returns:
    A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
# pylint: disable=redefined-builtin
@tf_export("reverse_sequence")
@deprecation.deprecated_args(
None, "seq_dim is deprecated, use seq_axis instead", "seq_dim")
@deprecation.deprecated_args(
None, "batch_dim is deprecated, use batch_axis instead", "batch_dim")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
# pylint: enable=redefined-builtin
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
@tf_export("gather")
def gather(params, indices, validate_indices=None, name=None, axis=0):
del validate_indices
if axis != 0:
# Note that we do a sparse_read here to avoid snapshotting the entire
# resource variable and doing a gather, which can be inefficient and lead to
# subtle race conditions. TODO(apassos) implement axis != 0 on sparse_read
return gen_array_ops.gather_v2(params, indices, axis, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables without
# introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
gather.__doc__ = gen_array_ops.gather_v2.__doc__
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
@tf_export("quantize_v2")
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantize` instead.")
def quantize_v2(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO"):
return gen_array_ops.quantize_v2(input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode)
quantize_v2.__doc__ = """Please use `tf.quantize` instead."""
# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate
# tf.quantize_v2 in the next version of TensorFlow.
@tf_export("quantize")
def quantize(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name)
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
|
utecuy/edx-platform | refs/heads/master | lms/djangoapps/commerce/api/v1/tests/test_models.py | 127 | """ Tests for models. """
import ddt
from django.test import TestCase
from commerce.api.v1.models import Course
from course_modes.models import CourseMode
@ddt.ddt
class CourseTests(TestCase):
""" Tests for Course model. """
def setUp(self):
super(CourseTests, self).setUp()
self.course = Course('a/b/c', [])
@ddt.unpack
@ddt.data(
('credit', 'Credit'),
('professional', 'Professional Education'),
('no-id-professional', 'Professional Education'),
('verified', 'Verified Certificate'),
('honor', 'Honor Certificate'),
('audit', 'Audit'),
)
def test_get_mode_display_name(self, slug, expected_display_name):
""" Verify the method properly maps mode slugs to display names. """
mode = CourseMode(mode_slug=slug)
self.assertEqual(self.course.get_mode_display_name(mode), expected_display_name)
def test_get_mode_display_name_unknown_slug(self):
""" Verify the method returns the slug if it has no known mapping. """
mode = CourseMode(mode_slug='Blah!')
self.assertEqual(self.course.get_mode_display_name(mode), mode.mode_slug)
|
insomnia-lab/calibre | refs/heads/master | src/calibre/utils/Zeroconf.py | 5 | """ Multicast DNS Service Discovery for Python
Copyright (C) 2003, Paul Scott-Murphy
Copyright (C) 2009, Alexander Solovyov
This module provides a framework for the use of DNS Service Discovery
using IP multicast. It has been tested against the JRendezvous
implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
against the mDNSResponder from Mac OS X 10.3.8, 10.5.6, and against
Avahi library under various Linux distributions.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
"""
"""0.13 update - fix IPv6 support
some cleanups in code"""
"""0.12 update - allow selection of binding interface
typo fix - Thanks A. M. Kuchlingi
removed all use of word 'Rendezvous' - this is an API change"""
"""0.11 update - correction to comments for addListener method
support for new record types seen from OS X
- IPv6 address
- hostinfo
ignore unknown DNS record types
fixes to name decoding
works alongside other processes using port 5353 (e.g. on Mac OS X)
tested against Mac OS X 10.3.2's mDNSResponder
corrections to removal of list entries for service browser"""
"""0.10 update - Jonathon Paisley contributed these corrections:
always multicast replies, even when query is unicast
correct a pointer encoding problem
can now write records in any order
traceback shown on failure
better TXT record parsing
server is now separate from name
can cancel a service browser
modified some unit tests to accommodate these changes"""
"""0.09 update - remove all records on service unregistration
fix DOS security problem with readName"""
"""0.08 update - changed licensing to LGPL"""
"""0.07 update - faster shutdown on engine
pointer encoding of outgoing names
ServiceBrowser now works
new unit tests"""
"""0.06 update - small improvements with unit tests
added defined exception types
new style objects
fixed hostname/interface problem
fixed socket timeout problem
fixed addServiceListener() typo bug
using select() for socket reads
tested on Debian unstable with Python 2.2.2"""
"""0.05 update - ensure case insensitivty on domain names
support for unicast DNS queries"""
"""0.04 update - added some unit tests
added __ne__ adjuncts where required
ensure names end in '.local.'
timeout on receiving socket for clean shutdown"""
__author__ = "Paul Scott-Murphy"
__email__ = "paul at scott dash murphy dot com"
__version__ = "0.12"
import string
import time
import struct
import socket
import threading
import select
import traceback
__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
# hook for threads
globals()['_GLOBAL_DONE'] = 0
# Some timing constants
_UNREGISTER_TIME = 125
_CHECK_TIME = 175
_REGISTER_TIME = 225
_LISTENER_TIME = 200
_BROWSER_TIME = 500
# Some DNS constants
_MDNS_ADDR = '224.0.0.251'
_MDNS_PORT = 5353
_DNS_PORT = 53
_DNS_TTL = 60 * 60  # one hour default TTL
_MAX_MSG_TYPICAL = 1460 # unused
_MAX_MSG_ABSOLUTE = 8972
_FLAGS_QR_MASK = 0x8000 # query response mask
_FLAGS_QR_QUERY = 0x0000 # query
_FLAGS_QR_RESPONSE = 0x8000 # response
_FLAGS_AA = 0x0400 # Authoritative answer
_FLAGS_TC = 0x0200 # Truncated
_FLAGS_RD = 0x0100 # Recursion desired
_FLAGS_RA = 0x0080 # Recursion available (bit 7 of the flags word)
_FLAGS_Z = 0x0040 # Zero
_FLAGS_AD = 0x0020 # Authentic data
_FLAGS_CD = 0x0010 # Checking disabled
_CLASS_IN = 1
_CLASS_CS = 2
_CLASS_CH = 3
_CLASS_HS = 4
_CLASS_NONE = 254
_CLASS_ANY = 255
_CLASS_MASK = 0x7FFF
_CLASS_UNIQUE = 0x8000
_TYPE_A = 1
_TYPE_NS = 2
_TYPE_MD = 3
_TYPE_MF = 4
_TYPE_CNAME = 5
_TYPE_SOA = 6
_TYPE_MB = 7
_TYPE_MG = 8
_TYPE_MR = 9
_TYPE_NULL = 10
_TYPE_WKS = 11
_TYPE_PTR = 12
_TYPE_HINFO = 13
_TYPE_MINFO = 14
_TYPE_MX = 15
_TYPE_TXT = 16
_TYPE_AAAA = 28
_TYPE_SRV = 33
_TYPE_ANY = 255
# Mapping constants to names
_CLASSES = { _CLASS_IN : "in",
_CLASS_CS : "cs",
_CLASS_CH : "ch",
_CLASS_HS : "hs",
_CLASS_NONE : "none",
_CLASS_ANY : "any" }
_TYPES = { _TYPE_A : "a",
_TYPE_NS : "ns",
_TYPE_MD : "md",
_TYPE_MF : "mf",
_TYPE_CNAME : "cname",
_TYPE_SOA : "soa",
_TYPE_MB : "mb",
_TYPE_MG : "mg",
_TYPE_MR : "mr",
_TYPE_NULL : "null",
_TYPE_WKS : "wks",
_TYPE_PTR : "ptr",
_TYPE_HINFO : "hinfo",
_TYPE_MINFO : "minfo",
_TYPE_MX : "mx",
_TYPE_TXT : "txt",
_TYPE_AAAA : "quada",
_TYPE_SRV : "srv",
_TYPE_ANY : "any" }
# utility functions
def currentTimeMillis():
"""Current system time in milliseconds"""
return time.time() * 1000
def ntop(address):
"""Convert address to its string representation"""
    af = socket.AF_INET if len(address) == 4 else socket.AF_INET6
return socket.inet_ntop(af, address)
def address_type(address):
"""Return appropriate record type for an address"""
    return _TYPE_A if len(address) == 4 else _TYPE_AAAA
# Exceptions
class NonLocalNameException(Exception):
pass
class NonUniqueNameException(Exception):
pass
class NamePartTooLongException(Exception):
pass
class AbstractMethodException(Exception):
pass
class BadTypeInNameException(Exception):
pass
class BadDomainName(Exception):
def __init__(self, pos):
Exception.__init__(self, "at position " + str(pos))
class BadDomainNameCircular(BadDomainName):
pass
# implementation classes
class DNSEntry(object):
"""A DNS entry"""
def __init__(self, name, type, clazz):
        self.key = name.lower()
self.name = name
self.type = type
self.clazz = clazz & _CLASS_MASK
self.unique = (clazz & _CLASS_UNIQUE) != 0
def __eq__(self, other):
"""Equality test on name, type, and class"""
if isinstance(other, DNSEntry):
return self.name == other.name and self.type == other.type and self.clazz == other.clazz
return 0
def __ne__(self, other):
"""Non-equality test"""
return not self.__eq__(other)
def getClazz(self, clazz):
"""Class accessor"""
try:
return _CLASSES[clazz]
        except KeyError:
            return "?(%s)" % (clazz)
def getType(self, type):
"""Type accessor"""
try:
return _TYPES[type]
        except KeyError:
            return "?(%s)" % (type)
def toString(self, hdr, other):
"""String representation with additional information"""
result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
if self.unique:
result += "-unique,"
else:
result += ","
result += self.name
if other is not None:
result += ",%s]" % (other)
else:
result += "]"
return result
class DNSQuestion(DNSEntry):
"""A DNS question entry"""
def __init__(self, name, type, clazz):
if not name.endswith(".local."):
raise NonLocalNameException(name)
DNSEntry.__init__(self, name, type, clazz)
def answeredBy(self, rec):
"""Returns true if the question is answered by the record"""
return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
def __repr__(self):
"""String representation"""
return DNSEntry.toString(self, "question", None)
class DNSRecord(DNSEntry):
"""A DNS record - like a DNS entry, but has a TTL"""
def __init__(self, name, type, clazz, ttl):
DNSEntry.__init__(self, name, type, clazz)
self.ttl = ttl
self.created = currentTimeMillis()
def __eq__(self, other):
"""Tests equality as per DNSRecord"""
if isinstance(other, DNSRecord):
return DNSEntry.__eq__(self, other)
return 0
def suppressedBy(self, msg):
"""Returns true if any answer in a message can suffice for the
information held in this record."""
for record in msg.answers:
if self.suppressedByAnswer(record):
return 1
return 0
def suppressedByAnswer(self, other):
"""Returns true if another record has same name, type and class,
and if its TTL is at least half of this record's."""
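        # This is mDNS known-answer suppression: a peer's answer is only
        # considered sufficient while its TTL exceeds half of this record's.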
if self == other and other.ttl > (self.ttl / 2):
return 1
return 0
def getExpirationTime(self, percent):
"""Returns the time at which this record will have expired
by a certain percentage."""
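        # created is in milliseconds and ttl in seconds, so
        # percent * ttl * 10 == (ttl * 1000) * (percent / 100.0).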
return self.created + (percent * self.ttl * 10)
def getRemainingTTL(self, now):
"""Returns the remaining TTL in seconds."""
return max(0, (self.getExpirationTime(100) - now) / 1000)
def isExpired(self, now):
"""Returns true if this record has expired."""
return self.getExpirationTime(100) <= now
def isStale(self, now):
"""Returns true if this record is at least half way expired."""
return self.getExpirationTime(50) <= now
def resetTTL(self, other):
"""Sets this record's TTL and created time to that of
another record."""
self.created = other.created
self.ttl = other.ttl
def write(self, out):
"""Abstract method"""
raise AbstractMethodException
def toString(self, other):
"""String representation with addtional information"""
arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
return DNSEntry.toString(self, "record", arg)
class DNSAddress(DNSRecord):
"""A DNS address record"""
def __init__(self, name, type, clazz, ttl, address):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.address = address
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.address, len(self.address))
def __eq__(self, other):
"""Tests equality on address"""
if isinstance(other, DNSAddress):
return self.address == other.address
return 0
def __repr__(self):
"""String representation"""
try:
return 'record[%s]' % ntop(self.address)
except:
return 'record[%s]' % self.address
class DNSHinfo(DNSRecord):
"""A DNS host information record"""
def __init__(self, name, type, clazz, ttl, cpu, os):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.cpu = cpu
self.os = os
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.cpu, len(self.cpu))
out.writeString(self.os, len(self.os))
def __eq__(self, other):
"""Tests equality on cpu and os"""
if isinstance(other, DNSHinfo):
return self.cpu == other.cpu and self.os == other.os
return 0
def __repr__(self):
"""String representation"""
return self.cpu + " " + self.os
class DNSPointer(DNSRecord):
"""A DNS pointer record"""
def __init__(self, name, type, clazz, ttl, alias):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.alias = alias
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeName(self.alias)
def __eq__(self, other):
"""Tests equality on alias"""
if isinstance(other, DNSPointer):
return self.alias == other.alias
return 0
def __repr__(self):
"""String representation"""
return self.toString(self.alias)
class DNSText(DNSRecord):
"""A DNS text record"""
def __init__(self, name, type, clazz, ttl, text):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.text = text
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeString(self.text, len(self.text))
def __eq__(self, other):
"""Tests equality on text"""
if isinstance(other, DNSText):
return self.text == other.text
return 0
def __repr__(self):
"""String representation"""
if len(self.text) > 10:
return self.toString(self.text[:7] + "...")
else:
return self.toString(self.text)
class DNSService(DNSRecord):
"""A DNS service record"""
def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
DNSRecord.__init__(self, name, type, clazz, ttl)
self.priority = priority
self.weight = weight
self.port = port
self.server = server
def write(self, out):
"""Used in constructing an outgoing packet"""
out.writeShort(self.priority)
out.writeShort(self.weight)
out.writeShort(self.port)
out.writeName(self.server)
def __eq__(self, other):
"""Tests equality on priority, weight, port and server"""
if isinstance(other, DNSService):
return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server
return 0
def __repr__(self):
"""String representation"""
return self.toString("%s:%s" % (self.server, self.port))
class DNSIncoming(object):
"""Object representation of an incoming DNS packet"""
def __init__(self, data):
"""Constructor from string holding bytes of packet"""
self.offset = 0
self.data = data
self.questions = []
self.answers = []
self.numQuestions = 0
self.numAnswers = 0
self.numAuthorities = 0
self.numAdditionals = 0
self.readHeader()
self.readQuestions()
self.readOthers()
def readHeader(self):
"""Reads header portion of packet"""
format = '!HHHHHH'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
self.id = info[0]
self.flags = info[1]
self.numQuestions = info[2]
self.numAnswers = info[3]
self.numAuthorities = info[4]
self.numAdditionals = info[5]
def readQuestions(self):
"""Reads questions section of packet"""
format = '!HH'
length = struct.calcsize(format)
for i in range(0, self.numQuestions):
name = self.readName()
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
try:
question = DNSQuestion(name, info[0], info[1])
self.questions.append(question)
except NonLocalNameException:
pass
def readInt(self):
"""Reads an integer from the packet"""
format = '!I'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
return info[0]
def readCharacterString(self):
"""Reads a character string from the packet"""
length = ord(self.data[self.offset])
self.offset += 1
return self.readString(length)
    def readString(self, strlen):
        """Reads a string of a given length from the packet"""
        format = '!' + str(strlen) + 's'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
return info[0]
def readUnsignedShort(self):
"""Reads an unsigned short from the packet"""
format = '!H'
length = struct.calcsize(format)
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
return info[0]
def readOthers(self):
"""Reads the answers, authorities and additionals section of the packet"""
format = '!HHiH'
length = struct.calcsize(format)
n = self.numAnswers + self.numAuthorities + self.numAdditionals
for i in range(0, n):
domain = self.readName()
info = struct.unpack(format, self.data[self.offset:self.offset+length])
self.offset += length
rec = None
if info[0] == _TYPE_A:
rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4))
elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
rec = DNSPointer(domain, info[0], info[1], info[2], self.readName())
elif info[0] == _TYPE_TXT:
rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3]))
elif info[0] == _TYPE_SRV:
rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName())
elif info[0] == _TYPE_HINFO:
rec = DNSHinfo(domain, info[0], info[1], info[2], self.readCharacterString(), self.readCharacterString())
elif info[0] == _TYPE_AAAA:
rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16))
else:
# Skip unknown record type (using DNS length field)
self.offset += info[3]
if rec is not None:
self.answers.append(rec)
def isQuery(self):
"""Returns true if this is a query"""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
def isResponse(self):
"""Returns true if this is a response"""
return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
    def readUTF(self, offset, size):
        """Reads a UTF-8 string of a given length from the packet"""
        return self.data[offset:offset+size].decode('utf-8')
def readName(self):
"""Reads a domain name from the packet"""
result = ''
off = self.offset
next = -1
first = off
        while 1:
            length = ord(self.data[off])
            off += 1
            if length == 0:
                break
            t = length & 0xC0
            if t == 0x00:
                result = ''.join((result, self.readUTF(off, length) + '.'))
                off += length
            elif t == 0xC0:
                if next < 0:
                    next = off + 1
                off = ((length & 0x3F) << 8) | ord(self.data[off])
                if off >= first:
                    raise BadDomainNameCircular(off)
                first = off
            else:
                raise BadDomainName(off)
if next >= 0:
self.offset = next
else:
self.offset = off
return result
class DNSOutgoing(object):
"""Object representation of an outgoing packet"""
def __init__(self, flags, multicast = 1):
self.finished = 0
self.id = 0
self.multicast = multicast
self.flags = flags
self.names = {}
self.data = []
self.size = 12
self.questions = []
self.answers = []
self.authorities = []
self.additionals = []
def addQuestion(self, record):
"""Adds a question"""
self.questions.append(record)
def addAnswer(self, inp, record):
"""Adds an answer"""
if not record.suppressedBy(inp):
self.addAnswerAtTime(record, 0)
def addAnswerAtTime(self, record, now):
"""Adds an answer if if does not expire by a certain time"""
if record is not None:
if now == 0 or not record.isExpired(now):
self.answers.append((record, now))
def addAuthorativeAnswer(self, record):
"""Adds an authoritative answer"""
self.authorities.append(record)
def addAdditionalAnswer(self, record):
"""Adds an additional answer"""
self.additionals.append(record)
def writeByte(self, value):
"""Writes a single byte to the packet"""
format = '!c'
self.data.append(struct.pack(format, chr(value)))
self.size += 1
def insertShort(self, index, value):
"""Inserts an unsigned short in a certain position in the packet"""
format = '!H'
self.data.insert(index, struct.pack(format, value))
self.size += 2
def writeShort(self, value):
"""Writes an unsigned short to the packet"""
format = '!H'
self.data.append(struct.pack(format, value))
self.size += 2
def writeInt(self, value):
"""Writes an unsigned integer to the packet"""
format = '!I'
self.data.append(struct.pack(format, int(value)))
self.size += 4
def writeString(self, value, length):
"""Writes a string to the packet"""
format = '!' + str(length) + 's'
self.data.append(struct.pack(format, value))
self.size += length
def writeUTF(self, s):
"""Writes a UTF-8 string of a given length to the packet"""
utfstr = s.encode('utf-8')
length = len(utfstr)
        if length > 63:
            # DNS labels are limited to 63 octets
            raise NamePartTooLongException
self.writeByte(length)
self.writeString(utfstr, length)
def writeName(self, name):
"""Writes a domain name to the packet"""
try:
# Find existing instance of this name in packet
#
index = self.names[name]
except KeyError:
# No record of this name already, so write it
# out as normal, recording the location of the name
# for future pointers to it.
#
self.names[name] = self.size
parts = name.split('.')
if parts[-1] == '':
parts = parts[:-1]
for part in parts:
self.writeUTF(part)
self.writeByte(0)
return
# An index was found, so write a pointer to it
#
self.writeByte((index >> 8) | 0xC0)
self.writeByte(index)
def writeQuestion(self, question):
"""Writes a question to the packet"""
self.writeName(question.name)
self.writeShort(question.type)
self.writeShort(question.clazz)
def writeRecord(self, record, now):
"""Writes a record (answer, authoritative answer, additional) to
the packet"""
self.writeName(record.name)
self.writeShort(record.type)
if record.unique and self.multicast:
self.writeShort(record.clazz | _CLASS_UNIQUE)
else:
self.writeShort(record.clazz)
if now == 0:
self.writeInt(record.ttl)
else:
self.writeInt(record.getRemainingTTL(now))
index = len(self.data)
# Adjust size for the short we will write before this record
#
self.size += 2
record.write(self)
self.size -= 2
length = len(''.join(self.data[index:]))
self.insertShort(index, length) # Here is the short we adjusted for
def packet(self):
"""Returns a string containing the packet's bytes
No further parts should be added to the packet once this
is done."""
if not self.finished:
self.finished = 1
for question in self.questions:
self.writeQuestion(question)
            for answer, when in self.answers:
                self.writeRecord(answer, when)
for authority in self.authorities:
self.writeRecord(authority, 0)
for additional in self.additionals:
self.writeRecord(additional, 0)
self.insertShort(0, len(self.additionals))
self.insertShort(0, len(self.authorities))
self.insertShort(0, len(self.answers))
self.insertShort(0, len(self.questions))
self.insertShort(0, self.flags)
if self.multicast:
self.insertShort(0, 0)
else:
self.insertShort(0, self.id)
return ''.join(self.data)
class DNSCache(object):
"""A cache of DNS entries"""
def __init__(self):
self.cache = {}
def add(self, entry):
"""Adds an entry"""
        try:
            entries = self.cache[entry.key]
        except KeyError:
            entries = self.cache[entry.key] = []
        entries.append(entry)
def remove(self, entry):
"""Removes an entry"""
        try:
            self.cache[entry.key].remove(entry)
        except (KeyError, ValueError):
            pass
def get(self, entry):
"""Gets an entry by key. Will return None if there is no
matching entry."""
        try:
            entries = self.cache[entry.key]
            return entries[entries.index(entry)]
        except (KeyError, ValueError):
            return None
def getByDetails(self, name, type, clazz):
"""Gets an entry by details. Will return None if there is
no matching entry."""
entry = DNSEntry(name, type, clazz)
return self.get(entry)
def entriesWithName(self, name):
"""Returns a list of entries whose key matches the name."""
        try:
            return self.cache[name]
        except KeyError:
            return []
def entries(self):
"""Returns a list of all entries"""
        def add(x, y): return x + y
        try:
            return reduce(add, self.cache.values())
        except TypeError:
            # reduce() raises TypeError when the cache is empty
            return []
class Engine(threading.Thread):
"""An engine wraps read access to sockets, allowing objects that
need to receive data from sockets to be called back when the
sockets are ready.
A reader needs a handle_read() method, which is called when the socket
it is interested in is ready for reading.
Writers are not implemented here, because we only send short
packets.
"""
def __init__(self, zeroconf):
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.readers = {} # maps socket to reader
self.timeout = 5
self.condition = threading.Condition()
self.setDaemon(True) # By Kovid
self.start()
def run(self):
while not globals()['_GLOBAL_DONE']:
rs = self.getReaders()
if len(rs) == 0:
# No sockets to manage, but we wait for the timeout
# or addition of a socket
#
self.condition.acquire()
self.condition.wait(self.timeout)
self.condition.release()
else:
from calibre.constants import DEBUG
try:
rr, wr, er = select.select(rs, [], [], self.timeout)
if globals()['_GLOBAL_DONE']:
continue
                    for sock in rr:
                        try:
                            self.readers[sock].handle_read()
except:
if DEBUG:
traceback.print_exc()
except:
pass
def getReaders(self):
self.condition.acquire()
result = self.readers.keys()
self.condition.release()
return result
def addReader(self, reader, socket):
self.condition.acquire()
self.readers[socket] = reader
self.condition.notify()
self.condition.release()
def delReader(self, socket):
self.condition.acquire()
del(self.readers[socket])
self.condition.notify()
self.condition.release()
def notify(self):
self.condition.acquire()
self.condition.notify()
self.condition.release()
class Listener(object):
"""A Listener is used by this module to listen on the multicast
group to which DNS messages are sent, allowing the implementation
to cache information as it arrives.
It requires registration with an Engine object in order to have
    the read() method called when a socket is available for reading."""
def __init__(self, zeroconf):
self.zeroconf = zeroconf
self.zeroconf.engine.addReader(self, self.zeroconf.socket)
def handle_read(self):
data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
self.data = data
msg = DNSIncoming(data)
if msg.isQuery():
# Always multicast responses
#
if port == _MDNS_PORT:
self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
# If it's not a multicast query, reply via unicast
# and multicast
#
elif port == _DNS_PORT:
self.zeroconf.handleQuery(msg, addr, port)
self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
else:
self.zeroconf.handleResponse(msg)
class Reaper(threading.Thread):
"""A Reaper is used by this module to remove cache entries that
have expired."""
def __init__(self, zeroconf):
threading.Thread.__init__(self)
self.setDaemon(True) # By Kovid
self.zeroconf = zeroconf
self.start()
def run(self):
while 1:
try:
self.zeroconf.wait(10 * 1000)
except TypeError: # By Kovid
globals()['_GLOBAL_DONE'] = 1
return
if globals()['_GLOBAL_DONE']:
return
try:
# can get here in a race condition with shutdown. Swallow the
# exception and run around the loop again.
now = currentTimeMillis()
for record in self.zeroconf.cache.entries():
if record.isExpired(now):
self.zeroconf.updateRecord(now, record)
self.zeroconf.cache.remove(record)
except:
pass
class ServiceBrowser(threading.Thread):
"""Used to browse for a service of a specific type.
The listener object will have its addService() and
removeService() methods called when this browser
    discovers changes in the services' availability."""
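    # Listener sketch -- the listener only needs these two callbacks:
    #
    #     class MyListener(object):
    #         def addService(self, zeroconf, type, name):
    #             print "Service added:", name
    #         def removeService(self, zeroconf, type, name):
    #             print "Service removed:", name
    #
    #     browser = ServiceBrowser(zc, "_http._tcp.local.", MyListener())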
def __init__(self, zeroconf, type, listener):
"""Creates a browser for a specific type"""
threading.Thread.__init__(self)
self.zeroconf = zeroconf
self.type = type
self.listener = listener
self.services = {}
self.nextTime = currentTimeMillis()
self.delay = _BROWSER_TIME
self.list = []
self.done = 0
self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
self.start()
def updateRecord(self, zeroconf, now, record):
"""Callback invoked by Zeroconf when new information arrives.
Updates information required by browser in the Zeroconf cache."""
if record.type == _TYPE_PTR and record.name == self.type:
expired = record.isExpired(now)
try:
oldrecord = self.services[record.alias.lower()]
if not expired:
oldrecord.resetTTL(record)
else:
del(self.services[record.alias.lower()])
callback = lambda x: self.listener.removeService(x, self.type, record.alias)
self.list.append(callback)
return
            except KeyError:
if not expired:
self.services[record.alias.lower()] = record
callback = lambda x: self.listener.addService(x, self.type, record.alias)
self.list.append(callback)
expires = record.getExpirationTime(75)
if expires < self.nextTime:
self.nextTime = expires
def cancel(self):
self.done = 1
self.zeroconf.notifyAll()
def run(self):
while 1:
event = None
now = currentTimeMillis()
if len(self.list) == 0 and self.nextTime > now:
self.zeroconf.wait(self.nextTime - now)
if globals()['_GLOBAL_DONE'] or self.done:
return
now = currentTimeMillis()
if self.nextTime <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
for record in self.services.values():
if not record.isExpired(now):
out.addAnswerAtTime(record, now)
self.zeroconf.send(out)
self.nextTime = now + self.delay
self.delay = min(20 * 1000, self.delay * 2)
if len(self.list) > 0:
event = self.list.pop(0)
if event is not None:
event(self.zeroconf)
class ServiceInfo(object):
"""Service information"""
def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
"""Create a service description.
type: fully qualified service type name
name: fully qualified service name
        address: IP address as a packed byte string, network byte order
port: port that the service runs on
weight: weight of the service
priority: priority of the service
properties: dictionary of properties (or a string holding the bytes for the text field)
server: fully qualified name for service host (defaults to name)"""
if not name.endswith(type):
raise BadTypeInNameException
self.type = type
self.name = name
self.address = address
if address:
self.ip_type = address_type(address)
self.port = port
self.weight = weight
self.priority = priority
if server:
self.server = server
else:
self.server = name
self.setProperties(properties)
def setProperties(self, properties):
"""Sets properties and text of this info from a dictionary"""
if isinstance(properties, dict):
self.properties = properties
list = []
result = ''
for key in properties:
value = properties[key]
if value is None:
suffix = ''
elif isinstance(value, str):
suffix = value
elif isinstance(value, int):
suffix = value and 'true' or 'false'
else:
suffix = ''
list.append('='.join((key, suffix)))
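            # Each key=value pair is serialized as a length-prefixed string,
            # which is the wire format of a DNS TXT record.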
for item in list:
result = ''.join((result, struct.pack('!c', chr(len(item))), item))
self.text = result
else:
self.text = properties
def setText(self, text):
"""Sets properties and text given a text field"""
self.text = text
try:
result = {}
end = len(text)
index = 0
strs = []
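            # Walk the length-prefixed strings that make up the TXT record.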
while index < end:
length = ord(text[index])
index += 1
strs.append(text[index:index+length])
index += length
for s in strs:
eindex = s.find('=')
if eindex == -1:
# No equals sign at all
key = s
value = 0
else:
key = s[:eindex]
value = s[eindex+1:]
if value == 'true':
value = 1
elif value == 'false' or not value:
value = 0
# Only update non-existent properties
                if key and result.get(key) is None:
result[key] = value
self.properties = result
except:
traceback.print_exc()
self.properties = None
def getType(self):
"""Type accessor"""
return self.type
def getName(self):
"""Name accessor"""
if self.type is not None and self.name.endswith("." + self.type):
return self.name[:len(self.name) - len(self.type) - 1]
return self.name
def getAddress(self):
"""Address accessor"""
return self.address
def getPort(self):
"""Port accessor"""
return self.port
def getPriority(self):
"""Pirority accessor"""
return self.priority
def getWeight(self):
"""Weight accessor"""
return self.weight
def getProperties(self):
"""Properties accessor"""
return self.properties
def getText(self):
"""Text accessor"""
return self.text
def getServer(self):
"""Server accessor"""
return self.server
def updateRecord(self, zeroconf, now, record):
"""Updates service information from a DNS record"""
if record is None or record.isExpired(now):
return
if (record.type in (_TYPE_A, _TYPE_AAAA) and
record.name == self.server):
self.address = record.address
elif record.type == _TYPE_SRV and record.name == self.name:
self.server = record.server
self.port = record.port
self.weight = record.weight
self.priority = record.priority
self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
elif record.type == _TYPE_TXT and record.name == self.name:
self.setText(record.text)
def request(self, zeroconf, timeout):
"""Returns true if the service could be discovered on the
network, and updates this object with details discovered.
"""
now = currentTimeMillis()
delay = _LISTENER_TIME
next = now + delay
last = now + timeout
result = 0
try:
zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
while self.server is None or self.address is None or self.text is None:
if last <= now:
return 0
if next <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
if self.server is not None:
out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
zeroconf.send(out)
next = now + delay
delay = delay * 2
zeroconf.wait(min(next, last) - now)
now = currentTimeMillis()
result = 1
finally:
zeroconf.removeListener(self)
return result
def __eq__(self, other):
"""Tests equality of service name"""
if isinstance(other, ServiceInfo):
return other.name == self.name
return 0
def __ne__(self, other):
"""Non-equality test"""
return not self.__eq__(other)
def __repr__(self):
"""String representation"""
result = "service[%s,%s:%s," % (self.name, ntop(self.getAddress()), self.port)
if self.text is None:
result += "None"
else:
if len(self.text) < 20:
result += self.text
else:
result += self.text[:17] + "..."
result += "]"
return result
class Zeroconf(object):
"""Implementation of Zeroconf Multicast DNS Service Discovery
Supports registration, unregistration, queries and browsing.
"""
def __init__(self, bindaddress=None):
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads."""
globals()['_GLOBAL_DONE'] = 0
if bindaddress is None:
self.intf = socket.gethostbyname(socket.gethostname())
else:
self.intf = bindaddress
self.group = ('', _MDNS_PORT)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except:
# SO_REUSEADDR should be equivalent to SO_REUSEPORT for
# multicast UDP sockets (p 731, "TCP/IP Illustrated,
# Volume 2"), but some BSD-derived systems require
            # SO_REUSEPORT to be specified explicitly. Also, not all
# versions of Python have SO_REUSEPORT available. So
# if you're on a BSD-based system, and haven't upgraded
# to Python 2.3 yet, you may find this library doesn't
# work as expected.
#
pass
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
try:
self.socket.bind(self.group)
except:
# Some versions of linux raise an exception even though
# the SO_REUSE* options have been set, so ignore it
#
pass
#self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
self.listeners = []
self.browsers = []
self.services = {}
self.servicetypes = {}
self.cache = DNSCache()
self.condition = threading.Condition()
self.engine = Engine(self)
self.listener = Listener(self)
self.reaper = Reaper(self)
def isLoopback(self):
return self.intf.startswith("127.0.0.1")
def isLinklocal(self):
return self.intf.startswith("169.254.")
def wait(self, timeout):
"""Calling thread waits for a given number of milliseconds or
until notified."""
self.condition.acquire()
self.condition.wait(timeout/1000)
self.condition.release()
def notifyAll(self):
"""Notifies all waiting threads"""
self.condition.acquire()
self.condition.notifyAll()
self.condition.release()
def getServiceInfo(self, type, name, timeout=3000):
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds."""
info = ServiceInfo(type, name)
if info.request(self, timeout):
return info
return None
def addServiceListener(self, type, listener):
"""Adds a listener for a particular service type. This object
will then have its updateRecord method called when information
arrives for that type."""
self.removeServiceListener(listener)
self.browsers.append(ServiceBrowser(self, type, listener))
def removeServiceListener(self, listener):
"""Removes a listener from the set that is currently listening."""
for browser in self.browsers:
if browser.listener == listener:
browser.cancel()
del(browser)
def registerService(self, info, ttl=_DNS_TTL):
"""Registers service information to the network with a default TTL
of 60 seconds. Zeroconf will then respond to requests for
information for that service. The name of the service may be
changed if needed to make it unique on the network."""
self.checkService(info)
self.services[info.name.lower()] = info
if self.servicetypes.has_key(info.type):
self.servicetypes[info.type]+=1
else:
self.servicetypes[info.type]=1
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, info.ip_type, _CLASS_IN, ttl, info.address), 0)
self.send(out)
i += 1
nextTime += _REGISTER_TIME
def unregisterService(self, info):
"""Unregister a service."""
try:
del(self.services[info.name.lower()])
if self.servicetypes[info.type]>1:
self.servicetypes[info.type]-=1
else:
del self.servicetypes[info.type]
except:
pass
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
            out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, info.ip_type, _CLASS_IN, 0, info.address), 0)
self.send(out)
i += 1
nextTime += _UNREGISTER_TIME
def unregisterAllServices(self):
"""Unregister all registered services."""
if not self.services:
return
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in self.services.values():
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
if info.address:
out.addAnswerAtTime(DNSAddress(info.server, info.ip_type, _CLASS_IN, 0, info.address), 0)
self.send(out)
i += 1
nextTime += _UNREGISTER_TIME
def countRegisteredServices(self):
return len(self.services)
def checkService(self, info):
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
now = currentTimeMillis()
nextTime = now
i = 0
while i < 3:
for record in self.cache.entriesWithName(info.type):
if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
if (info.name.find('.') < 0):
                        info.name = info.name + ".[" + str(info.address) + ":" + str(info.port) + "]." + info.type
self.checkService(info)
return
raise NonUniqueNameException
if now < nextTime:
self.wait(nextTime - now)
now = currentTimeMillis()
continue
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
self.debug = out
out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
self.send(out)
i += 1
nextTime += _CHECK_TIME
def addListener(self, listener, question):
"""Adds a listener for a given question. The listener will have
its updateRecord method called when information is available to
answer the question."""
now = currentTimeMillis()
self.listeners.append(listener)
if question is not None:
for record in self.cache.entriesWithName(question.name):
if question.answeredBy(record) and not record.isExpired(now):
listener.updateRecord(self, now, record)
self.notifyAll()
def removeListener(self, listener):
"""Removes a listener."""
try:
self.listeners.remove(listener)
self.notifyAll()
except:
pass
def updateRecord(self, now, rec):
"""Used to notify listeners of new information that has updated
a record."""
for listener in self.listeners:
listener.updateRecord(self, now, rec)
self.notifyAll()
def handleResponse(self, msg):
"""Deal with incoming response packets. All answers
are held in the cache, and listeners are notified."""
now = currentTimeMillis()
for record in msg.answers:
expired = record.isExpired(now)
if record in self.cache.entries():
if expired:
self.cache.remove(record)
else:
entry = self.cache.get(record)
if entry is not None:
entry.resetTTL(record)
record = entry
else:
self.cache.add(record)
self.updateRecord(now, record)
def handleQuery(self, msg, addr, port):
"""Deal with incoming query packets. Provides a response if
possible."""
out = None
# Support unicast client responses
#
if port != _MDNS_PORT:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
for question in msg.questions:
out.addQuestion(question)
for question in msg.questions:
if question.type == _TYPE_PTR:
if question.name == "_services._dns-sd._udp.local.":
for stype in self.servicetypes.keys():
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
for service in self.services.values():
if question.name == service.type:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
else:
try:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
# Answer A record queries for any service addresses we know
if question.type in (_TYPE_A, _TYPE_AAAA, _TYPE_ANY):
for service in self.services.values():
if service.server == question.name.lower():
out.addAnswer(msg, DNSAddress(question.name, address_type(service.address), _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
service = self.services.get(question.name.lower(), None)
if not service: continue
if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
if question.type == _TYPE_SRV:
out.addAdditionalAnswer(DNSAddress(service.server, address_type(service.address), _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
except:
traceback.print_exc()
if out is not None and out.answers:
out.id = msg.id
self.send(out, addr, port)
def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
"""Sends an outgoing packet."""
# This is a quick test to see if we can parse the packets we generate
#temp = DNSIncoming(out.packet())
try:
self.socket.sendto(out.packet(), 0, (addr, port))
except:
# Ignore this, it may be a temporary loss of network connection
pass
def close(self):
"""Ends the background threads, and prevent this instance from
servicing further queries."""
if globals()['_GLOBAL_DONE'] == 0:
globals()['_GLOBAL_DONE'] = 1
self.notifyAll()
self.engine.notify()
self.unregisterAllServices()
self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
self.socket.close()
# Test a few module features, including service registration, service
# query (for Zoe), and service unregistration.
if __name__ == '__main__':
print "Multicast DNS Service Discovery for Python, version", __version__
r = Zeroconf()
print "1. Testing registration of a service..."
desc = {'version':'0.10','a':'test value', 'b':'another value'}
info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
print " Registering service..."
r.registerService(info)
print " Registration done."
print "2. Testing query of service information..."
print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
print " Query done."
print "3. Testing query of own service..."
print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
print " Query done."
print "4. Testing unregister of service information..."
r.unregisterService(info)
print " Unregister done."
r.close()
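    # A browsing counterpart to the demo above could look like this
    # (illustrative sketch; the service type and listener are placeholders):
    #
    # r = Zeroconf()
    # browser = ServiceBrowser(r, "_http._tcp.local.", ExampleServiceListener())
    # ... wait for callbacks ...
    # browser.cancel()
    # r.close()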
|
selfcommit/gaedav | refs/heads/master | pyxml/sax/sax2exts.py | 4 | """
Various extensions to the core SAX 2.0 API.
$Id: sax2exts.py,v 1.5 2001/12/30 22:17:03 loewis Exp $
"""
import saxexts,saxlib
# In SAX2, validation is turned on through a property. Make sure
# that all parsers returned from this factory are validating
class ValidatingReaderFactory(saxexts.ParserFactory):
def make_parser(self, parser_list = []):
p = saxexts.ParserFactory.make_parser(self,parser_list)
p.setFeature(saxlib.feature_validation, 1)
return p
# --- XMLReader factory
XMLReaderFactory = saxexts.ParserFactory
# --- Creating parser factories
XMLParserFactory = XMLReaderFactory(["pyxml.sax.drivers2.drv_pyexpat",
"pyxml.sax.drivers2.drv_xmlproc"])
XMLValParserFactory = ValidatingReaderFactory(["pyxml.sax.drivers2.drv_xmlproc"])
HTMLParserFactory=XMLReaderFactory(["pyxml.sax.drivers2.drv_htmllib",
"pyxml.sax.drivers2.drv_sgmlop",
"pyxml.sax.drivers2.drv_sgmllib"])
SGMLParserFactory=XMLReaderFactory(["pyxml.sax.drivers2.drv_sgmlop",
"pyxml.sax.drivers2.drv_sgmllib"])
def make_parser(parser_list = []):
return XMLParserFactory.make_parser(parser_list)
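# Illustrative usage (an addition, not part of the original module; which
# driver gets picked depends on what is installed, and MyHandler is a
# hypothetical handler class):
#
# parser = XMLValParserFactory.make_parser()
# parser.setContentHandler(MyHandler())
# parser.parse("document.xml")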
|
denisov-vlad/redash | refs/heads/master | redash/query_runner/google_analytics.py | 1 | import logging
from base64 import b64decode
from datetime import datetime
from urllib.parse import parse_qs, urlparse
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
logger = logging.getLogger(__name__)
try:
from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
enabled = True
except ImportError as e:
enabled = False
types_conv = dict(
STRING=TYPE_STRING,
INTEGER=TYPE_INTEGER,
FLOAT=TYPE_FLOAT,
DATE=TYPE_DATE,
DATETIME=TYPE_DATETIME,
)
def parse_ga_response(response):
columns = []
for h in response["columnHeaders"]:
if h["name"] in ("ga:date", "mcf:conversionDate"):
h["dataType"] = "DATE"
elif h["name"] == "ga:dateHour":
h["dataType"] = "DATETIME"
columns.append(
{
"name": h["name"],
"friendly_name": h["name"].split(":", 1)[1],
"type": types_conv.get(h["dataType"], "string"),
}
)
rows = []
for r in response.get("rows", []):
d = {}
for c, value in enumerate(r):
column_name = response["columnHeaders"][c]["name"]
column_type = [col for col in columns if col["name"] == column_name][0][
"type"
]
            # mcf results come in a slightly different format than ga results:
if isinstance(value, dict):
if "primitiveValue" in value:
value = value["primitiveValue"]
elif "conversionPathValue" in value:
steps = []
for step in value["conversionPathValue"]:
steps.append(
"{}:{}".format(step["interactionType"], step["nodeValue"])
)
value = ", ".join(steps)
else:
raise Exception("Results format not supported")
if column_type == TYPE_DATE:
value = datetime.strptime(value, "%Y%m%d")
elif column_type == TYPE_DATETIME:
if len(value) == 10:
value = datetime.strptime(value, "%Y%m%d%H")
elif len(value) == 12:
value = datetime.strptime(value, "%Y%m%d%H%M")
else:
raise Exception(
"Unknown date/time format in results: '{}'".format(value)
)
d[column_name] = value
rows.append(d)
return {"columns": columns, "rows": rows}
class GoogleAnalytics(BaseSQLQueryRunner):
should_annotate_query = False
@classmethod
def type(cls):
return "google_analytics"
@classmethod
def name(cls):
return "Google Analytics"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {"jsonKeyFile": {"type": "string", "title": "JSON Key File"}},
"required": ["jsonKeyFile"],
"secret": ["jsonKeyFile"],
}
def __init__(self, configuration):
super(GoogleAnalytics, self).__init__(configuration)
self.syntax = "json"
def _get_analytics_service(self):
scope = ["https://www.googleapis.com/auth/analytics.readonly"]
key = json_loads(b64decode(self.configuration["jsonKeyFile"]))
creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
return build("analytics", "v3", http=creds.authorize(httplib2.Http()), cache_discovery=False)
def _get_tables(self, schema):
accounts = (
self._get_analytics_service()
.management()
.accounts()
.list()
.execute()
.get("items")
)
if accounts is None:
raise Exception("Failed getting accounts.")
else:
for account in accounts:
schema[account["name"]] = {"name": account["name"], "columns": []}
properties = (
self._get_analytics_service()
.management()
.webproperties()
.list(accountId=account["id"])
.execute()
.get("items", [])
)
for property_ in properties:
if "defaultProfileId" in property_ and "name" in property_:
schema[account["name"]]["columns"].append(
"{0} (ga:{1})".format(
property_["name"], property_["defaultProfileId"]
)
)
return list(schema.values())
def test_connection(self):
try:
service = self._get_analytics_service()
service.management().accounts().list().execute()
except HttpError as e:
# Make sure we return a more readable error to the end user
raise Exception(e._get_reason())
def run_query(self, query, user):
logger.debug("Analytics is about to execute query: %s", query)
try:
params = json_loads(query)
except:
query_string = parse_qs(urlparse(query).query, keep_blank_values=True)
            params = {k.replace('-', '_'): ",".join(v) for k, v in query_string.items()}
if "mcf:" in params["metrics"] and "ga:" in params["metrics"]:
raise Exception("Can't mix mcf: and ga: metrics.")
if "mcf:" in params.get("dimensions", "") and "ga:" in params.get(
"dimensions", ""
):
raise Exception("Can't mix mcf: and ga: dimensions.")
if "mcf:" in params["metrics"]:
api = self._get_analytics_service().data().mcf()
else:
api = self._get_analytics_service().data().ga()
if len(params) > 0:
try:
response = api.get(**params).execute()
data = parse_ga_response(response)
error = None
json_data = json_dumps(data)
except HttpError as e:
# Make sure we return a more readable error to the end user
error = e._get_reason()
json_data = None
else:
error = "Wrong query format."
json_data = None
return json_data, error
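# Example of a JSON query payload run_query() accepts (illustrative values;
# the keys are Core Reporting API parameters, and a Query Explorer URL is
# accepted as well):
#
# {
#     "ids": "ga:12345678",
#     "start_date": "2020-01-01",
#     "end_date": "2020-01-31",
#     "metrics": "ga:sessions",
#     "dimensions": "ga:date"
# }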
register(GoogleAnalytics)
|
frePPLe/frePPLe | refs/heads/master | freppledb/erpconnection/urls.py | 1 | #
# Copyright (C) 2017 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls import url
from .views import Upload
# Automatically add these URLs when the application is installed
autodiscover = True
urlpatterns = [
# Model list reports, which override standard admin screens
url(r"^erp/upload/$", Upload, name="erp_upload")
]
|
nicopresto/webSkapes | refs/heads/master | tests/logout.py | 7 | from selenium import selenium
import unittest, time, re
class NewTest(unittest.TestCase):
def setUp(self):
self.verificationErrors = []
self.selenium = selenium("localhost", 4444, "*chrome", "http://change-this-to-the-site-you-are-testing/")
self.selenium.start()
def test_new(self):
sel = self.selenium
def tearDown(self):
self.selenium.stop()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
eonpatapon/nova | refs/heads/master | nova/virt/xenapi/client/session.py | 11 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cPickle as pickle
import errno
import socket
import time
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import versionutils
from six.moves import range
from nova import context
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova import utils
from nova import version
from nova.virt.xenapi.client import objects as cli_objects
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
LOG = logging.getLogger(__name__)
xenapi_session_opts = [
cfg.IntOpt('login_timeout',
default=10,
help='Timeout in seconds for XenAPI login.'),
cfg.IntOpt('connection_concurrent',
default=5,
help='Maximum number of concurrent XenAPI connections. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_session_opts, 'xenserver')
CONF.import_opt('host', 'nova.netconf')
def apply_session_helpers(session):
session.VM = cli_objects.VM(session)
session.SR = cli_objects.SR(session)
session.VDI = cli_objects.VDI(session)
session.VBD = cli_objects.VBD(session)
session.PBD = cli_objects.PBD(session)
session.PIF = cli_objects.PIF(session)
session.VLAN = cli_objects.VLAN(session)
session.host = cli_objects.Host(session)
session.network = cli_objects.Network(session)
session.pool = cli_objects.Pool(session)
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
# This is not a config option as it should only ever be
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
    # MINOR VERSION: Compatible changes, new plugins, etc
PLUGIN_REQUIRED_VERSION = '1.2'
def __init__(self, url, user, pw):
version_string = version.version_string_with_package()
self.nova_version = _('%(vendor)s %(product)s %(version)s') % \
{'vendor': version.vendor_string(),
'product': version.product_string(),
'version': version_string}
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.host_ref = self._get_host_ref()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._verify_plugin_version()
apply_session_helpers(self)
def _verify_plugin_version(self):
requested_version = self.PLUGIN_REQUIRED_VERSION
current_version = self.call_plugin_serialized(
'nova_plugin_version', 'get_version')
if not versionutils.is_compatible(requested_version, current_version):
raise self.XenAPI.Failure(
_("Plugin version mismatch (Expected %(exp)s, got %(got)s)") %
{'exp': requested_version, 'got': current_version})
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw,
self.nova_version, 'OpenStack')
except self.XenAPI.Failure as e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw,
self.nova_version, 'OpenStack')
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in range(CONF.xenserver.connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw,
self.nova_version, 'OpenStack')
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = objects.AggregateList.get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_LE('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand.
"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
# Product version is only set in some cases (e.g. XCP, XenServer) and
# not in others (e.g. xenserver-core, XAPI-XCP).
# In these cases, the platform version is the best number to use.
if product_version_str is None:
product_version_str = software_version.get('platform_version',
'0.0.0')
product_brand = software_version.get('product_brand')
product_version = utils.convert_version_to_tuple(product_version_str)
return product_version, product_brand
def _get_software_version(self):
return self.call_xenapi('host.get_software_version', self.host_ref)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def _get_host_ref(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
self.host_ref, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, retry_cb=None, *args,
**kwargs):
"""Allows a plugin to raise RetryableError so we can try again."""
attempts = num_retries + 1
sleep_time = 0.5
for attempt in range(1, attempts + 1):
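            # Exponential backoff between attempts, capped at 15 seconds.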
try:
if attempt > 1:
time.sleep(sleep_time)
sleep_time = min(2 * sleep_time, 15)
callback_result = None
if callback:
callback_result = callback(kwargs)
msg = ('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, '
'callback_result: %(callback_result)s')
LOG.debug(msg,
{'plugin': plugin, 'fn': fn, 'attempt': attempt,
'attempts': attempts,
'callback_result': callback_result})
return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
except self.XenAPI.Failure as exc:
if self._is_retryable_exception(exc, fn):
LOG.warning(_LW('%(plugin)s.%(fn)s failed. '
'Retrying call.'),
{'plugin': plugin, 'fn': fn})
if retry_cb:
retry_cb(exc=exc)
else:
raise
except socket.error as exc:
if exc.errno == errno.ECONNRESET:
LOG.warning(_LW('Lost connection to XenAPI during call to '
'%(plugin)s.%(fn)s. Retrying call.'),
{'plugin': plugin, 'fn': fn})
if retry_cb:
retry_cb(exc=exc)
else:
raise
raise exception.PluginRetriesExceeded(num_retries=num_retries)
def _is_retryable_exception(self, exc, fn):
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
elif "signal" in method:
LOG.debug("Error due to a signal, retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
else:
return False
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
self.is_local_connection = url == "unix://local"
if self.is_local_connection:
return self.XenAPI.xapi_local()
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure as exc:
LOG.debug("Got exception: %s", exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
# FIXME(comstud): eval is evil.
params = eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError as exc:
LOG.debug("Got exception: %s", exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
return self.call_xenapi('%s.get_all_records' % record_type).items()
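# Illustrative usage (an addition, not part of the original module; it needs
# a reachable XenServer, and the URL/credentials below are placeholders):
#
# session = XenAPISession('http://xenserver.example.com', 'root', 'password')
# vm_refs = session.call_xenapi('VM.get_all')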
|
Heufneutje/txircd | refs/heads/dev/next | txircd/modules/rfc/cmd_stats.py | 1 | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implementer
from typing import Any, Dict, List, Optional, Tuple
irc.ERR_NOSUCHXINFO = "772"
irc.RPL_XINFOENTRY = "773"
irc.RPL_XINFOEND = "774"
irc.RPL_XINFOTYPE = "775"
@implementer(IPlugin, IModuleData)
class StatsCommand(ModuleData):
name = "StatsCommand"
core = True
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("STATS", 1, UserStats(self.ircd)) ]
def serverCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("INFOREQ", 1, ServerInfoRequest(self.ircd)),
("INFO", 1, ServerInfo(self.ircd)),
("INFOEND", 1, ServerInfoEnd(self.ircd)) ]
def verifyConfig(self, config: Dict[str, Any]) -> None:
if "public_info" in config:
if not isinstance(config["public_info"], list):
raise ConfigValidationError("public_info", "value must be a list")
for info in config["public_info"]:
if not isinstance(info, str):
raise ConfigValidationError("public_info", "every entry must be a string")
@implementer(ICommand)
class UserStats(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params:
user.sendSingleError("StatsParams", irc.ERR_NEEDMOREPARAMS, "STATS", "Not enough parameters")
return None
typeName = params[0].lower()
if len(params) >= 2 and params[1] != self.ircd.name:
if params[1] not in self.ircd.serverNames:
user.sendSingleError("StatsServer", irc.ERR_NOSUCHSERVER, params[1], "No such server")
return None
return {
"type": typeName,
"server": self.ircd.serverNames[params[1]]
}
return {
"type": typeName
}
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
typeName = data["type"]
if "server" in data:
server = data["server"]
server.sendMessage("INFOREQ", server.serverID, typeName, prefix=user.uuid)
return True
if typeName is None:
if self.ircd.runActionUntilValue("userhasoperpermission", user, "info-unknown", users=[user]):
user.sendMessage(irc.ERR_NOSUCHXINFO, typeName, "No such stats type available")
else:
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the operator permission to run stats {}".format(typeName))
return True
if not self.checkPermission(user, typeName):
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the operator permission to run stats {}".format(typeName))
return True
results = self.ircd.runComboActionUntilValue((("statsruntype", (typeName,)), ("statsruntype-{}".format(typeName), ())), users=[user])
if results:
for key, val in results.items():
user.sendMessage(irc.RPL_XINFOENTRY, typeName, key, val)
# The spec technically allows more than one key/value pair on a line
# If we do that, we'll need to make sure that if there's a space in the value,
# it ends the line.
user.sendMessage(irc.RPL_XINFOEND, typeName, "End of STATS request")
return True
def checkPermission(self, user: "IRCUser", typeName: str) -> bool:
if typeName in self.ircd.config.get("public_info", []):
return True
if self.ircd.runActionUntilValue("userhasoperpermission", user, "info-{}".format(typeName.lower()), users=[user]):
return True
return False
@implementer(ICommand)
class ServerInfoRequest(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) != 2:
return None
if prefix not in self.ircd.users:
if prefix in self.ircd.recentlyQuitUsers:
return {
"lostuser": True
}
return None
if params[0] != self.ircd.serverID and params[0] not in self.ircd.servers:
if params[0] in self.ircd.recentlyQuitServers:
return {
"lostserver": True
}
return None
return {
"user": self.ircd.users[prefix],
"server": params[0],
"type": params[1]
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostuser" in data or "lostserver" in data:
return True
serverID = data["server"]
typeName = data["type"]
if serverID == self.ircd.serverID:
user = data["user"]
destServer = self.ircd.servers[user.uuid[:3]]
results = self.ircd.runComboActionUntilValue((("statsruntype", (typeName,)), ("statsruntype-{}".format(typeName), ())), users=[user])
if results:
for key, val in results.items():
destServer.sendMessage("INFO", user.uuid, typeName, key, val, prefix=self.ircd.serverID)
destServer.sendMessage("INFOEND", user.uuid, typeName, prefix=self.ircd.serverID)
return True
nextServer = self.ircd.servers[serverID]
nextServer.sendMessage("INFOREQ", serverID, typeName, prefix=data["user"].uuid)
return True
@implementer(ICommand)
class ServerInfo(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) < 4 or len(params) % 2 != 0:
return None
if prefix not in self.ircd.servers:
if prefix in self.ircd.recentlyQuitServers:
return {
"lostserver": True
}
return None
if params[0] not in self.ircd.users:
if params[0] in self.ircd.recentlyQuitUsers:
return {
"lostuser": True
}
return None
response = {}
for i in range(2, len(params), 2):
response[params[i]] = params[i+1]
return {
"user": self.ircd.users[params[0]],
"source": prefix,
"type": params[1],
"data": response
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostuser" in data or "lostserver" in data:
return True
typeName = data["type"]
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
sourceServerName = self.ircd.servers[data["source"]].name
            for key, val in data["data"].items():
user.sendMessage(irc.RPL_XINFOENTRY, typeName, key, val, prefix=sourceServerName)
return True
responseList = []
        for key, val in data["data"].items():
responseList.append("{} {}".format(key, val))
destServer = self.ircd.servers[user.uuid[:3]]
destServer.sendMessage("INFO", user.uuid, typeName, *responseList, prefix=data["source"])
return True
@implementer(ICommand)
class ServerInfoEnd(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) != 2:
return None
if prefix not in self.ircd.servers:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"type": params[1],
"source": self.ircd.servers[prefix]
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendMessage(irc.RPL_XINFOEND, data["type"], "End of STATS request", prefix=data["source"].name)
return True
nextServer = self.ircd.servers[user.uuid[:3]]
nextServer.sendMessage("INFOEND", user.uuid, data["type"], prefix=data["source"].serverID)
return True
statsCmd = StatsCommand() |
CymaticLabs/Unity3D.Amqp | refs/heads/master | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/spanhandlers/b.py | 1 | import Inline
info = {
"friendly_name": "Bold",
"example_template": "boldtext",
"summary": "Renders the contained markup with a bold typeface.",
}
def SpanHandler(rest, acc):
(inner, rest) = Inline.parse(rest)
acc.append(Inline.TagFragment('pyle_b', inner))
return rest
|
madhurrajn/samashthi | refs/heads/master | lib/django/contrib/gis/forms/__init__.py | 597 | from django.forms import * # NOQA
from .fields import ( # NOQA
GeometryCollectionField, GeometryField, LineStringField,
MultiLineStringField, MultiPointField, MultiPolygonField, PointField,
PolygonField,
)
from .widgets import BaseGeometryWidget, OpenLayersWidget, OSMWidget # NOQA
|
habibmasuro/kivy | refs/heads/master | kivy/input/postproc/__init__.py | 66 | '''
Input Postprocessing
====================
'''
__all__ = ('kivy_postproc_modules', )
import os
from kivy.input.postproc.doubletap import InputPostprocDoubleTap
from kivy.input.postproc.tripletap import InputPostprocTripleTap
from kivy.input.postproc.ignorelist import InputPostprocIgnoreList
from kivy.input.postproc.retaintouch import InputPostprocRetainTouch
from kivy.input.postproc.dejitter import InputPostprocDejitter
from kivy.input.postproc.calibration import InputPostprocCalibration
# Mapping of ID to module
kivy_postproc_modules = {}
# Don't go further if we generate documentation
if 'KIVY_DOC' not in os.environ:
kivy_postproc_modules['calibration'] = InputPostprocCalibration()
kivy_postproc_modules['retaintouch'] = InputPostprocRetainTouch()
kivy_postproc_modules['ignorelist'] = InputPostprocIgnoreList()
kivy_postproc_modules['doubletap'] = InputPostprocDoubleTap()
kivy_postproc_modules['tripletap'] = InputPostprocTripleTap()
kivy_postproc_modules['dejitter'] = InputPostprocDejitter()
|
rapidpro/tracpro | refs/heads/develop | tracpro/compress.py | 2 | from __future__ import unicode_literals
from compressor.filters.base import CompilerFilter
from compressor.filters.css_default import CssAbsoluteFilter
class LessFilter(CompilerFilter):
"""
See https://stackoverflow.com/questions/10423159/django-compressor-using-lessc-in-debug-mode/14842293
"""
def __init__(self, content, attrs, **kwargs):
super(LessFilter, self).__init__(content, command='lessc {infile} {outfile}', **kwargs)
def input(self, **kwargs):
content = super(LessFilter, self).input(**kwargs)
return CssAbsoluteFilter(content).input(**kwargs)
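# Illustrative settings wiring (an assumption, not shown in this module):
# django-compressor would pick this filter up via something like
#
# COMPRESS_PRECOMPILERS = (
#     ('text/less', 'tracpro.compress.LessFilter'),
# )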
|
hnakamur/django | refs/heads/master | tests/utils_tests/test_module/bad_module.py | 581 | import a_package_name_that_does_not_exist # NOQA
content = 'Bad Module'
|
gilbertw/PTVS | refs/heads/master | Python/Tests/TestData/VirtualEnv/env/Lib/UserDict.py | 83 | """A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
__hash__ = None # Avoid Py3k warning
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return key in self.data
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if key not in self:
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if key not in self:
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
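# Illustrative subclass (an addition, not part of the original module):
# DictMixin only needs __getitem__, __setitem__, __delitem__ and keys()
# from the subclass; every other mapping method above is derived from them.
class _SketchDict(DictMixin):
    def __init__(self):
        self._store = {}
    def __getitem__(self, key):
        return self._store[key]
    def __setitem__(self, key, value):
        self._store[key] = value
    def __delitem__(self, key):
        del self._store[key]
    def keys(self):
        return self._store.keys()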
|
mhuwiler/rootauto | refs/heads/mhuwiler | interpreter/llvm/src/utils/opt-viewer/opt-stats.py | 6 | #!/usr/bin/env python2.7
from __future__ import print_function
desc = '''Generate statistics about optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
The tools requires PyYAML and Pygments Python packages.'''
import optrecord
import argparse
import operator
from collections import defaultdict
from multiprocessing import cpu_count, Pool
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('yaml_files', nargs='+')
parser.add_argument(
'--jobs',
'-j',
default=cpu_count(),
type=int,
help='Max job count (defaults to current CPU count)')
args = parser.parse_args()
if len(args.yaml_files) == 0:
parser.print_help()
sys.exit(1)
if args.jobs == 1:
pmap = map
else:
pool = Pool(processes=args.jobs)
pmap = pool.map
all_remarks, file_remarks, _ = optrecord.gather_results(pmap, args.yaml_files)
bypass = defaultdict(int)
byname = defaultdict(int)
for r in all_remarks.itervalues():
bypass[r.Pass] += 1
byname[r.Pass + "/" + r.Name] += 1
total = len(all_remarks)
print("{:24s} {:10d}\n".format("Total number of remarks", total))
print("Top 10 remarks by pass:")
for (passname, count) in sorted(bypass.items(), key=operator.itemgetter(1),
reverse=True)[:10]:
print(" {:30s} {:2.0f}%". format(passname, count * 100. / total))
print("\nTop 10 remarks:")
for (name, count) in sorted(byname.items(), key=operator.itemgetter(1),
reverse=True)[:10]:
print(" {:30s} {:2.0f}%". format(name, count * 100. / total))
|
imbasimba/astroquery | refs/heads/obs-id-download | astroquery/tests/setup_package.py | 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {
'astroquery.tests': ['coveragerc']}
|
erickt/hue | refs/heads/master | desktop/core/ext-py/ctypes-1.0.2/ctypes/test/test_init.py | 66 | from ctypes import *
import unittest
class X(Structure):
_fields_ = [("a", c_int),
("b", c_int)]
new_was_called = False
def __new__(cls):
result = super(X, cls).__new__(cls)
result.new_was_called = True
return result
def __init__(self):
self.a = 9
self.b = 12
class Y(Structure):
_fields_ = [("x", X)]
class InitTest(unittest.TestCase):
def test_get(self):
        # make sure that merely accessing a nested structure
        # doesn't call the structure's __new__ and __init__
y = Y()
self.failUnlessEqual((y.x.a, y.x.b), (0, 0))
self.failUnlessEqual(y.x.new_was_called, False)
        # But explicitly creating an X structure calls __new__ and __init__, of course.
x = X()
self.failUnlessEqual((x.a, x.b), (9, 12))
self.failUnlessEqual(x.new_was_called, True)
y.x = x
self.failUnlessEqual((y.x.a, y.x.b), (9, 12))
self.failUnlessEqual(y.x.new_was_called, False)
if __name__ == "__main__":
unittest.main()
|
B-MOOC/edx-platform | refs/heads/master | lms/djangoapps/branding/api_urls.py | 152 | """
Branding API endpoint urls.
"""
from django.conf.urls import patterns, url
urlpatterns = patterns(
"",
url(
r"^footer$",
"branding.views.footer",
name="branding_footer",
),
)
|
jeremiah-c-leary/vhdl-style-guide | refs/heads/master | vsg/rules/architecture/rule_024.py | 1 |
from vsg.rules import insert_token_left_of_token_if_it_does_not_exist_between_tokens_using_value_from_token
from vsg.token import architecture_body as token
class rule_024(insert_token_left_of_token_if_it_does_not_exist_between_tokens_using_value_from_token):
'''
Architecture rule 024 checks for the architecture simple name in the "end architecture" statement.
'''
def __init__(self):
insert_token_left_of_token_if_it_does_not_exist_between_tokens_using_value_from_token.__init__(self, 'architecture', '024', token.architecture_simple_name, token.semicolon, token.end_keyword, token.semicolon, token.identifier)
self.solution = 'architecture simple name'
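# Illustrative effect of the rule (an assumption; the architecture name is
# a placeholder):
#
#   end architecture;      -- violation
#   end architecture rtl;  -- fixed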
|
espenhgn/elephant | refs/heads/master | elephant/current_source_density_src/utility_functions.py | 6 | # -*- coding: utf-8 -*-
"""
These are some useful functions used in CSD methods,
They include CSD source profiles to be used as ground truths,
placement of electrodes in 1D, 2D and 3D., etc
These scripts are based on Grzegorz Parka's,
Google Summer of Code 2014, INFC/pykCSD
This was written by :
Michal Czerwinski, Chaitanya Chintaluri
Laboratory of Neuroinformatics,
Nencki Institute of Experimental Biology, Warsaw.
"""
from __future__ import division
import numpy as np
from numpy import exp
import quantities as pq
def patch_quantities():
"""patch quantities with the SI unit Siemens if it does not exist"""
for symbol, prefix, definition, u_symbol in zip(
['siemens', 'S', 'mS', 'uS', 'nS', 'pS'],
['', '', 'milli', 'micro', 'nano', 'pico'],
[pq.A / pq.V, pq.A / pq.V, 'S', 'mS', 'uS', 'nS'],
[None, None, None, None, u'µS', None]):
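        # A string definition ('S', 'mS', ...) means the unit is derived
        # from the previously created one by dividing it by 1000.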
if type(definition) is str:
definition = lastdefinition / 1000
if not hasattr(pq, symbol):
setattr(pq, symbol, pq.UnitQuantity(
prefix + 'siemens',
definition,
symbol=symbol,
u_symbol=u_symbol))
lastdefinition = definition
return
def check_for_duplicated_electrodes(elec_pos):
"""Checks for duplicate electrodes
Parameters
----------
elec_pos : np.array
Returns
-------
    has_duplicated_elec : Boolean
        True when all electrode positions are unique; despite the name,
        equal shapes mean deduplication removed nothing
    """
    unique_elec_pos = np.vstack({tuple(row) for row in elec_pos})
    has_duplicated_elec = unique_elec_pos.shape == elec_pos.shape
    return has_duplicated_elec
def distribute_srcs_1D(X, n_src, ext_x, R_init):
"""Distribute sources in 1D equally spaced
Parameters
----------
X : np.arrays
points at which CSD will be estimated
n_src : int
number of sources to be included in the model
ext_x : floats
        how far the sources should extend beyond the area X
R_init : float
Same as R in 1D case
Returns
-------
X_src : np.arrays
positions of the sources
R : float
effective radius of the basis element
"""
X_src = np.mgrid[(np.min(X) - ext_x):(np.max(X) + ext_x):
np.complex(0, n_src)]
R = R_init
return X_src, R
def distribute_srcs_2D(X, Y, n_src, ext_x, ext_y, R_init):
"""Distribute n_src's in the given area evenly
Parameters
----------
X, Y : np.arrays
points at which CSD will be estimated
n_src : int
demanded number of sources to be included in the model
ext_x, ext_y : floats
        how far the sources should extend beyond the area X, Y
R_init : float
demanded radius of the basis element
Returns
-------
X_src, Y_src : np.arrays
positions of the sources
nx, ny : ints
number of sources in directions x,y
new n_src = nx * ny may not be equal to the demanded number of sources
R : float
effective radius of the basis element
"""
Lx = np.max(X) - np.min(X)
Ly = np.max(Y) - np.min(Y)
Lx_n = Lx + (2 * ext_x)
Ly_n = Ly + (2 * ext_y)
[nx, ny, Lx_nn, Ly_nn, ds] = get_src_params_2D(Lx_n, Ly_n, n_src)
ext_x_n = (Lx_nn - Lx) / 2
ext_y_n = (Ly_nn - Ly) / 2
X_src, Y_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
np.complex(0, nx),
(np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
np.complex(0, ny)]
# d = round(R_init / ds)
R = R_init # R = d * ds
return X_src, Y_src, R
def get_src_params_2D(Lx, Ly, n_src):
"""Distribute n_src sources evenly in a rectangle of size Lx * Ly
Parameters
----------
Lx, Ly : floats
lengths in the directions x, y of the area,
the sources should be placed
n_src : int
demanded number of sources
Returns
-------
nx, ny : ints
number of sources in directions x, y
new n_src = nx * ny may not be equal to the demanded number of sources
Lx_n, Ly_n : floats
updated lengths in the directions x, y
ds : float
spacing between the sources
"""
coeff = [Ly, Lx - Ly, -Lx * n_src]
rts = np.roots(coeff)
r = [r for r in rts if type(r) is not complex and r > 0]
nx = r[0]
ny = n_src / nx
ds = Lx / (nx - 1)
nx = np.floor(nx) + 1
ny = np.floor(ny) + 1
Lx_n = (nx - 1) * ds
Ly_n = (ny - 1) * ds
return (nx, ny, Lx_n, Ly_n, ds)
def distribute_srcs_3D(X, Y, Z, n_src, ext_x, ext_y, ext_z, R_init):
"""Distribute n_src sources evenly in a rectangle of size Lx * Ly * Lz
Parameters
----------
X, Y, Z : np.arrays
points at which CSD will be estimated
n_src : int
        desired number of sources to include in the model
    ext_x, ext_y, ext_z : floats
        how far the sources should extend beyond the area X, Y, Z
R_init : float
demanded radius of the basis element
Returns
-------
X_src, Y_src, Z_src : np.arrays
positions of the sources in 3D space
nx, ny, nz : ints
number of sources in directions x,y,z
new n_src = nx * ny * nz may not be equal to the demanded number of
sources
R : float
updated radius of the basis element
"""
Lx = np.max(X) - np.min(X)
Ly = np.max(Y) - np.min(Y)
Lz = np.max(Z) - np.min(Z)
Lx_n = Lx + 2 * ext_x
Ly_n = Ly + 2 * ext_y
Lz_n = Lz + 2 * ext_z
(nx, ny, nz, Lx_nn, Ly_nn, Lz_nn, ds) = get_src_params_3D(Lx_n,
Ly_n,
Lz_n,
n_src)
ext_x_n = (Lx_nn - Lx) / 2
ext_y_n = (Ly_nn - Ly) / 2
ext_z_n = (Lz_nn - Lz) / 2
X_src, Y_src, Z_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
np.complex(0, nx),
(np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
np.complex(0, ny),
(np.min(Z) - ext_z_n):(np.max(Z) + ext_z_n):
np.complex(0, nz)]
# d = np.round(R_init / ds)
R = R_init
return (X_src, Y_src, Z_src, R)
def get_src_params_3D(Lx, Ly, Lz, n_src):
"""Helps to evenly distribute n_src sources in a cuboid of size Lx * Ly * Lz
Parameters
----------
Lx, Ly, Lz : floats
lengths in the directions x, y, z of the area,
the sources should be placed
n_src : int
demanded number of sources to be included in the model
Returns
-------
nx, ny, nz : ints
number of sources in directions x, y, z
new n_src = nx * ny * nz may not be equal to the demanded number of
sources
Lx_n, Ly_n, Lz_n : floats
updated lengths in the directions x, y, z
ds : float
spacing between the sources (grid nodes)
"""
V = Lx * Ly * Lz
V_unit = V / n_src
L_unit = V_unit**(1. / 3.)
nx = np.ceil(Lx / L_unit)
ny = np.ceil(Ly / L_unit)
nz = np.ceil(Lz / L_unit)
ds = Lx / (nx - 1)
Lx_n = (nx - 1) * ds
Ly_n = (ny - 1) * ds
Lz_n = (nz - 1) * ds
return (nx, ny, nz, Lx_n, Ly_n, Lz_n, ds)
def generate_electrodes(dim, xlims=[0.1, 0.9], ylims=[0.1, 0.9],
zlims=[0.1, 0.9], res=5):
"""Generates electrodes, helpful for FWD funtion.
Parameters
----------
dim : int
Dimensionality of the electrodes, 1,2 or 3
xlims : [start, end]
Spatial limits of the electrodes
ylims : [start, end]
Spatial limits of the electrodes
zlims : [start, end]
Spatial limits of the electrodes
res : int
How many electrodes in each dimension
Returns
-------
ele_x, ele_y, ele_z : flattened np.array of the electrode pos
"""
if dim == 1:
ele_x = np.mgrid[xlims[0]: xlims[1]: np.complex(0, res)]
ele_x = ele_x.flatten()
return ele_x
elif dim == 2:
ele_x, ele_y = np.mgrid[xlims[0]: xlims[1]: np.complex(0, res),
ylims[0]: ylims[1]: np.complex(0, res)]
ele_x = ele_x.flatten()
ele_y = ele_y.flatten()
return ele_x, ele_y
elif dim == 3:
ele_x, ele_y, ele_z = np.mgrid[xlims[0]: xlims[1]: np.complex(0, res),
ylims[0]: ylims[1]: np.complex(0, res),
zlims[0]: zlims[1]: np.complex(0, res)]
ele_x = ele_x.flatten()
ele_y = ele_y.flatten()
ele_z = ele_z.flatten()
return ele_x, ele_y, ele_z
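# Example usage (illustrative):
#   ele_x, ele_y = generate_electrodes(dim=2, res=3)
#   gives 9 electrodes on a regular 3 x 3 grid covering [0.1, 0.9] x [0.1, 0.9].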
def gauss_1d_dipole(x):
"""1D Gaussian dipole source is placed between 0 and 1
to be used to test the CSD
Parameters
----------
x : np.array
Spatial pts. at which the true csd is evaluated
Returns
-------
f : np.array
The value of the csd at the requested points
"""
    src = 0.5*np.exp(-((x-0.7)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
    snk = -0.5*np.exp(-((x-0.3)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
    f = src + snk
    return f
def large_source_2D(x, y):
"""2D Gaussian large source profile - to use to test csd
Parameters
----------
x : np.array
Spatial x pts. at which the true csd is evaluated
y : np.array
Spatial y pts. at which the true csd is evaluated
Returns
-------
f : np.array
The value of the csd at the requested points
"""
zz = [0.4, -0.3, -0.1, 0.6]
zs = [0.2, 0.3, 0.4, 0.2]
    f1 = 0.5965*np.exp((-1*(x-0.1350)**2 - (y-0.8628)**2)/0.4464) * np.exp(-(-zz[0])**2/zs[0]) / np.exp(-(zz[0])**2/zs[0])
    f2 = -0.9269*np.exp((-2*(x-0.1848)**2 - (y-0.0897)**2)/0.2046) * np.exp(-(-zz[1])**2/zs[1]) / np.exp(-(zz[1])**2/zs[1])
    f3 = 0.5910*np.exp((-3*(x-1.3189)**2 - (y-0.3522)**2)/0.2129) * np.exp(-(-zz[2])**2/zs[2]) / np.exp(-(zz[2])**2/zs[2])
    f4 = -0.1963*np.exp((-4*(x-1.3386)**2 - (y-0.5297)**2)/0.2507) * np.exp(-(-zz[3])**2/zs[3]) / np.exp(-(zz[3])**2/zs[3])
    f = f1 + f2 + f3 + f4
return f
def small_source_2D(x, y):
"""2D Gaussian small source profile - to be used to test csd
Parameters
----------
x : np.array
Spatial x pts. at which the true csd is evaluated
y : np.array
Spatial y pts. at which the true csd is evaluated
Returns
-------
f : np.array
The value of the csd at the requested points
"""
def gauss2d(x,y,p):
rcen_x = p[0] * np.cos(p[5]) - p[1] * np.sin(p[5])
rcen_y = p[0] * np.sin(p[5]) + p[1] * np.cos(p[5])
xp = x * np.cos(p[5]) - y * np.sin(p[5])
yp = x * np.sin(p[5]) + y * np.cos(p[5])
        g = p[4]*np.exp(-(((rcen_x-xp)/p[2])**2 +
                          ((rcen_y-yp)/p[3])**2)/2.)
return g
f1 = gauss2d(x,y,[0.3,0.7,0.038,0.058,0.5,0.])
f2 = gauss2d(x,y,[0.3,0.6,0.038,0.058,-0.5,0.])
f3 = gauss2d(x,y,[0.45,0.7,0.038,0.058,0.5,0.])
f4 = gauss2d(x,y,[0.45,0.6,0.038,0.058,-0.5,0.])
f = f1+f2+f3+f4
return f
def gauss_3d_dipole(x, y, z):
"""3D Gaussian dipole profile - to be used to test csd.
Parameters
----------
x : np.array
Spatial x pts. at which the true csd is evaluated
y : np.array
Spatial y pts. at which the true csd is evaluated
z : np.array
Spatial z pts. at which the true csd is evaluated
Returns
-------
f : np.array
The value of the csd at the requested points
"""
x0, y0, z0 = 0.3, 0.7, 0.3
x1, y1, z1 = 0.6, 0.5, 0.7
sig_2 = 0.023
A = (2*np.pi*sig_2)**-1
    f1 = A*np.exp((-(x-x0)**2 - (y-y0)**2 - (z-z0)**2) / (2*sig_2))
    f2 = -1*A*np.exp((-(x-x1)**2 - (y-y1)**2 - (z-z1)**2) / (2*sig_2))
f = f1+f2
return f
|
mozilla/bramble | refs/heads/master | vendor-local/lib/python/django_extensions/utils/text.py | 46 | from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
def truncate_letters(s, num):
""" truncates a string to a number of letters, similar to truncate_words """
s = force_unicode(s)
length = int(num)
if len(s) > length:
s = s[:length]
if not s.endswith('...'):
s += '...'
return s
truncate_letters = allow_lazy(truncate_letters, unicode)
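# Example (illustrative): truncate_letters(u'hello world', 5) returns u'hello...'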
|
tomi77/ems-cli | refs/heads/master | ems_cli/commands/list_config.py | 1 | import os
from . import BaseCommand
from ..i18n import _
class Command(BaseCommand):
name = os.path.splitext(os.path.basename(__file__))[0]
description = _('list with all push/pull configurations')
quiet_fields = {
}
def fill_arguments(self):
pass
def main():
Command().run()
|
edry/edx-platform | refs/heads/master | common/djangoapps/terrain/stubs/xqueue.py | 123 | """
Stub implementation of XQueue for acceptance tests.
Configuration values:
"default" (dict): Default response to be sent to LMS as a grade for a submission
"<submission>" (dict): Grade response to return for submissions containing the text <submission>
"register_submission_url" (str): URL to send grader payloads when we receive a submission
If no grade response is configured, a default response will be returned.
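Example configuration (illustrative values only; the URL and submission text are hypothetical):
    {
        "default": {"correct": true, "score": 1, "msg": ""},
        "wrong answer": {"correct": false, "score": 0, "msg": "Try again"},
        "register_submission_url": "http://localhost:8041/grading/register"
    }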
"""
from .http import StubHttpRequestHandler, StubHttpService, require_params
import json
import copy
from requests import post
from threading import Timer
class StubXQueueHandler(StubHttpRequestHandler):
"""
A handler for XQueue POST requests.
"""
DEFAULT_RESPONSE_DELAY = 2
DEFAULT_GRADE_RESPONSE = {'correct': True, 'score': 1, 'msg': ''}
@require_params('POST', 'xqueue_body', 'xqueue_header')
def do_POST(self):
"""
Handle a POST request from the client
Sends back an immediate success/failure response.
It then POSTS back to the client with grading results.
"""
msg = "XQueue received POST request {0} to path {1}".format(self.post_dict, self.path)
self.log_message(msg)
# Respond only to grading requests
if self._is_grade_request():
# If configured, send the grader payload to other services.
self._register_submission(self.post_dict['xqueue_body'])
try:
xqueue_header = json.loads(self.post_dict['xqueue_header'])
callback_url = xqueue_header['lms_callback_url']
except KeyError:
# If the message doesn't have a header or body,
# then it's malformed. Respond with failure
error_msg = "XQueue received invalid grade request"
self._send_immediate_response(False, message=error_msg)
except ValueError:
# If we could not decode the body or header,
# respond with failure
error_msg = "XQueue could not decode grade request"
self._send_immediate_response(False, message=error_msg)
else:
# Send an immediate response of success
# The grade request is formed correctly
self._send_immediate_response(True)
# Wait a bit before POSTing back to the callback url with the
# grade result configured by the server
# Otherwise, the problem will not realize it's
# queued and it will keep waiting for a response indefinitely
delayed_grade_func = lambda: self._send_grade_response(
callback_url, xqueue_header, self.post_dict['xqueue_body']
)
delay = self.server.config.get('response_delay', self.DEFAULT_RESPONSE_DELAY)
Timer(delay, delayed_grade_func).start()
# If we get a request that's not to the grading submission
# URL, return an error
else:
self._send_immediate_response(False, message="Invalid request URL")
def _send_immediate_response(self, success, message=""):
"""
Send an immediate success/failure message
back to the client
"""
# Send the response indicating success/failure
response_str = json.dumps(
{'return_code': 0 if success else 1, 'content': message}
)
if self._is_grade_request():
self.send_response(
200, content=response_str, headers={'Content-type': 'text/plain'}
)
self.log_message("XQueue: sent response {0}".format(response_str))
else:
self.send_response(500)
def _send_grade_response(self, postback_url, xqueue_header, xqueue_body_json):
"""
POST the grade response back to the client
using the response provided by the server configuration.
Uses the server configuration to determine what response to send:
1) Specific response for submissions containing matching text in `xqueue_body`
2) Default submission configured by client
3) Default submission
`postback_url` is the URL the client told us to post back to
`xqueue_header` (dict) is the full header the client sent us, which we will send back
to the client so it can authenticate us.
`xqueue_body_json` (json-encoded string) is the body of the submission the client sent us.
"""
# First check if we have a configured response that matches the submission body
grade_response = None
# This matches the pattern against the JSON-encoded xqueue_body
# This is very simplistic, but sufficient to associate a student response
# with a grading response.
# There is a danger here that a submission will match multiple response patterns.
# Rather than fail silently (which could cause unpredictable behavior in tests)
# we abort and log a debugging message.
for pattern, response in self.server.queue_responses:
if pattern in xqueue_body_json:
if grade_response is None:
grade_response = response
# Multiple matches, so abort and log an error
else:
self.log_error(
"Multiple response patterns matched '{0}'".format(xqueue_body_json),
)
return
# Fall back to the default grade response configured for this queue,
# then to the default response.
if grade_response is None:
grade_response = self.server.config.get(
'default', copy.deepcopy(self.DEFAULT_GRADE_RESPONSE)
)
# Wrap the message in <div> tags to ensure that it is valid XML
if isinstance(grade_response, dict) and 'msg' in grade_response:
grade_response['msg'] = "<div>{0}</div>".format(grade_response['msg'])
data = {
'xqueue_header': json.dumps(xqueue_header),
'xqueue_body': json.dumps(grade_response)
}
post(postback_url, data=data)
self.log_message("XQueue: sent grading response {0} to {1}".format(data, postback_url))
def _register_submission(self, xqueue_body_json):
"""
If configured, send the submission's grader payload to another service.
"""
url = self.server.config.get('register_submission_url')
# If not configured, do not need to send anything
if url is not None:
try:
xqueue_body = json.loads(xqueue_body_json)
except ValueError:
self.log_error(
"Could not decode XQueue body as JSON: '{0}'".format(xqueue_body_json))
else:
# Retrieve the grader payload, which should be a JSON-encoded dict.
# We pass the payload directly to the service we are notifying, without
# inspecting the contents.
grader_payload = xqueue_body.get('grader_payload')
if grader_payload is not None:
response = post(url, data={'grader_payload': grader_payload})
if not response.ok:
self.log_error(
"Could register submission at URL '{0}'. Status was {1}".format(
url, response.status_code))
else:
self.log_message(
"XQueue body is missing 'grader_payload' key: '{0}'".format(xqueue_body)
)
def _is_grade_request(self):
"""
Return a boolean indicating whether the requested URL indicates a submission.
"""
return 'xqueue/submit' in self.path
class StubXQueueService(StubHttpService):
"""
A stub XQueue grading server that responds to POST requests to localhost.
"""
HANDLER_CLASS = StubXQueueHandler
NON_QUEUE_CONFIG_KEYS = ['default', 'register_submission_url']
@property
def queue_responses(self):
"""
Returns a list of (pattern, response) tuples, where `pattern` is a pattern
to match in the XQueue body, and `response` is a dictionary to return
as the response from the grader.
Every configuration key is a queue name,
except for 'default' and 'register_submission_url' which have special meaning
"""
return {
key: value
for key, value in self.config.iteritems()
if key not in self.NON_QUEUE_CONFIG_KEYS
}.items()
|
pomegranited/edx-platform | refs/heads/master | common/djangoapps/util/organizations_helpers.py | 3 | """
Utility library for working with the edx-organizations app
"""
from django.conf import settings
from django.db.utils import DatabaseError
def add_organization(organization_data):
"""
Client API operation adapter/wrapper
"""
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
return None
from organizations import api as organizations_api
return organizations_api.add_organization(organization_data=organization_data)
def add_organization_course(organization_data, course_id):
"""
Client API operation adapter/wrapper
"""
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
return None
from organizations import api as organizations_api
return organizations_api.add_organization_course(organization_data=organization_data, course_key=course_id)
def get_organization(organization_id):
"""
Client API operation adapter/wrapper
"""
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
return []
from organizations import api as organizations_api
return organizations_api.get_organization(organization_id)
def get_organizations():
"""
Client API operation adapter/wrapper
"""
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
return []
from organizations import api as organizations_api
    # Due to the way unit tests run for edx-platform, models are not yet available at the time
    # of Django admin form instantiation. This unfortunately results in an invocation of the following
    # workflow, because the test configuration is (correctly) configured to exercise the application.
    # The good news is that this case does not manifest in the Real World, because migrations have
    # been run ahead of application instantiation and the flag is set only when that is truly the case.
try:
return organizations_api.get_organizations()
except DatabaseError:
return []
def get_organization_courses(organization_id):
"""
Client API operation adapter/wrapper
"""
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
return []
from organizations import api as organizations_api
return organizations_api.get_organization_courses(organization_id)
def get_course_organizations(course_id):
"""
Client API operation adapter/wrapper
"""
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
return []
from organizations import api as organizations_api
return organizations_api.get_course_organizations(course_id)
|
UniqueDroid/kernel_omap_tuna | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
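	# A function-tracer line looks roughly like (illustrative example):
	#   bash-4251  [001]  26.573: sys_close <-system_call
	# i.e. "<comm>-<pid> [cpu] <timestamp>: <callee> <-<caller>".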
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
alexandrucoman/vbox-nova-driver | refs/heads/master | nova/api/openstack/compute/contrib/extended_volumes.py | 56 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import objects
authorize = extensions.soft_extension_authorizer('compute', 'extended_volumes')
class ExtendedVolumesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedVolumesController, self).__init__(*args, **kwargs)
def _extend_server(self, context, server, instance):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
volume_ids = [bdm.volume_id for bdm in bdms if bdm.volume_id]
key = "%s:volumes_attached" % Extended_volumes.alias
server[key] = [{'id': volume_id} for volume_id in volume_ids]
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
class Extended_volumes(extensions.ExtensionDescriptor):
"""Extended Volumes support."""
name = "ExtendedVolumes"
alias = "os-extended-volumes"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_volumes/api/v1.1")
updated = "2013-06-07T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedVolumesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
|
CompPhysics/MachineLearning | refs/heads/master | doc/LectureNotes/_build/jupyter_execute/chapter3.py | 1 | # Ridge and Lasso Regression
[Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/forelesningsvideoer/LectureSeptember10.mp4?vrtx=view-as-webpage)
## The singular value decomposition
The examples we have looked at so far are cases where we normally can
invert the matrix $\boldsymbol{X}^T\boldsymbol{X}$. Using a polynomial expansion as we
did both for the masses and the fitting of the equation of state,
leads to row vectors of the design matrix which are essentially
orthogonal due to the polynomial character of our model. Obtaining the inverse of the matrix $\boldsymbol{X}^T\boldsymbol{X}$ is then often done via a so-called LU, QR or Cholesky decomposition.
This may
however not be the case in general, and a standard matrix inversion
algorithm based on say LU, QR or Cholesky decomposition may lead to singularities. We will see examples of this below.
There is however a way to partially circumvent this problem and also gain some insights about the ordinary least squares approach, and later shrinkage methods like Ridge and Lasso regressions.
This is given by the **Singular Value Decomposition** algorithm, perhaps
the most powerful linear algebra algorithm. Let us look at a
different example where we may have problems with the standard matrix
inversion algorithm. Thereafter we dive into the math of the SVD.
One of the typical problems we encounter with linear regression, in particular
when the matrix $\boldsymbol{X}$ (our so-called design matrix) is high-dimensional,
are problems with near singular or singular matrices. The column vectors of $\boldsymbol{X}$
may be linearly dependent, normally referred to as super-collinearity.
This means that the matrix may be rank deficient and it is basically impossible
to model the data using linear regression. As an example, consider the matrix
$$
\begin{align*}
\mathbf{X} & = \left[
\begin{array}{rrr}
1 & -1 & 2
\\
1 & 0 & 1
\\
1 & 2 & -1
\\
1 & 1 & 0
\end{array} \right]
\end{align*}
$$
The columns of $\boldsymbol{X}$ are linearly dependent. We see this easily since the
first column is the row-wise sum of the other two columns. The rank (more correctly,
the column rank) of a matrix is the dimension of the space spanned by the
column vectors. Hence, the rank of $\mathbf{X}$ is equal to the number
of linearly independent columns. In this particular case the matrix has rank 2.
Super-collinearity of an $(n \times p)$-dimensional design matrix $\mathbf{X}$ implies
that the matrix $\boldsymbol{X}^T\boldsymbol{X}$ (the matrix we need to invert to solve the linear regression equations) is non-invertible. If we have a square matrix that does not have an inverse, we say the matrix is singular. The example here demonstrates this
$$
\begin{align*}
\boldsymbol{X} & = \left[
\begin{array}{rr}
1 & -1
\\
1 & -1
\end{array} \right].
\end{align*}
$$
We see easily that $\mbox{det}(\boldsymbol{X}) = x_{11} x_{22} - x_{12} x_{21} = 1 \times (-1) - 1 \times (-1) = 0$. Hence, $\mathbf{X}$ is singular and its inverse is undefined.
This is equivalent to saying that the matrix $\boldsymbol{X}$ has at least one eigenvalue which is zero.
If our design matrix $\boldsymbol{X}$ which enters the linear regression problem
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\boldsymbol{\beta} = (\boldsymbol{X}^{T} \boldsymbol{X})^{-1} \boldsymbol{X}^{T} \boldsymbol{y},
\label{_auto1} \tag{1}
\end{equation}
$$
has linearly dependent column vectors, we will not be able to compute the inverse
of $\boldsymbol{X}^T\boldsymbol{X}$ and we cannot find the parameters (estimators) $\beta_i$.
The estimators are only well-defined if $(\boldsymbol{X}^{T}\boldsymbol{X})^{-1}$ exists.
Such problems are more likely to occur when the matrix $\boldsymbol{X}$ is high-dimensional, in which case we are likely to encounter situations where
the regression parameters $\beta_i$ cannot be estimated.
A cheap *ad hoc* approach is simply to add a small diagonal component to the matrix to invert, that is we change
$$
\boldsymbol{X}^{T} \boldsymbol{X} \rightarrow \boldsymbol{X}^{T} \boldsymbol{X}+\lambda \boldsymbol{I},
$$
where $\boldsymbol{I}$ is the identity matrix. When we discuss **Ridge** regression this is actually what we end up evaluating. The parameter $\lambda$ is called a hyperparameter. More about this later.
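As a minimal numerical sketch (with the singular matrix from above), we can check that adding such a diagonal term restores invertibility:
import numpy as np
X = np.array([[1.0, -1.0], [1.0, -1.0]])
A = X.T @ X                   # singular, the determinant is zero
lmbda = 1e-4                  # a small regularization (hyper)parameter
Areg = A + lmbda * np.eye(2)
print(np.linalg.det(A))       # 0.0
print(np.linalg.det(Areg))    # positive, so Areg is invertible
print(np.linalg.inv(Areg))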
From standard linear algebra we know that a square matrix $\boldsymbol{X}$ can be diagonalized if and only if it is
a so-called [normal matrix](https://en.wikipedia.org/wiki/Normal_matrix), that is if $\boldsymbol{X}\in {\mathbb{R}}^{n\times n}$
we have $\boldsymbol{X}\boldsymbol{X}^T=\boldsymbol{X}^T\boldsymbol{X}$ or if $\boldsymbol{X}\in {\mathbb{C}}^{n\times n}$ we have $\boldsymbol{X}\boldsymbol{X}^{\dagger}=\boldsymbol{X}^{\dagger}\boldsymbol{X}$.
The matrix has then a set of eigenpairs
$$
(\lambda_1,\boldsymbol{u}_1),\dots, (\lambda_n,\boldsymbol{u}_n),
$$
and the eigenvalues are given by the diagonal matrix
$$
\boldsymbol{\Sigma}=\mathrm{Diag}(\lambda_1, \dots,\lambda_n).
$$
The matrix $\boldsymbol{X}$ can be written in terms of an orthogonal/unitary transformation $\boldsymbol{U}$
$$
\boldsymbol{X} = \boldsymbol{U}\boldsymbol{\Sigma}\boldsymbol{U}^T,
$$
with $\boldsymbol{U}\boldsymbol{U}^T=\boldsymbol{I}$ or $\boldsymbol{U}\boldsymbol{U}^{\dagger}=\boldsymbol{I}$.
Not all square matrices are diagonalizable. A matrix like the one discussed above
$$
\boldsymbol{X} = \begin{bmatrix}
1& -1 \\
1& -1\\
\end{bmatrix}
$$
is not diagonalizable, it is a so-called [defective matrix](https://en.wikipedia.org/wiki/Defective_matrix). It is easy to see that the condition
$\boldsymbol{X}\boldsymbol{X}^T=\boldsymbol{X}^T\boldsymbol{X}$ is not fulfilled.
## The SVD, a Fantastic Algorithm
However, and this is the strength of the SVD algorithm, any general
matrix $\boldsymbol{X}$ can be decomposed in terms of a diagonal matrix and
two orthogonal/unitary matrices. The [Singular Value Decompostion
(SVD) theorem](https://en.wikipedia.org/wiki/Singular_value_decomposition)
states that a general $m\times n$ matrix $\boldsymbol{X}$ can be written in
terms of a diagonal matrix $\boldsymbol{\Sigma}$ of dimensionality $m\times n$
and two orthogonal matrices $\boldsymbol{U}$ and $\boldsymbol{V}$, where the first has
dimensionality $m \times m$ and the last dimensionality $n\times n$.
We have then
$$
\boldsymbol{X} = \boldsymbol{U}\boldsymbol{\Sigma}\boldsymbol{V}^T
$$
As an example, the above defective matrix can be decomposed as
$$
\boldsymbol{X} = \frac{1}{\sqrt{2}}\begin{bmatrix} 1& 1 \\ 1& -1\\ \end{bmatrix} \begin{bmatrix} 2& 0 \\ 0& 0\\ \end{bmatrix} \frac{1}{\sqrt{2}}\begin{bmatrix} 1& -1 \\ 1& 1\\ \end{bmatrix}=\boldsymbol{U}\boldsymbol{\Sigma}\boldsymbol{V}^T,
$$
with singular values $\sigma_1=2$ and $\sigma_2=0$.
The SVD always exists!
The SVD gives singular values ordered as
$\sigma_i\geq\sigma_{i+1}$ for all $i$; for indices larger than $i=p$, the
singular values are zero.
In the general case, where our design matrix $\boldsymbol{X}$ has dimension
$n\times p$, the matrix is thus decomposed into an $n\times n$
orthogonal matrix $\boldsymbol{U}$, a $p\times p$ orthogonal matrix $\boldsymbol{V}$
and a diagonal matrix $\boldsymbol{\Sigma}$ with $r=\mathrm{min}(n,p)$
singular values $\sigma_i\geq 0$ on the main diagonal and zeros filling
the rest of the matrix. There are at most $p$ singular values
assuming that $n > p$. In our regression examples for the nuclear
masses and the equation of state this is indeed the case, while for
the Ising model we have $p > n$. These are often cases that lead to
near singular or singular matrices.
The columns of $\boldsymbol{U}$ are called the left singular vectors while the columns of $\boldsymbol{V}$ are the right singular vectors.
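We can verify the decomposition of the defective matrix above numerically; this is a small sketch using numpy:
import numpy as np
X = np.array([[1.0, -1.0], [1.0, -1.0]])
U, s, VT = np.linalg.svd(X)
print(s)                                    # [2. 0.]
print(np.allclose(X, U @ np.diag(s) @ VT))  # True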
## Economy-size SVD
If we assume that $n > p$, then our matrix $\boldsymbol{U}$ has dimension $n
\times n$. The last $n-p$ columns of $\boldsymbol{U}$ become however
irrelevant in our calculations since they are multiplied with the
zeros in $\boldsymbol{\Sigma}$.
The economy-size decomposition removes extra rows or columns of zeros
from the diagonal matrix of singular values, $\boldsymbol{\Sigma}$, along with the columns
in either $\boldsymbol{U}$ or $\boldsymbol{V}$ that multiply those zeros in the expression.
Removing these zeros and columns can improve execution time
and reduce storage requirements without compromising the accuracy of
the decomposition.
If $n > p$, we keep only the first $p$ columns of $\boldsymbol{U}$ and $\boldsymbol{\Sigma}$ has dimension $p\times p$.
If $p > n$, then only the first $n$ columns of $\boldsymbol{V}$ are computed and $\boldsymbol{\Sigma}$ has dimension $n\times n$.
The $n=p$ case is obvious, we retain the full SVD.
In general the economy-size SVD leads to less FLOPS and still conserving the desired accuracy.
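In numpy the economy-size decomposition is obtained by passing full_matrices=False, as this small sketch shows:
import numpy as np
X = np.random.randn(6, 3)                   # n > p
U, s, VT = np.linalg.svd(X, full_matrices=False)
print(U.shape, s.shape, VT.shape)           # (6, 3) (3,) (3, 3)
print(np.allclose(X, U @ np.diag(s) @ VT))  # True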
import numpy as np
# SVD inversion
def SVDinv(A):
''' Takes as input a numpy matrix A and returns inv(A) based on singular value decomposition (SVD).
SVD is numerically more stable than the inversion algorithms provided by
numpy and scipy.linalg at the cost of being slower.
'''
U, s, VT = np.linalg.svd(A)
# print('test U')
# print( (np.transpose(U) @ U - U @np.transpose(U)))
# print('test VT')
# print( (np.transpose(VT) @ VT - VT @np.transpose(VT)))
print(U)
print(s)
print(VT)
D = np.zeros((len(U),len(VT)))
for i in range(0,len(VT)):
D[i,i]=s[i]
UT = np.transpose(U); V = np.transpose(VT); invD = np.linalg.inv(D)
return np.matmul(V,np.matmul(invD,UT))
X = np.array([ [1.0, -1.0, 2.0], [1.0, 0.0, 1.0], [1.0, 2.0, -1.0], [1.0, 1.0, 0.0] ])
print(X)
A = np.transpose(X) @ X
print(A)
# Brute force inversion of super-collinear matrix
#B = np.linalg.inv(A)
#print(B)
C = SVDinv(A)
print(C)
The matrix $\boldsymbol{X}$ has columns that are linearly dependent. The first
column is the row-wise sum of the other two columns. The rank of a
matrix (the column rank) is the dimension of space spanned by the
column vectors. The rank of the matrix is the number of linearly
independent columns, in this case just $2$. We see this from the
singular values when running the above code. Running the standard
inversion algorithm for matrix inversion with $\boldsymbol{X}^T\boldsymbol{X}$ results
in the program terminating due to a singular matrix.
There are several interesting mathematical properties which will be
relevant when we are going to discuss the differences between say
ordinary least squares (OLS) and **Ridge** regression.
We have from OLS that the parameters of the linear approximation are given by
$$
\boldsymbol{\tilde{y}} = \boldsymbol{X}\boldsymbol{\beta} = \boldsymbol{X}\left(\boldsymbol{X}^T\boldsymbol{X}\right)^{-1}\boldsymbol{X}^T\boldsymbol{y}.
$$
The matrix to invert can be rewritten in terms of our SVD decomposition as
$$
\boldsymbol{X}^T\boldsymbol{X} = \boldsymbol{V}\boldsymbol{\Sigma}^T\boldsymbol{U}^T\boldsymbol{U}\boldsymbol{\Sigma}\boldsymbol{V}^T.
$$
Using the orthogonality properties of $\boldsymbol{U}$ we have
$$
\boldsymbol{X}^T\boldsymbol{X} = \boldsymbol{V}\boldsymbol{\Sigma}^T\boldsymbol{\Sigma}\boldsymbol{V}^T = \boldsymbol{V}\boldsymbol{D}\boldsymbol{V}^T,
$$
with $\boldsymbol{D}$ being a diagonal matrix with values along the diagonal given by the singular values squared.
This means that
$$
(\boldsymbol{X}^T\boldsymbol{X})\boldsymbol{V} = \boldsymbol{V}\boldsymbol{D},
$$
that is the eigenvectors of $(\boldsymbol{X}^T\boldsymbol{X})$ are given by the columns of the right singular matrix of $\boldsymbol{X}$ and the eigenvalues are the squared singular values. It is easy to show (show this) that
$$
(\boldsymbol{X}\boldsymbol{X}^T)\boldsymbol{U} = \boldsymbol{U}\boldsymbol{D},
$$
that is, the eigenvectors of $(\boldsymbol{X}\boldsymbol{X}^T)$ are the columns of the left singular matrix and the eigenvalues are the same.
Going back to our OLS equation we have
$$
\boldsymbol{X}\boldsymbol{\beta} = \boldsymbol{X}\left(\boldsymbol{V}\boldsymbol{D}\boldsymbol{V}^T \right)^{-1}\boldsymbol{X}^T\boldsymbol{y}=\boldsymbol{U\Sigma V^T}\left(\boldsymbol{V}\boldsymbol{D}\boldsymbol{V}^T \right)^{-1}(\boldsymbol{U\Sigma V^T})^T\boldsymbol{y}=\boldsymbol{U}\boldsymbol{U}^T\boldsymbol{y}.
$$
We will come back to this expression when we discuss Ridge regression.
For OLS we have
$$
\tilde{\boldsymbol{y}}^{\mathrm{OLS}}=\boldsymbol{X}\hat{\boldsymbol{\beta}}^{\mathrm{OLS}}=\sum_{j=1}^p \boldsymbol{u}_j\boldsymbol{u}_j^T\boldsymbol{y},
$$
and for Ridge we have
$$
\tilde{\boldsymbol{y}}^{\mathrm{Ridge}}=\boldsymbol{X}\hat{\boldsymbol{\beta}}^{\mathrm{Ridge}}=\sum_{j=1}^p \boldsymbol{u}_j\frac{\sigma_j^2}{\sigma_j^2+\lambda}\boldsymbol{u}_j^T\boldsymbol{y}.
$$
It is indeed the economy-size SVD; note that the summation runs up to $p$ only and not $n$.
Here we have that $\boldsymbol{X} = \boldsymbol{U}\boldsymbol{\Sigma}\boldsymbol{V}^T$, with $\boldsymbol{\Sigma}$ being an $n\times p$ matrix and $\boldsymbol{V}$ being a $p\times p$ matrix. We have also assumed here that $n > p$.
## Ridge and LASSO Regression
[Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/forelesningsvideoer/LectureSeptember11.mp4?vrtx=view-as-webpage)
Let us remind ourselves about the expression for the standard Mean Squared Error (MSE) which we used to define our cost function and the equations for the ordinary least squares (OLS) method, that is
our optimization problem is
$$
{\displaystyle \min_{\boldsymbol{\beta}\in {\mathbb{R}}^{p}}}\frac{1}{n}\left\{\left(\boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta}\right)^T\left(\boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta}\right)\right\}.
$$
or we can state it as
$$
{\displaystyle \min_{\boldsymbol{\beta}\in
{\mathbb{R}}^{p}}}\frac{1}{n}\sum_{i=0}^{n-1}\left(y_i-\tilde{y}_i\right)^2=\frac{1}{n}\vert\vert \boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta}\vert\vert_2^2,
$$
where we have used the definition of a norm-2 vector, that is
$$
\vert\vert \boldsymbol{x}\vert\vert_2 = \sqrt{\sum_i x_i^2}.
$$
By minimizing the above equation with respect to the parameters
$\boldsymbol{\beta}$ we could then obtain an analytical expression for the
parameters $\boldsymbol{\beta}$. We can add a regularization parameter $\lambda$ by
defining a new cost function to be optimized, that is
$$
{\displaystyle \min_{\boldsymbol{\beta}\in
{\mathbb{R}}^{p}}}\frac{1}{n}\vert\vert \boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta}\vert\vert_2^2+\lambda\vert\vert \boldsymbol{\beta}\vert\vert_2^2
$$
which leads to the Ridge regression minimization problem where we
require that $\vert\vert \boldsymbol{\beta}\vert\vert_2^2\le t$, where $t$ is
a finite number larger than zero. By defining
$$
C(\boldsymbol{X},\boldsymbol{\beta})=\frac{1}{n}\vert\vert \boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta}\vert\vert_2^2+\lambda\vert\vert \boldsymbol{\beta}\vert\vert_1,
$$
we have a new optimization equation
$$
{\displaystyle \min_{\boldsymbol{\beta}\in
{\mathbb{R}}^{p}}}\frac{1}{n}\vert\vert \boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta}\vert\vert_2^2+\lambda\vert\vert \boldsymbol{\beta}\vert\vert_1
$$
which leads to Lasso regression. Lasso stands for least absolute shrinkage and selection operator.
Here we have defined the norm-1 as
$$
\vert\vert \boldsymbol{x}\vert\vert_1 = \sum_i \vert x_i\vert.
$$
Using the matrix-vector expression for Ridge regression,
$$
C(\boldsymbol{X},\boldsymbol{\beta})=\frac{1}{n}\left\{(\boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta})^T(\boldsymbol{y}-\boldsymbol{X}\boldsymbol{\beta})\right\}+\lambda\boldsymbol{\beta}^T\boldsymbol{\beta},
$$
by taking the derivatives with respect to $\boldsymbol{\beta}$ we obtain then
a slightly modified matrix inversion problem which for finite values
of $\lambda$ does not suffer from singularity problems. We obtain
$$
\boldsymbol{\beta}^{\mathrm{Ridge}} = \left(\boldsymbol{X}^T\boldsymbol{X}+\lambda\boldsymbol{I}\right)^{-1}\boldsymbol{X}^T\boldsymbol{y},
$$
with $\boldsymbol{I}$ being a $p\times p$ identity matrix with the constraint that
$$
\sum_{i=0}^{p-1} \beta_i^2 \leq t,
$$
with $t$ a finite positive number.
We see that Ridge regression is nothing but the standard
OLS with a modified diagonal term added to $\boldsymbol{X}^T\boldsymbol{X}$. The
consequences, in particular for our discussion of the bias-variance tradeoff
are rather interesting.
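As a minimal sketch (with randomly generated data for illustration), the Ridge estimator can be computed by solving the modified normal equations directly:
import numpy as np
def ridge_beta(X, y, lmbda):
    # solve (X^T X + lambda I) beta = X^T y
    p = X.shape[1]
    return np.linalg.solve(X.T @ X + lmbda * np.eye(p), X.T @ y)
X = np.random.randn(100, 5)
y = np.random.randn(100)
print(ridge_beta(X, y, lmbda=0.1))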
Furthermore, if we use the result above in terms of the SVD decomposition (our analysis was done for the OLS method), we had
$$
(\boldsymbol{X}\boldsymbol{X}^T)\boldsymbol{U} = \boldsymbol{U}\boldsymbol{D}.
$$
We can analyse the OLS solutions in terms of the eigenvectors (the columns) of the left singular matrix $\boldsymbol{U}$ as
$$
\boldsymbol{X}\boldsymbol{\beta} = \boldsymbol{X}\left(\boldsymbol{V}\boldsymbol{D}\boldsymbol{V}^T \right)^{-1}\boldsymbol{X}^T\boldsymbol{y}=\boldsymbol{U\Sigma V^T}\left(\boldsymbol{V}\boldsymbol{D}\boldsymbol{V}^T \right)^{-1}(\boldsymbol{U\Sigma V^T})^T\boldsymbol{y}=\boldsymbol{U}\boldsymbol{U}^T\boldsymbol{y}
$$
For Ridge regression this becomes
$$
\boldsymbol{X}\boldsymbol{\beta}^{\mathrm{Ridge}} = \boldsymbol{U\Sigma V^T}\left(\boldsymbol{V}\boldsymbol{D}\boldsymbol{V}^T+\lambda\boldsymbol{I} \right)^{-1}(\boldsymbol{U\Sigma V^T})^T\boldsymbol{y}=\sum_{j=0}^{p-1}\boldsymbol{u}_j\boldsymbol{u}_j^T\frac{\sigma_j^2}{\sigma_j^2+\lambda}\boldsymbol{y},
$$
with the vectors $\boldsymbol{u}_j$ being the columns of $\boldsymbol{U}$.
Since $\lambda \geq 0$, it means that compared to OLS, we have
$$
\frac{\sigma_j^2}{\sigma_j^2+\lambda} \leq 1.
$$
Ridge regression finds the coordinates of $\boldsymbol{y}$ with respect to the
orthonormal basis $\boldsymbol{U}$, it then shrinks the coordinates by
$\frac{\sigma_j^2}{\sigma_j^2+\lambda}$. Recall that the SVD has
eigenvalues ordered in a descending way, that is $\sigma_i \geq
\sigma_{i+1}$.
For small eigenvalues $\sigma_i$ it means that their contributions become less important, a fact which can be used to reduce the number of degrees of freedom.
Actually, calculating the variance of $\boldsymbol{X}\boldsymbol{v}_j$ shows that this quantity is equal to $\sigma_j^2/n$.
With a parameter $\lambda$ we can thus shrink the role of specific parameters.
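A small numerical sketch (again with random data) makes the shrinkage explicit: the factors $\sigma_j^2/(\sigma_j^2+\lambda)$ stay close to one for the large singular values and suppress the small ones:
import numpy as np
X = np.random.randn(100, 5)
sigma = np.linalg.svd(X, compute_uv=False)  # singular values, descending
for lmbda in [0.0, 1.0, 10.0]:
    print(lmbda, sigma**2 / (sigma**2 + lmbda))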
For the sake of simplicity, let us assume that the design matrix is orthonormal, that is
$$
\boldsymbol{X}^T\boldsymbol{X}=(\boldsymbol{X}^T\boldsymbol{X})^{-1} =\boldsymbol{I}.
$$
In this case the standard OLS results in
$$
\boldsymbol{\beta}^{\mathrm{OLS}} = \boldsymbol{X}^T\boldsymbol{y}=\sum_{j=0}^{p-1}\boldsymbol{u}_j\boldsymbol{u}_j^T\boldsymbol{y},
$$
and
$$
\boldsymbol{\beta}^{\mathrm{Ridge}} = \left(\boldsymbol{I}+\lambda\boldsymbol{I}\right)^{-1}\boldsymbol{X}^T\boldsymbol{y}=\left(1+\lambda\right)^{-1}\boldsymbol{\beta}^{\mathrm{OLS}},
$$
that is the Ridge estimator scales the OLS estimator by the inverse of a factor $1+\lambda$, and
the Ridge estimator converges to zero when the hyperparameter goes to
infinity.
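This scaling is easy to verify numerically; in this sketch we construct a matrix with orthonormal columns from a QR decomposition:
import numpy as np
Q, _ = np.linalg.qr(np.random.randn(50, 3))  # Q has orthonormal columns
y = np.random.randn(50)
beta_ols = Q.T @ y
lmbda = 0.5
beta_ridge = np.linalg.solve(Q.T @ Q + lmbda * np.eye(3), Q.T @ y)
print(np.allclose(beta_ridge, beta_ols / (1 + lmbda)))  # True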
We will come back to more interpretations after we have gone through some of the statistical analysis.
For more discussions of Ridge and Lasso regression, [Wessel van Wieringen's](https://arxiv.org/abs/1509.09169) article is highly recommended.
Similarly, [Mehta et al's article](https://arxiv.org/abs/1803.08823) is also recommended.
## A better understanding of regularization
The parameter $\lambda$ that we have introduced in the Ridge (and
Lasso as well) regression is often called a regularization parameter
or shrinkage parameter. It is common to call it a hyperparameter. What does it mean mathematically?
Here we will first look at how to analyze the difference between the
standard OLS equations and the Ridge expressions in terms of a linear
algebra analysis using the SVD algorithm. Thereafter, we will link
(see the material on the bias-variance tradeoff below) these
observations to the statistical analysis of the results. In particular
we consider how the variance of the parameters $\boldsymbol{\beta}$ is
affected by changing the parameter $\lambda$.
We have our design matrix
$\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$. With the SVD we decompose it as
$$
\boldsymbol{X} = \boldsymbol{U\Sigma V^T},
$$
with $\boldsymbol{U}\in {\mathbb{R}}^{n\times n}$, $\boldsymbol{\Sigma}\in {\mathbb{R}}^{n\times p}$
and $\boldsymbol{V}\in {\mathbb{R}}^{p\times p}$.
The matrices $\boldsymbol{U}$ and $\boldsymbol{V}$ are unitary/orthonormal matrices, that is in case the matrices are real we have $\boldsymbol{U}^T\boldsymbol{U}=\boldsymbol{U}\boldsymbol{U}^T=\boldsymbol{I}$ and $\boldsymbol{V}^T\boldsymbol{V}=\boldsymbol{V}\boldsymbol{V}^T=\boldsymbol{I}$.
## Introducing the Covariance and Correlation functions
Before we discuss the link between for example Ridge regression and the singular value decomposition, we need to remind ourselves about
the definition of the covariance and the correlation function. These
are quantities we will need repeatedly in the statistical analysis. Suppose we have defined two vectors
$\boldsymbol{x}$ and $\boldsymbol{y}$ with $n$ elements each. The covariance matrix $\boldsymbol{C}$ is defined as
$$
\boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{cov}[\boldsymbol{x},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\
\mathrm{cov}[\boldsymbol{y},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{y},\boldsymbol{y}] \\
\end{bmatrix},
$$
where for example
$$
\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}).
$$
With this definition and recalling that the variance is defined as
$$
\mathrm{var}[\boldsymbol{x}]=\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})^2,
$$
we can rewrite the covariance matrix as
$$
\boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{var}[\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\
\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] & \mathrm{var}[\boldsymbol{y}] \\
\end{bmatrix}.
$$
The covariance can take any value between minus and plus infinity, and may thus
lead to problems with loss of numerical precision for particularly
large values. It is common to scale the covariance matrix by
introducing instead the correlation matrix defined via the so-called
correlation function
$$
\mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]=\frac{\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}]}{\sqrt{\mathrm{var}[\boldsymbol{x}] \mathrm{var}[\boldsymbol{y}]}}.
$$
The correlation function is then given by values $\mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]
\in [-1,1]$. This avoids eventual problems with too large values. We
can then define the correlation matrix for the two vectors $\boldsymbol{x}$
and $\boldsymbol{y}$ as
$$
\boldsymbol{K}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} 1 & \mathrm{corr}[\boldsymbol{x},\boldsymbol{y}] \\
\mathrm{corr}[\boldsymbol{y},\boldsymbol{x}] & 1 \\
\end{bmatrix},
$$
In the above example this is the function we constructed using **pandas**.
In our derivation of the various regression algorithms like **Ordinary Least Squares** or **Ridge regression**
we defined the design/feature matrix $\boldsymbol{X}$ as
$$
\boldsymbol{X}=\begin{bmatrix}
x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\
x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\
x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\
\dots & \dots & \dots & \dots \dots & \dots \\
x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\
x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\
\end{bmatrix},
$$
with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ referring to the column numbers and the
entries $n$ being the row elements.
We can rewrite the design/feature matrix in terms of its column vectors as
$$
\boldsymbol{X}=\begin{bmatrix} \boldsymbol{x}_0 & \boldsymbol{x}_1 & \boldsymbol{x}_2 & \dots & \dots & \boldsymbol{x}_{p-1}\end{bmatrix},
$$
with a given vector
$$
\boldsymbol{x}_i^T = \begin{bmatrix}x_{0,i} & x_{1,i} & x_{2,i}& \dots & \dots x_{n-1,i}\end{bmatrix}.
$$
With these definitions, we can now rewrite our $2\times 2$
correlation/covariance matrix in terms of a more general design/feature
matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$. This leads to a $p\times p$
covariance matrix for the vectors $\boldsymbol{x}_i$ with $i=0,1,\dots,p-1$
$$
\boldsymbol{C}[\boldsymbol{x}] = \begin{bmatrix}
\mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\
\mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\
\mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_1] & \mathrm{var}[\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\
\dots & \dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots & \dots \\
\mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & \mathrm{var}[\boldsymbol{x}_{p-1}]\\
\end{bmatrix},
$$
and the correlation matrix
$$
\boldsymbol{K}[\boldsymbol{x}] = \begin{bmatrix}
1 & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\
\mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_0] & 1 & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\
\mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_1] & 1 & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\
\dots & \dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots & \dots \\
\mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & 1\\
\end{bmatrix},
$$
The Numpy function **np.cov** calculates the covariance elements using
the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have
the exact mean values. The following simple function uses the
**np.vstack** function which takes each vector of dimension $1\times n$
and produces a $2\times n$ matrix $\boldsymbol{W}$, whose transpose is
$$
\boldsymbol{W}^T = \begin{bmatrix} x_0 & y_0 \\
x_1 & y_1 \\
x_2 & y_2\\
\dots & \dots \\
x_{n-2} & y_{n-2}\\
x_{n-1} & y_{n-1}
\end{bmatrix},
$$
which in turn is converted into the $2\times 2$ covariance matrix
$\boldsymbol{C}$ via the Numpy function **np.cov()**. We note that we can also calculate
the mean value of each set of samples $\boldsymbol{x}$ etc using the Numpy
function **np.mean(x)**. We can also extract the eigenvalues of the
covariance matrix through the **np.linalg.eig()** function.
# Importing various packages
import numpy as np
n = 100
x = np.random.normal(size=n)
print(np.mean(x))
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
W = np.vstack((x, y))
C = np.cov(W)
print(C)
The previous example can be converted into the correlation matrix by
simply scaling the matrix elements with the variances. We should also
subtract the mean values for each column. This leads to the following
code which sets up the correlations matrix for the previous example in
a more brute force way. Here we subtract the mean value from each column of the design matrix, calculate the relevant mean values and variances and then finally set up the $2\times 2$ correlation matrix (since we have only two vectors).
import numpy as np
n = 100
# define two vectors
x = np.random.random(size=n)
y = 4+3*x+np.random.normal(size=n)
#scaling the x and y vectors
x = x - np.mean(x)
y = y - np.mean(y)
variance_x = np.sum(x@x)/n
variance_y = np.sum(y@y)/n
print(variance_x)
print(variance_y)
cov_xy = np.sum(x@y)/n
cov_xx = np.sum(x@x)/n
cov_yy = np.sum(y@y)/n
C = np.zeros((2,2))
C[0,0]= cov_xx/variance_x
C[1,1]= cov_yy/variance_y
C[0,1]= cov_xy/np.sqrt(variance_y*variance_x)
C[1,0]= C[0,1]
print(C)
We see that the matrix elements along the diagonal are one as they
should be and that the matrix is symmetric. Furthermore, diagonalizing
this matrix we easily see that it is a positive definite matrix.
The above procedure with **numpy** can be made more compact if we use **pandas**.
We show here how we can set up the correlation matrix using **pandas**, as done in this simple code
import numpy as np
import pandas as pd
n = 10
x = np.random.normal(size=n)
x = x - np.mean(x)
y = 4+3*x+np.random.normal(size=n)
y = y - np.mean(y)
X = (np.vstack((x, y))).T
print(X)
Xpd = pd.DataFrame(X)
print(Xpd)
correlation_matrix = Xpd.corr()
print(correlation_matrix)
We expand this model to the Franke function discussed above.
# Common imports
import numpy as np
import pandas as pd
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
# Making meshgrid of datapoints and compute Franke's function
n = 4
N = 100
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
Xpd = pd.DataFrame(X)
# subtract the mean values and set up the covariance matrix
Xpd = Xpd - Xpd.mean()
covariance_matrix = Xpd.cov()
print(covariance_matrix)
We note here that the covariance is zero for the first rows and
columns since all matrix elements in the design matrix were set to one
(we are fitting the function in terms of a polynomial of degree $n$).
This means that the variance for these elements will be zero and will
cause problems when we set up the correlation matrix. We can simply
drop these elements and construct a correlation
matrix without these elements.
We can rewrite the covariance matrix in a more compact form in terms of the design/feature matrix $\boldsymbol{X}$ as
$$
\boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}^T\boldsymbol{X}= \mathbb{E}[\boldsymbol{X}^T\boldsymbol{X}].
$$
To see this let us simply look at a design matrix $\boldsymbol{X}\in {\mathbb{R}}^{2\times 2}$
$$
\boldsymbol{X}=\begin{bmatrix}
x_{00} & x_{01}\\
x_{10} & x_{11}\\
\end{bmatrix}=\begin{bmatrix}
\boldsymbol{x}_{0} & \boldsymbol{x}_{1}\\
\end{bmatrix}.
$$
If we then compute the expectation value (assuming the column means have been subtracted)
$$
\mathbb{E}[\boldsymbol{X}^T\boldsymbol{X}] = \frac{1}{n}\boldsymbol{X}^T\boldsymbol{X}=\frac{1}{2}\begin{bmatrix}
x_{00}^2+x_{10}^2 & x_{00}x_{01}+x_{10}x_{11}\\
x_{01}x_{00}+x_{11}x_{10} & x_{01}^2+x_{11}^2\\
\end{bmatrix},
$$
which is just
$$
\boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]=\begin{bmatrix} \mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] \\
\mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] \\
\end{bmatrix},
$$
where we wrote $\boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]$ to indicate that this is the covariance of the vectors $\boldsymbol{x}$ of the design/feature matrix $\boldsymbol{X}$.
It is easy to generalize this to a matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$.
## Linking with SVD |
abenzbiria/clients_odoo | refs/heads/master | openerp/workflow/instance.py | 314 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workitem
from openerp.workflow.helpers import Session
from openerp.workflow.helpers import Record
from openerp.workflow.workitem import WorkflowItem
class WorkflowInstance(object):
def __init__(self, session, record, values):
assert isinstance(session, Session)
assert isinstance(record, Record)
self.session = session
self.record = record
if not values:
values = {}
assert isinstance(values, dict)
self.instance = values
@classmethod
def create(cls, session, record, workflow_id):
assert isinstance(session, Session)
assert isinstance(record, Record)
assert isinstance(workflow_id, (int, long))
cr = session.cr
cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id,state) values (%s,%s,%s,%s,%s) RETURNING id', (record.model, record.id, session.uid, workflow_id, 'active'))
instance_id = cr.fetchone()[0]
cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (workflow_id,))
stack = []
activities = cr.dictfetchall()
for activity in activities:
WorkflowItem.create(session, record, activity, instance_id, stack)
cr.execute('SELECT * FROM wkf_instance WHERE id = %s', (instance_id,))
values = cr.dictfetchone()
wi = WorkflowInstance(session, record, values)
wi.update()
return wi
def delete(self):
self.session.cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))
def validate(self, signal, force_running=False):
assert isinstance(signal, basestring)
assert isinstance(force_running, bool)
cr = self.session.cr
cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
stack = []
for work_item_values in cr.dictfetchall():
wi = WorkflowItem(self.session, self.record, work_item_values)
wi.process(signal=signal, force_running=force_running, stack=stack)
# An action is returned
self._update_end()
return stack and stack[0] or False
def update(self):
cr = self.session.cr
cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
for work_item_values in cr.dictfetchall():
stack = []
WorkflowItem(self.session, self.record, work_item_values).process(stack=stack)
return self._update_end()
def _update_end(self):
cr = self.session.cr
instance_id = self.instance['id']
cr.execute('select wkf_id from wkf_instance where id=%s', (instance_id,))
wkf_id = cr.fetchone()[0]
cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
ok=True
for r in cr.fetchall():
if (r[0]<>'complete') or not r[1]:
ok=False
break
if ok:
cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
act_names = cr.fetchall()
cr.execute("update wkf_instance set state='complete' where id=%s", (instance_id,))
cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (instance_id,))
cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (instance_id,))
for cur_instance_id, cur_model_name, cur_record_id in cr.fetchall():
cur_record = Record(cur_model_name, cur_record_id)
for act_name in act_names:
WorkflowInstance(self.session, cur_record, {'id':cur_instance_id}).validate('subflow.%s' % act_name[0])
return ok
# Module-level convenience wrappers. Note that WorkflowInstance.__init__
# requires a values dict, and validate()/update()/_update_end() operate on
# the instance id supplied at construction time (see the subflow handling
# in _update_end above for the same pattern).
def create(session, record, workflow_id):
    return WorkflowInstance.create(session, record, workflow_id)
def delete(session, record):
    return WorkflowInstance(session, record, {}).delete()
def validate(session, record, instance_id, signal, force_running=False):
    return WorkflowInstance(session, record, {'id': instance_id}).validate(signal, force_running)
def update(session, record, instance_id):
    return WorkflowInstance(session, record, {'id': instance_id}).update()
def _update_end(session, record, instance_id):
    return WorkflowInstance(session, record, {'id': instance_id})._update_end()
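# Editor's note: a minimal usage sketch, not part of the original module.
# It assumes `session` and `record` were built elsewhere in the workflow
# package; 'button_confirm' is an illustrative signal name.
def _example_workflow_roundtrip(session, record, workflow_id):
    instance = WorkflowInstance.create(session, record, workflow_id)
    instance.validate('button_confirm', force_running=False)
    instance.delete()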
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
stevielu/viewfinder | refs/heads/master | marketing/tornado/stack_context_new.py | 58 | #!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""`StackContext` allows applications to maintain threadlocal-like state
that follows execution as it moves to other execution contexts.
The motivating examples are to eliminate the need for explicit
``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
allow some additional context to be kept for logging.
This is slightly magic, but it's an extension of the idea that an
exception handler is a kind of stack-local state and when that stack
is suspended and resumed in a new context that state needs to be
preserved. `StackContext` shifts the burden of restoring that state
from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
in ``async_callback``) to the mechanisms that transfer control from
one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
thread pools, etc).
Example usage::
@contextlib.contextmanager
def die_on_error():
try:
yield
except Exception:
logging.error("exception in asynchronous operation",exc_info=True)
sys.exit(1)
with StackContext(die_on_error):
        # Any exception thrown here *or in the callback and its descendants*
# will cause the process to exit instead of spinning endlessly
# in the ioloop.
http_client.fetch(url, callback)
ioloop.start()
Most applications shouldn't have to work with `StackContext` directly.
Here are a few rules of thumb for when it's necessary:
* If you're writing an asynchronous library that doesn't rely on a
stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
(for example, if you're writing a thread pool), use
`.stack_context.wrap()` before any asynchronous operations to capture the
stack context from where the operation was started.
* If you're writing an asynchronous library that has some shared
resources (such as a connection pool), create those shared resources
within a ``with stack_context.NullContext():`` block. This will prevent
``StackContexts`` from leaking from one request to another.
* If you want to write something like an exception handler that will
persist across asynchronous calls, create a new `StackContext` (or
`ExceptionStackContext`), and make your asynchronous calls in a ``with``
block that references your `StackContext`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
import threading
from tornado.util import raise_exc_info
class StackContextInconsistentError(Exception):
pass
class _State(threading.local):
def __init__(self):
self.contexts = (tuple(), None)
_state = _State()
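# `_state.contexts` is a pair: a tuple of all active StackContexts (oldest
# first) and the most recently entered context object (StackContext or
# ExceptionStackContext), which heads a chain linked through `old_contexts`.
# `(tuple(), None)` therefore means "no contexts installed".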
class StackContext(object):
"""Establishes the given context as a StackContext that will be transferred.
Note that the parameter is a callable that returns a context
manager, not the context itself. That is, where for a
non-transferable context manager you would say::
with my_context():
StackContext takes the function itself rather than its result::
with StackContext(my_context):
The result of ``with StackContext() as cb:`` is a deactivation
callback. Run this callback when the StackContext is no longer
needed to ensure that it is not propagated any further (note that
deactivating a context does not affect any instances of that
context that are currently pending). This is an advanced feature
and not necessary in most applications.
"""
def __init__(self, context_factory):
self.context_factory = context_factory
self.contexts = []
self.active = True
def _deactivate(self):
self.active = False
# StackContext protocol
def enter(self):
context = self.context_factory()
self.contexts.append(context)
context.__enter__()
def exit(self, type, value, traceback):
context = self.contexts.pop()
context.__exit__(type, value, traceback)
# Note that some of this code is duplicated in ExceptionStackContext
# below. ExceptionStackContext is more common and doesn't need
# the full generality of this class.
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0] + (self,), self)
_state.contexts = self.new_contexts
try:
self.enter()
except:
_state.contexts = self.old_contexts
raise
return self._deactivate
def __exit__(self, type, value, traceback):
try:
self.exit(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
# Generator coroutines and with-statements with non-local
# effects interact badly. Check here for signs of
# the stack getting out of sync.
# Note that this check comes after restoring _state.context
# so that if it fails things are left in a (relatively)
# consistent state.
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
            # Break the reference cycle (self.new_contexts contains self) to
            # allow for faster GC on CPython.
self.new_contexts = None
class ExceptionStackContext(object):
"""Specialization of StackContext for exception handling.
The supplied ``exception_handler`` function will be called in the
event of an uncaught exception in this context. The semantics are
similar to a try/finally clause, and intended use cases are to log
an error, close a socket, or similar cleanup actions. The
``exc_info`` triple ``(type, value, traceback)`` will be passed to the
exception_handler function.
If the exception handler returns true, the exception will be
consumed and will not be propagated to other exception handlers.
"""
def __init__(self, exception_handler):
self.exception_handler = exception_handler
self.active = True
def _deactivate(self):
self.active = False
def exit(self, type, value, traceback):
if type is not None:
return self.exception_handler(type, value, traceback)
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0], self)
_state.contexts = self.new_contexts
return self._deactivate
def __exit__(self, type, value, traceback):
try:
if type is not None:
return self.exception_handler(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
            # Break the reference cycle (self.new_contexts contains self) to
            # allow for faster GC on CPython.
self.new_contexts = None
class NullContext(object):
"""Resets the `StackContext`.
Useful when creating a shared resource on demand (e.g. an
    `.AsyncHTTPClient`) where the stack that caused the creation is
not relevant to future operations.
"""
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = (tuple(), None)
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
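# Editor's note: an illustrative sketch, not part of the original module.
# Building a shared resource inside NullContext keeps the caller's
# request-specific StackContexts from being captured by the shared object.
def _example_shared_client():
    from tornado.httpclient import AsyncHTTPClient  # assumed import
    with NullContext():
        return AsyncHTTPClient()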
def _remove_deactivated(contexts):
"""Remove deactivated handlers from the chain"""
# Clean ctx handlers
stack_contexts = tuple([h for h in contexts[0] if h.active])
# Find new head
head = contexts[1]
while head is not None and not head.active:
head = head.old_contexts[1]
# Process chain
ctx = head
while ctx is not None:
parent = ctx.old_contexts[1]
while parent is not None:
if parent.active:
break
ctx.old_contexts = parent.old_contexts
parent = parent.old_contexts[1]
ctx = parent
return (stack_contexts, head)
def wrap(fn):
"""Returns a callable object that will restore the current `StackContext`
when executed.
Use this whenever saving a callback to be executed later in a
different execution context (either in a different thread or
asynchronously in the same thread).
"""
# Check if function is already wrapped
if fn is None or hasattr(fn, '_wrapped'):
return fn
# Capture current stack head
# TODO: Any other better way to store contexts and update them in wrapped function?
cap_contexts = [_state.contexts]
def wrapped(*args, **kwargs):
ret = None
try:
# Capture old state
current_state = _state.contexts
# Remove deactivated items
cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
# Force new state
_state.contexts = contexts
# Current exception
exc = (None, None, None)
top = None
# Apply stack contexts
last_ctx = 0
stack = contexts[0]
# Apply state
for n in stack:
try:
n.enter()
last_ctx += 1
except:
# Exception happened. Record exception info and store top-most handler
exc = sys.exc_info()
top = n.old_contexts[1]
# Execute callback if no exception happened while restoring state
if top is None:
try:
ret = fn(*args, **kwargs)
except:
exc = sys.exc_info()
top = contexts[1]
# If there was exception, try to handle it by going through the exception chain
if top is not None:
exc = _handle_exception(top, exc)
else:
# Otherwise take shorter path and run stack contexts in reverse order
while last_ctx > 0:
last_ctx -= 1
c = stack[last_ctx]
try:
c.exit(*exc)
except:
exc = sys.exc_info()
top = c.old_contexts[1]
break
else:
top = None
            # If an exception happened while unrolling, take the longer exception handler path
if top is not None:
exc = _handle_exception(top, exc)
# If exception was not handled, raise it
if exc != (None, None, None):
raise_exc_info(exc)
finally:
_state.contexts = current_state
return ret
wrapped._wrapped = True
return wrapped
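# Editor's note: an illustrative sketch, not part of the original module.
# A thread pool must capture the caller's stack context before the work
# moves to another thread; `executor` and `callback` are placeholders.
def _example_submit(executor, callback):
    wrapped = wrap(callback)  # capture the current stack context now
    executor.submit(wrapped)  # later runs with that context restored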
def _handle_exception(tail, exc):
while tail is not None:
try:
if tail.exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
tail = tail.old_contexts[1]
return exc
def run_with_stack_context(context, func):
"""Run a coroutine ``func`` in the given `StackContext`.
It is not safe to have a ``yield`` statement within a ``with StackContext``
block, so it is difficult to use stack context with `.gen.coroutine`.
This helper function runs the function in the correct context while
keeping the ``yield`` and ``with`` statements syntactically separate.
Example::
@gen.coroutine
def incorrect():
with StackContext(ctx):
# ERROR: this will raise StackContextInconsistentError
yield other_coroutine()
@gen.coroutine
def correct():
yield run_with_stack_context(StackContext(ctx), other_coroutine)
.. versionadded:: 3.1
"""
with context:
return func()
|
HMSBeagle1831/rapidscience | refs/heads/master | rlp/bibliography/views.py | 1 | from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.contrib import messages
from django.db import transaction
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.views.decorators.cache import never_cache
from actstream import action
from taggit.models import Tag
from rlp.discussions.models import ThreadedComment
from rlp.discussions.shortcuts import get_comments_for_instance
from . import choices
from .forms import SearchForm, BookForm, BookSectionForm, JournalArticleForm, ProjectReferenceForm, ReferenceShareForm
from .models import Reference, ProjectReference
from rlp.core.email import send_transactional_mail
from rlp.projects.models import Project
@never_cache
@login_required
def reference_search(request, pk, slug, template_name='bibliography/reference_search.html'):
project = get_object_or_404(Project, pk=pk, slug=slug)
results = None
query = request.GET.get('q', '')
if query:
form = SearchForm(request.GET)
if form.is_valid():
results = form.results
if not results:
messages.warning(request, "No results found for {}".format(query))
else:
form = SearchForm()
context = {
'form': form,
'project': project,
'tab': 'bibliography',
'results': results,
'query': query
}
return render(request, template_name, context)
@never_cache
@login_required
def add_book(request, pk, slug, reference_pk=None, template_name='bibliography/add_book.html'):
project = get_object_or_404(Project, pk=pk, slug=slug)
if reference_pk:
instance = get_object_or_404(Reference, pk=reference_pk)
project_reference = ProjectReference.objects.get(
project=project,
reference=instance,
owner=request.user
)
else:
instance = None
if request.method == 'POST':
# Make a copy so we can manipulate
POST = request.POST.copy()
# Remove tags, if present
tag_ids = POST.pop('tags', [])
form = BookForm(POST, request.FILES, instance=instance)
if form.is_valid():
with transaction.atomic():
reference = form.save()
if reference_pk:
messages.success(request, "Reference updated successfully!")
else:
project_reference = ProjectReference.objects.create(
project=project, reference=reference, owner=request.user)
action.send(request.user, verb='added', action_object=project_reference, target=project)
messages.success(request, "Reference added successfully!")
# Set the tags
if tag_ids:
try:
tags = Tag.objects.filter(id__in=tag_ids)
                    except Exception:
tags = []
project_reference.tags.set(*tags)
# Trigger any post-save signals (e.g. Haystack's real-time indexing)
project_reference.save()
return redirect(project.get_bibliography_url())
else:
if instance:
data = instance.raw_data.copy()
if instance.upload:
data['upload'] = instance.upload
data['tags'] = project_reference.tags.all()
form = BookForm(initial=data)
else:
form = BookForm()
context = {
'form': form,
'instance': instance,
'project': project,
'tab': 'bibliography',
}
return render(request, template_name, context)
@never_cache
@login_required
def add_book_chapter(request, pk, slug, reference_pk=None, template_name='bibliography/add_book_chapter.html'):
project = get_object_or_404(Project, pk=pk, slug=slug)
if reference_pk:
instance = get_object_or_404(Reference, pk=reference_pk)
project_reference = ProjectReference.objects.get(
project=project,
reference=instance,
owner=request.user
)
else:
instance = None
if request.method == 'POST':
# Make a copy so we can manipulate
POST = request.POST.copy()
# Remove tags, if present
tag_ids = POST.pop('tags', [])
form = BookSectionForm(POST, request.FILES, instance=instance)
if form.is_valid():
with transaction.atomic():
reference = form.save()
if reference_pk:
messages.success(request, "Reference updated successfully!")
else:
project_reference = ProjectReference.objects.create(
project=project, reference=reference, owner=request.user)
action.send(request.user, verb='added', action_object=project_reference, target=project)
messages.success(request, "Reference added successfully!")
# Set the tags
if tag_ids:
try:
tags = Tag.objects.filter(id__in=tag_ids)
                    except Exception:
tags = []
project_reference.tags.set(*tags)
# Trigger any post-save signals (e.g. Haystack's real-time indexing)
project_reference.save()
return redirect(project.get_bibliography_url())
else:
if instance:
data = instance.raw_data.copy()
if instance.upload:
data['upload'] = instance.upload
data['tags'] = project_reference.tags.all()
form = BookSectionForm(initial=data)
else:
form = BookSectionForm()
context = {
'form': form,
'instance': instance,
'project': project,
'tab': 'bibliography',
}
return render(request, template_name, context)
@never_cache
@login_required
def add_article(request, pk, slug, reference_pk=None, template_name='bibliography/add_article.html'):
project = get_object_or_404(Project, pk=pk, slug=slug)
if reference_pk:
instance = get_object_or_404(Reference, pk=reference_pk)
project_reference = ProjectReference.objects.get(
project=project,
reference=instance,
owner=request.user
)
else:
instance = None
if request.method == 'POST':
# Make a copy so we can manipulate
POST = request.POST.copy()
# Remove tags, if present
tag_ids = POST.pop('tags', [])
form = JournalArticleForm(POST, request.FILES, instance=instance)
if form.is_valid():
with transaction.atomic():
reference = form.save()
if reference_pk:
messages.success(request, "Reference updated successfully!")
else:
project_reference = ProjectReference.objects.create(
project=project, reference=reference, owner=request.user)
action.send(request.user, verb='added', action_object=project_reference, target=project)
messages.success(request, "Reference added successfully!")
# Set the tags
if tag_ids:
try:
tags = Tag.objects.filter(id__in=tag_ids)
                    except Exception:
tags = []
project_reference.tags.set(*tags)
# Trigger any post-save signals (e.g. Haystack's real-time indexing)
project_reference.save()
return redirect(project.get_bibliography_url())
else:
if instance:
data = instance.raw_data.copy()
if instance.upload:
data['upload'] = instance.upload
data['tags'] = project_reference.tags.all()
data['publication_date'] = datetime.strptime(data['publication_date'], '%d %b %Y')
form = JournalArticleForm(initial=data)
else:
form = JournalArticleForm()
context = {
'form': form,
'instance': instance,
'project': project,
'tab': 'bibliography',
}
return render(request, template_name, context)
@never_cache
@login_required
def reference_add(request, pk, slug, reference_pk, edit=False):
project = get_object_or_404(Project, pk=pk, slug=slug)
reference = get_object_or_404(Reference, pk=reference_pk)
with transaction.atomic():
project_reference, created = ProjectReference.objects.get_or_create(
reference=reference, project=project, defaults={'owner': request.user}
)
if created:
action.send(request.user, verb='added', action_object=project_reference, target=project)
messages.success(
request,
'Reference added successfully! If you are finished adding references, '
'you can return to the <a href="{}">bibliography list</a>.'.format(project.get_bibliography_url()))
else:
messages.info(request, "The reference you selected was already added to this project by {}.".format(
project_reference.owner.get_full_name()))
if request.is_ajax():
context = {
'messages': render_to_string('bootstrap3/messages.html', {}, request=request),
}
return JsonResponse(context)
return redirect(project.get_bibliography_url())
@never_cache
@login_required
def reference_edit(request, pk, slug, reference_pk, template_name='bibliography/edit_reference.html'):
reference = get_object_or_404(Reference, pk=reference_pk)
# For manually added references, show the custom forms for each type
if 'reference_type' in reference.raw_data:
if reference.raw_data['reference_type'] == choices.JOURNAL_ARTICLE:
return add_article(request, pk, slug, reference_pk)
elif reference.raw_data['reference_type'] == choices.BOOK:
return add_book(request, pk, slug, reference_pk)
elif reference.raw_data['reference_type'] == choices.BOOK_SECTION:
return add_book_chapter(request, pk, slug, reference_pk)
# The rest of this view is for adding tags to references coming from Pubmed or Crossref
project = get_object_or_404(Project, pk=pk, slug=slug)
project_reference = ProjectReference.objects.get(project=project, reference=reference)
# Need to edit the 'project reference', that's what should get the tags right?
if request.method == 'POST':
form = ProjectReferenceForm(request.POST, instance=project_reference)
if form.is_valid():
tags = form.cleaned_data.get('tags') or []
project_reference.tags.set(*tags)
# Trigger any post-save signals (e.g. Haystack's real-time indexing)
project_reference.save()
messages.success(request, "Reference updated successfully!")
return redirect(project.get_bibliography_url())
else:
form = ProjectReferenceForm(instance=project_reference, initial={'tags': project_reference.tags.all()})
# It shouldn't be possible to land on the edit page if there aren't any tags, but just in case, remove the form
if not Tag.objects.count():
form = None
context = {
'form': form,
'instance': project_reference,
'project': project,
'tab': 'bibliography',
}
return render(request, template_name, context)
@never_cache
@login_required
def reference_share(request, reference_pk):
project_reference = get_object_or_404(ProjectReference, pk=reference_pk)
if request.method == 'POST':
form = ReferenceShareForm(request.POST)
if form.is_valid():
share = form.save(commit=False)
share.user = request.user
share.reference = project_reference.reference
share.save()
form.save_m2m()
# Collect the recipients using a set() so we don't send duplicate messages (as a share.recipient and as a
# project member).
recipients = set(share.recipients.all())
if share.group:
# put it in the project's timeline
action.send(request.user, verb='shared', action_object=share, target=share.group)
# Only email individuals who were specifically selected
for user in recipients:
send_transactional_mail(
user.email,
'A reference has been shared with you',
'emails/shared_reference_notification', {
'share': share,
}
)
messages.success(request, 'This reference was successfully shared!')
context = {
'messages': render_to_string('bootstrap3/messages.html', {}, request=request),
'form': render_to_string(
'bibliography/_share_reference_form.html',
{'form': ReferenceShareForm()},
request=request
),
}
else:
context = {
'form': render_to_string('bibliography/_share_reference_form.html', {'form': form}, request=request),
}
else:
context = {
'form': render_to_string(
'bibliography/_share_reference_form.html',
{'form': ReferenceShareForm()},
request=request
),
}
return JsonResponse(context)
@never_cache
@login_required
def reference_delete(request, pk, slug, reference_pk, template_name='bibliography/reference_delete.html'):
project = get_object_or_404(Project, pk=pk, slug=slug)
project_reference = get_object_or_404(
ProjectReference, reference__id=reference_pk, project=project, owner=request.user)
if request.POST:
title = project_reference.reference.title
ctype = ContentType.objects.get_for_model(project_reference.__class__)
comment_ctype = ContentType.objects.get_for_model(ThreadedComment)
with transaction.atomic():
# Recursively find all comments and replies for this project_reference and delete them.
# TODO: turn this into a model manager method
qs_to_delete = ThreadedComment.objects.filter(object_pk=project_reference.id, content_type=ctype)
comment_children_ids = list(qs_to_delete.values_list('id', flat=True))
qs_to_delete.delete()
while comment_children_ids:
qs_to_delete = ThreadedComment.objects.filter(
content_type=comment_ctype, parent_id__in=comment_children_ids)
comment_children_ids = list(qs_to_delete.values_list('id', flat=True))
qs_to_delete.delete()
project_reference.delete()
messages.success(request, "Successfully deleted '{}'".format(title))
return redirect(project.get_bibliography_url())
context = {
'project': project,
'obj': project_reference,
'tab': 'bibliography',
}
return render(request, template_name, context)
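# Editor's note: a hypothetical sketch of the manager method the TODO in
# reference_delete() asks for; `ThreadedCommentManager` does not exist in
# this codebase and the name is illustrative only.
#     class ThreadedCommentManager(models.Manager):
#         def delete_tree_for(self, obj):
#             ctype = ContentType.objects.get_for_model(obj.__class__)
#             comment_ctype = ContentType.objects.get_for_model(self.model)
#             qs = self.filter(object_pk=obj.id, content_type=ctype)
#             child_ids = list(qs.values_list('id', flat=True))
#             qs.delete()
#             while child_ids:
#                 qs = self.filter(content_type=comment_ctype,
#                                  parent_id__in=child_ids)
#                 child_ids = list(qs.values_list('id', flat=True))
#                 qs.delete()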
@never_cache
@login_required
def reference_detail(request, pk, slug, reference_pk, template_name='bibliography/reference_detail.html'):
project = get_object_or_404(Project, pk=pk, slug=slug)
project_reference = get_object_or_404(ProjectReference, reference__id=reference_pk, project=project)
context = {
'obj': project_reference,
'project': project,
'tab': 'bibliography',
'comment_list': get_comments_for_instance(project_reference)
}
return render(request, template_name, context)
|
kevindierkx/oauth2-server-manager | refs/heads/master | node_modules/laravel-elixir/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
lsj1292/Abacus | refs/heads/master | biohub/core/plugins/urls.py | 4 | from biohub.core.routes import register_api, register_default, url # noqa
# Place your route definition here.
|
jcrugzz/lpvisualization | refs/heads/master | django/core/servers/basehttp.py | 153 | """
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it in production.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os
import re
import socket
import sys
import urllib
import warnings
from django.core.management.color import color_style
from django.utils.http import http_date
from django.utils._os import safe_join
from django.views import static
from django.contrib.staticfiles import handlers
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
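# Editor's note: an illustrative sketch, not part of the original module.
# FileWrapper turns any object with a read() method into a block iterator:
#     for chunk in FileWrapper(open('/tmp/example.bin', 'rb'), blksize=4096):
#         transmit(chunk)  # `transmit` is a placeholder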
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if not isinstance(headers, list):
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined. Use get_all() to get all
        the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
chunk_size = min(33554432, length)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
        transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def __init__(self, *args, **kwargs):
if kwargs.pop('ipv6', False):
self.address_family = socket.AF_INET6
HTTPServer.__init__(self, *args, **kwargs)
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException(e)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
self.style = color_style()
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
# Utilize terminal colors, if available
if args[1][0] == '2':
# Put 2XX first, since it should be the common case
msg = self.style.HTTP_SUCCESS(msg)
elif args[1][0] == '1':
msg = self.style.HTTP_INFO(msg)
elif args[1] == '304':
msg = self.style.HTTP_NOT_MODIFIED(msg)
elif args[1][0] == '3':
msg = self.style.HTTP_REDIRECT(msg)
elif args[1] == '404':
msg = self.style.HTTP_NOT_FOUND(msg)
elif args[1][0] == '4':
msg = self.style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = self.style.HTTP_SERVER_ERROR(msg)
sys.stderr.write(msg)
class AdminMediaHandler(handlers.StaticFilesHandler):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
    This has been pending deprecation since 1.3.
"""
def get_base_dir(self):
import django
return os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
def get_base_url(self):
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
if not settings.ADMIN_MEDIA_PREFIX:
raise ImproperlyConfigured(
"The ADMIN_MEDIA_PREFIX setting can't be empty "
"when using the AdminMediaHandler, e.g. with runserver.")
return settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``self.base_url``. If the
resulting file path is outside the media directory, then a ValueError
is raised.
"""
relative_url = url[len(self.base_url[2]):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.base_dir, relative_path)
def serve(self, request):
document_root, path = os.path.split(self.file_path(request.path))
return static.serve(request, path, document_root=document_root)
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the base path
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def run(addr, port, wsgi_handler, ipv6=False):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler, ipv6=ipv6)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
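# Editor's note: an illustrative sketch, not part of the original module.
# Serving a Django WSGI application locally, roughly what the `runserver`
# management command does:
#     from django.core.handlers.wsgi import WSGIHandler
#     run('127.0.0.1', 8000, AdminMediaHandler(WSGIHandler()))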
|
ds-hwang/chromium-crosswalk | refs/heads/master | native_client_sdk/src/build_tools/tests/verify_filelist_test.py | 132 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import verify_filelist
def Verify(platform, rules_contents, directory_list):
rules = verify_filelist.Rules('test', platform, rules_contents)
rules.VerifyDirectoryList(directory_list)
class VerifyFilelistTestCase(unittest.TestCase):
def testBasic(self):
rules = """\
foo/file1
foo/file2
foo/file3
bar/baz/other
"""
dirlist = ['foo/file1', 'foo/file2', 'foo/file3', 'bar/baz/other']
Verify('linux', rules, dirlist)
def testGlob(self):
rules = 'foo/*'
dirlist = ['foo/file1', 'foo/file2', 'foo/file3/and/subdir']
Verify('linux', rules, dirlist)
def testPlatformVar(self):
rules = 'dir/${PLATFORM}/blah'
dirlist = ['dir/linux/blah']
Verify('linux', rules, dirlist)
def testPlatformVarGlob(self):
rules = 'dir/${PLATFORM}/*'
dirlist = ['dir/linux/file1', 'dir/linux/file2']
Verify('linux', rules, dirlist)
def testPlatformRule(self):
rules = """\
[linux]dir/linux/only
all/platforms
"""
linux_dirlist = ['dir/linux/only', 'all/platforms']
other_dirlist = ['all/platforms']
Verify('linux', rules, linux_dirlist)
Verify('mac', rules, other_dirlist)
def testMultiPlatformRule(self):
rules = """\
[linux,win]dir/no/macs
all/platforms
"""
nonmac_dirlist = ['dir/no/macs', 'all/platforms']
mac_dirlist = ['all/platforms']
Verify('linux', rules, nonmac_dirlist)
Verify('win', rules, nonmac_dirlist)
Verify('mac', rules, mac_dirlist)
def testPlatformRuleBadPlatform(self):
rules = '[frob]bad/platform'
self.assertRaises(verify_filelist.ParseException, Verify,
'linux', rules, [])
def testMissingFile(self):
rules = """\
foo/file1
foo/missing
"""
dirlist = ['foo/file1']
self.assertRaises(verify_filelist.VerifyException, Verify,
'linux', rules, dirlist)
def testExtraFile(self):
rules = 'foo/file1'
dirlist = ['foo/file1', 'foo/extra_file']
self.assertRaises(verify_filelist.VerifyException, Verify,
'linux', rules, dirlist)
def testEmptyGlob(self):
rules = 'foo/*'
dirlist = ['foo'] # Directory existing is not enough!
self.assertRaises(verify_filelist.VerifyException, Verify,
'linux', rules, dirlist)
def testBadGlob(self):
rules = '*/foo/bar'
dirlist = []
self.assertRaises(verify_filelist.ParseException, Verify,
'linux', rules, dirlist)
def testUnknownPlatform(self):
rules = 'foo'
dirlist = ['foo']
for platform in ('linux', 'mac', 'win'):
Verify(platform, rules, dirlist)
self.assertRaises(verify_filelist.ParseException, Verify,
'foobar', rules, dirlist)
def testUnexpectedPlatformFile(self):
rules = '[mac,win]foo/file1'
dirlist = ['foo/file1']
self.assertRaises(verify_filelist.VerifyException, Verify,
'linux', rules, dirlist)
def testWindowsPaths(self):
if os.path.sep != '/':
rules = 'foo/bar/baz'
dirlist = ['foo\\bar\\baz']
Verify('win', rules, dirlist)
else:
rules = 'foo/bar/baz\\foo'
dirlist = ['foo/bar/baz\\foo']
Verify('linux', rules, dirlist)
def testNestedGlobs(self):
rules = """\
foo/*
foo/bar/*"""
dirlist = ['foo/file', 'foo/bar/file']
Verify('linux', rules, dirlist)
rules = """\
foo/bar/*
foo/*"""
dirlist = ['foo/file', 'foo/bar/file']
Verify('linux', rules, dirlist)
if __name__ == '__main__':
unittest.main()
|
jayme-github/headphones | refs/heads/master | lib/apscheduler/schedulers/asyncio.py | 33 | from __future__ import absolute_import
from functools import wraps
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
try:
import asyncio
except ImportError: # pragma: nocover
try:
import trollius as asyncio
except ImportError:
raise ImportError('AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')
def run_in_event_loop(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self._eventloop.call_soon_threadsafe(func, self, *args, **kwargs)
return wrapper
class AsyncIOScheduler(BaseScheduler):
"""
A scheduler that runs on an asyncio (:pep:`3156`) event loop.
Extra options:
============== =============================================================
``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
============== =============================================================
"""
_eventloop = None
_timeout = None
def start(self):
super(AsyncIOScheduler, self).start()
self.wakeup()
@run_in_event_loop
def shutdown(self, wait=True):
super(AsyncIOScheduler, self).shutdown(wait)
self._stop_timer()
def _configure(self, config):
self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
super(AsyncIOScheduler, self)._configure(config)
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)
def _stop_timer(self):
if self._timeout:
self._timeout.cancel()
del self._timeout
@run_in_event_loop
def wakeup(self):
self._stop_timer()
wait_seconds = self._process_jobs()
self._start_timer(wait_seconds)
def _create_default_executor(self):
from apscheduler.executors.asyncio import AsyncIOExecutor
return AsyncIOExecutor()
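# Editor's note: an illustrative usage sketch, not part of the original
# module; `tick` stands for any callable and the arguments follow the
# regular APScheduler add_job() API.
#     scheduler = AsyncIOScheduler()
#     scheduler.add_job(tick, 'interval', seconds=3)
#     scheduler.start()
#     asyncio.get_event_loop().run_forever()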
|
sauloal/cufflinksviewer | refs/heads/master | venvwin/Lib/site-packages/pip-1.2.1-py2.7.egg/pip/download.py | 17 | import cgi
import getpass
import hashlib
import mimetypes
import os
import re
import shutil
import sys
import tempfile
from pip.backwardcompat import (xmlrpclib, urllib, urllib2,
urlparse, string_types)
from pip.exceptions import InstallationError
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
__all__ = ['xmlrpclib_transport', 'get_file_content', 'urlopen',
'is_url', 'url_to_path', 'path_to_url', 'path_to_url2',
'geturl', 'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
xmlrpclib_transport = xmlrpclib.Transport()
def get_file_content(url, comes_from=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content)"""
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = urlopen(url)
return geturl(resp), resp.read()
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
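# Editor's note: an illustrative sketch, not part of the original module.
# All three call forms return a (location, content) pair:
#     get_file_content('requirements.txt')
#     get_file_content('file:///tmp/requirements.txt')
#     get_file_content('http://example.com/requirements.txt')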
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
class URLOpener(object):
"""
pip's own URL helper that adds HTTP auth and proxy support
"""
def __init__(self):
self.passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
def __call__(self, url):
"""
If the given url contains auth info or if a normal request gets a 401
response, an attempt is made to fetch the resource using basic HTTP
auth.
"""
url, username, password = self.extract_credentials(url)
if username is None:
try:
response = urllib2.urlopen(self.get_request(url))
except urllib2.HTTPError:
e = sys.exc_info()[1]
if e.code != 401:
raise
response = self.get_response(url)
else:
response = self.get_response(url, username, password)
return response
def get_request(self, url):
"""
        Wraps the URL to retrieve to protect against "creative"
interpretation of the RFC: http://bugs.python.org/issue8732
"""
if isinstance(url, string_types):
url = urllib2.Request(url, headers={'Accept-encoding': 'identity'})
return url
def get_response(self, url, username=None, password=None):
"""
        Does the dirty work of actually getting the response object using
        urllib2 and its HTTP auth builtins.
"""
scheme, netloc, path, query, frag = urlparse.urlsplit(url)
req = self.get_request(url)
stored_username, stored_password = self.passman.find_user_password(None, netloc)
# see if we have a password stored
if stored_username is None:
if username is None and self.prompting:
username = urllib.quote(raw_input('User for %s: ' % netloc))
password = urllib.quote(getpass.getpass('Password: '))
if username and password:
self.passman.add_password(None, netloc, username, password)
stored_username, stored_password = self.passman.find_user_password(None, netloc)
authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
opener = urllib2.build_opener(authhandler)
# FIXME: should catch a 401 and offer to let the user reenter credentials
return opener.open(req)
def setup(self, proxystr='', prompting=True):
"""
Sets the proxy handler given the option passed on the command
line. If an empty string is passed it looks at the HTTP_PROXY
environment variable.
"""
self.prompting = prompting
proxy = self.get_proxy(proxystr)
if proxy:
proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy, "https": proxy})
opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler)
urllib2.install_opener(opener)
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
def extract_credentials(self, url):
"""
Extracts user/password from a url.
Returns a tuple:
(url-without-auth, username, password)
"""
if isinstance(url, urllib2.Request):
result = urlparse.urlsplit(url.get_full_url())
else:
result = urlparse.urlsplit(url)
scheme, netloc, path, query, frag = result
username, password = self.parse_credentials(netloc)
if username is None:
return url, None, None
elif password is None and self.prompting:
# remove the auth credentials from the url part
netloc = netloc.replace('%s@' % username, '', 1)
# prompt for the password
prompt = 'Password for %s@%s: ' % (username, netloc)
password = urllib.quote(getpass.getpass(prompt))
else:
# remove the auth credentials from the url part
netloc = netloc.replace('%s:%s@' % (username, password), '', 1)
target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
return target_url, username, password
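    # Editor's note: an illustrative sketch, not part of the original class.
    #     extract_credentials('http://user:secret@example.com/simple/')
    #     -> ('http://example.com/simple/', 'user', 'secret')
    # A URL without an auth part comes back unchanged as (url, None, None).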
def get_proxy(self, proxystr=''):
"""
Get the proxy given the option passed on the command line.
If an empty string is passed it looks at the HTTP_PROXY
environment variable.
"""
if not proxystr:
proxystr = os.environ.get('HTTP_PROXY', '')
if proxystr:
if '@' in proxystr:
user_password, server_port = proxystr.split('@', 1)
if ':' in user_password:
user, password = user_password.split(':', 1)
else:
user = user_password
prompt = 'Password for %s@%s: ' % (user, server_port)
password = urllib.quote(getpass.getpass(prompt))
return '%s:%s@%s' % (user, password, server_port)
else:
return proxystr
else:
return None
urlopen = URLOpener()
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
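# Illustrative examples (assumed inputs): on POSIX,
#   url_to_path('file:///tmp/pkg')    -> '/tmp/pkg'
# while the legacy Windows drive form round-trips via _url_drive_re below:
#   url_to_path('file:///C|/tmp/pkg') -> 'C:/tmp/pkg'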
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute.
"""
path = os.path.normcase(os.path.abspath(path))
if _drive_re.match(path):
path = path[0] + '|' + path[2:]
url = urllib.quote(path)
url = url.replace(os.path.sep, '/')
url = url.lstrip('/')
return 'file:///' + url
def path_to_url2(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
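# Illustrative example (assumed path): path_to_url2('/tmp/my pkg') quotes each
# path segment individually and yields 'file:///tmp/my%20pkg', whereas
# path_to_url above quotes the whole path and keeps the legacy 'C|' drive form.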
def geturl(urllib2_resp):
"""
Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar
"""
url = urllib2_resp.geturl()
scheme, rest = url.split(':', 1)
if rest.startswith('//'):
return url
else:
# FIXME: write a good test to cover it
return '%s://%s' % (scheme, rest)
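# Illustrative example: a response whose geturl() is 'file:/tmp/pkg' is
# normalized here to 'file:///tmp/pkg'; URLs already containing '//' after
# the scheme pass through untouched.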
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle')
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def unpack_file_url(link, location):
source = url_to_path(link.url)
content_type = mimetypes.guess_type(source)[0]
if os.path.isdir(source):
# delete the location since shutil will create it again :(
if os.path.isdir(location):
rmtree(location)
shutil.copytree(source, location)
else:
unpack_file(source, location, content_type, link)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
% (download_hash.digest_size, link, link.hash_name))
raise InstallationError('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash, link.hash))
raise InstallationError('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
try:
total_length = int(resp.info()['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40*1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.debug('Downloading from URL %s' % link)
while True:
chunk = resp.read(4096)
if not chunk:
break
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100*downloaded/total_length, format_size(downloaded)))
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn('Backing up %s to %s'
% (display_path(download_location), display_path(dest_file)))
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.indent -= 2
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None):
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
target_url = link.url.split('#', 1)[0]
target_file = None
download_hash = None
if download_cache:
target_file = os.path.join(download_cache,
urllib.quote(target_url, ''))
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
if (target_file
and os.path.exists(target_file)
and os.path.exists(target_file + '.content-type')):
fp = open(target_file+'.content-type')
content_type = fp.read().strip()
fp.close()
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(target_file, link)
temp_location = target_file
logger.notify('Using download cache from %s' % target_file)
else:
resp = _get_response_from_url(target_url, link)
content_type = resp.info()['content-type']
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != geturl(resp):
ext = os.path.splitext(geturl(resp))[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
if download_dir:
_copy_file(temp_location, download_dir, content_type, link)
unpack_file(temp_location, location, content_type, link)
if target_file and target_file != temp_location:
cache_download(target_file, temp_location, content_type)
if target_file is None:
os.unlink(temp_location)
os.rmdir(temp_dir)
def _get_response_from_url(target_url, link):
try:
resp = urlopen(target_url)
except urllib2.HTTPError:
e = sys.exc_info()[1]
logger.fatal("HTTP error %s while getting %s" % (e.code, link))
raise
except IOError:
e = sys.exc_info()[1]
# Typically an FTP error
logger.fatal("Error %s while getting %s" % (e, link))
raise
return resp
class Urllib2HeadRequest(urllib2.Request):
def get_method(self):
return "HEAD"
|
mmardini/django | refs/heads/master | django/http/utils.py | 134 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
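# Illustrative example (assumed view): a HEAD request against a view that
# returns b'hello' keeps the response headers but ships an empty body, as
# does any 204 or 304 response, per RFC 2616 section 4.3.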
|
ycl2045/nova-master | refs/heads/master | nova/virt/xenapi/network_utils.py | 8 | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of network
records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
"""
from nova.openstack.common.gettextutils import _
def find_network_with_name_label(session, name_label):
networks = session.call_xenapi('network.get_by_name_label', name_label)
if len(networks) == 1:
return networks[0]
elif len(networks) > 1:
raise Exception(_('Found non-unique network for name_label %s') %
name_label)
else:
return None
def find_network_with_bridge(session, bridge):
"""Return the network on which the bridge is attached, if found.
The bridge is defined in the nova db and can be found either in the
'bridge' or 'name_label' fields of the XenAPI network record.
"""
expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
(bridge, bridge))
networks = session.call_xenapi('network.get_all_records_where', expr)
if len(networks) == 1:
return networks.keys()[0]
elif len(networks) > 1:
raise Exception(_('Found non-unique network for bridge %s') % bridge)
else:
raise Exception(_('Found no network for bridge %s') % bridge)
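# Hedged usage sketch (the session object and bridge name are assumptions):
# looking up the XenAPI network backing an integration bridge might read
#   net_ref = find_network_with_bridge(session, 'xapi1')
# which raises if the bridge maps to zero or to multiple networks.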
|
jctanner/ansibullbot | refs/heads/master | scripts/find_dead_issues.py | 1 | #!/usr/bin/env python
import datetime
import json
import os
import sys
from ansibullbot.utils.receiver_client import get_receiver_metadata
from ansibullbot.utils.receiver_client import get_receiver_summaries
def main():
# define where to dump the resulting files
if len(sys.argv) > 1:
destdir = sys.argv[1]
else:
destdir = '/tmp'
if not os.path.isdir(destdir):
os.makedirs(destdir)
ISSUES = {}
BYFILE = {}
BYISSUE = {}
BYMAINTAINER = {}
BYFILE_STATS = {}
summaries = get_receiver_summaries('ansible', 'ansible')
for summary in summaries:
number = summary['github_number']
this_meta = get_receiver_metadata('ansible', 'ansible', number=number)
if not this_meta:
continue
this_meta = this_meta[0]
url = this_meta['html_url']
ISSUES[url] = this_meta
BYISSUE[url] = []
try:
this_meta.get('component_matches', [])
except Exception as e:
print(e)
#import epdb; epdb.st()
continue
if summary['state'] == 'open':
created = datetime.datetime.strptime(
summary['created_at'].split('T')[0], '%Y-%m-%d'
)
age = (datetime.datetime.now() - created).days
else:
print(summary)
import epdb; epdb.st()
for component in this_meta.get('component_matches', []):
# we seem to have some variation in the keys ...
filename = None
try:
filename = component['repo_filename']
except KeyError:
filename = component['filename']
if not filename:
continue
if 'maintainers' in component:
for maintainer in component['maintainers']:
if maintainer not in BYMAINTAINER:
BYMAINTAINER[maintainer] = []
if url not in BYMAINTAINER[maintainer]:
BYMAINTAINER[maintainer].append(url)
if filename not in BYFILE_STATS:
BYFILE_STATS[filename] = {
'open_ages': [],
'closed_ages': []
}
if summary['state'] == 'open':
BYFILE_STATS[filename]['open_ages'].append([
age,
this_meta['template_data']['issue type']
])
else:
import epdb; epdb.st()
BYISSUE[url].append(filename)
if filename not in BYFILE:
BYFILE[filename] = []
if url not in BYFILE[filename]:
BYFILE[filename].append(url)
# median closure time?
import epdb; epdb.st()
# most active files?
import epdb; epdb.st()
'''
destfile = os.path.join(destdir, 'byissue.json')
with open(destfile, 'w') as f:
f.write(json.dumps(BYISSUE, indent=2, sort_keys=True))
destfile = os.path.join(destdir, 'byfile.json')
with open(destfile, 'w') as f:
f.write(json.dumps(BYFILE, indent=2, sort_keys=True))
tuples = BYFILE.items()
for idx, x in enumerate(tuples):
x = [x[0]] + x[1]
tuples[idx] = x
tuples.sort(key=len)
tuples.reverse()
destfile = os.path.join(destdir, 'byfile_sorted.txt')
with open(destfile, 'w') as f:
for tup in tuples:
f.write('{}\n'.format(tup[0]))
for issue in tup[1:]:
issue = issue.encode('ascii', 'ignore')
title = ISSUES[issue]['title']
title = title.encode('ascii', 'ignore')
f.write('\t{}\t{}\n'.format(issue, title))
destfile = os.path.join(destdir, 'byfile_sorted.html')
with open(destfile, 'w') as f:
for tup in tuples:
f.write('<div style="background-color: #cfc ; padding: 10px; border: 1px solid green;">\n')
file_ref = '<a href="https://github.com/ansible/ansible/blob/devel/{}">https://github.com/ansible/ansible/blob/devel/{}</a>'.format(tup[0], tup[0])
f.write('{}\n'.format(file_ref))
f.write('</div>')
f.write('<br>\n')
for issue in tup[1:]:
issue = issue.encode('ascii', 'ignore')
title = ISSUES[issue]['title']
title = title.encode('ascii', 'ignore')
issue_ref = '<a href="{}">{}</a>'.format(issue, issue)
f.write('\t{}\t{}<br>\n'.format(issue_ref, title))
f.write('<br>\n')
tuples = BYMAINTAINER.items()
for idx, x in enumerate(tuples):
x = [x[0]] + x[1]
tuples[idx] = x
tuples.sort(key=len)
tuples.reverse()
destfile = os.path.join(destdir, 'bymaintainer.json')
with open(destfile, 'w') as f:
f.write(json.dumps(BYMAINTAINER, indent=2, sort_keys=True))
destfile = os.path.join(destdir, 'bymaintainer_sorted.txt')
with open(destfile, 'w') as f:
for tup in tuples:
f.write('{}\n'.format(tup[0]))
for issue in tup[1:]:
f.write('\t{}\n'.format(issue))
'''
if __name__ == "__main__":
main()
|
mdj2/django | refs/heads/master | tests/urlpatterns_reverse/erroneous_views_module.py | 157 | import non_existent
def erroneous_view(request):
pass
|
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/docutils/languages/zh_cn.py | 148 | # -*- coding: utf-8 -*-
# $Id: zh_cn.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Pan Junyong <panjy@zopechina.com>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Simplified Chinese language mappings for language-dependent features
of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': u'作者',
'authors': u'作者群',
'organization': u'组织',
'address': u'地址',
'contact': u'联系',
'version': u'版本',
'revision': u'修订',
'status': u'状态',
'date': u'日期',
'copyright': u'版权',
'dedication': u'献辞',
'abstract': u'摘要',
'attention': u'注意',
'caution': u'小心',
'danger': u'危险',
'error': u'错误',
'hint': u'提示',
'important': u'重要',
'note': u'注解',
'tip': u'技巧',
'warning': u'警告',
'contents': u'目录',
}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
u'作者': 'author',
u'作者群': 'authors',
u'组织': 'organization',
u'地址': 'address',
u'联系': 'contact',
u'版本': 'version',
u'修订': 'revision',
u'状态': 'status',
u'时间': 'date',
u'版权': 'copyright',
u'献辞': 'dedication',
u'摘要': 'abstract'}
"""Simplified Chinese to canonical name mapping for bibliographic fields."""
author_separators = [';', ',',
u'\uff1b', # ';'
u'\uff0c', # ','
u'\u3001', # '、'
]
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
msebire/intellij-community | refs/heads/master | python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_0/_pkg1_0_0/_pkg1_0_0_0/_pkg1_0_0_0_0/_mod1_0_0_0_0_4.py | 30 | name1_0_0_0_0_4_0 = None
name1_0_0_0_0_4_1 = None
name1_0_0_0_0_4_2 = None
name1_0_0_0_0_4_3 = None
name1_0_0_0_0_4_4 = None |
houshengbo/nova_vmware_compute_driver | refs/heads/attach-detach-VMware-iSCSI-driver | nova/openstack/common/gettextutils.py | 16 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from nova.openstack.common.gettextutils import _
"""
import gettext
t = gettext.translation('openstack-common', 'locale', fallback=True)
def _(msg):
return t.ugettext(msg)
|
beckett1124/Paddle | refs/heads/develop | python/paddle/utils/__init__.py | 19 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['dump_config']
|
ntt-nflex/nflex_connector_utils | refs/heads/master | nflex_connector_utils/saas_user.py | 1 | from . import Resource
class SaasUser(Resource):
"""
A representation of a Saas User
Args:
base (base): See :py:class:`nflex_connector_utils.resource.Resource` for common resource args.
user_id (str): Id of the Saas User
        avatar_url (str): Avatar URL of the user
        phone (str): Phone number of the user
        address (str): Address of the Saas User
        language (str): Preferred language of the user
        is_active (Boolean): Whether the user is active
        disk_quota_b (str): Storage allocated to the user, in bytes
        disk_used_b (str): Storage used by the user, in bytes
        email (str): Email address of the user
        country (str): Country of the user
""" # noqa
def __init__(self, avatar_url=None, phone=None, address=None,
language=None, is_active=None, disk_quota_b=None,
disk_used_b=None, email=None, country=None, **kwargs):
super(SaasUser, self).__init__(type='saas_user', **kwargs)
self._avatar_url = avatar_url
self._phone = phone
self._address = address
self._language = language
self._is_active = is_active
self._disk_quota_b = disk_quota_b
self._disk_used_b = disk_used_b
self._email = email
self._country = country
def serialize(self):
"""Serialize the contents"""
data = super(SaasUser, self).serialize()
data['details'] = {
self.type: {
"avatar_url": self._avatar_url,
"phone": self._phone,
"address": self._address,
"is_active": self._is_active,
"disk_quota_b": self._disk_quota_b,
"disk_used_b": self._disk_used_b,
"email": self._email,
"country": self._country,
"language": self._language,
}
}
return data
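# Hedged usage sketch (all field values below are illustrative assumptions):
#
#   user = SaasUser(id='u-1', name='Alice', email='alice@example.com',
#                   is_active=True, disk_quota_b=10737418240)
#   payload = user.serialize()
#   # SaaS-specific fields end up nested under payload['details']['saas_user']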
|
stainsteelcrown/nonsense-story-generator | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 1093 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
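# Minimal usage sketch (illustrative): insertion order is preserved,
#
#   d = OrderedDict([('b', 1), ('a', 2)])
#   d.keys()     # -> ['b', 'a']
#   d.popitem()  # -> ('a', 2), LIFO by default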
|
starrify/scrapy | refs/heads/master | scrapy/core/__init__.py | 216 | """
Scrapy core library classes and functions.
"""
|
itkovian/sqlalchemy | refs/heads/master | examples/graphs/__init__.py | 30 | """An example of persistence for a directed graph structure. The
graph is stored as a collection of edges, each referencing both a
"lower" and an "upper" node in a table of nodes. Basic persistence
and querying for lower- and upper- neighbors are illustrated::
n2 = Node(2)
n5 = Node(5)
n2.add_neighbor(n5)
print n2.higher_neighbors()
.. autosource::
""" |
mushtaqak/edx-platform | refs/heads/master | lms/djangoapps/courseware/migrations/0004_add_field_studentmodule_course_id.py | 194 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'StudentModule.course_id'
db.add_column('courseware_studentmodule', 'course_id',
self.gf('django.db.models.fields.CharField')(default="", max_length=255, db_index=True),
keep_default=False)
# Removing unique constraint on 'StudentModule', fields ['module_id', 'student']
db.delete_unique('courseware_studentmodule', ['module_id', 'student_id'])
# NOTE: manually remove this constraint (from 0001)--0003 tries, but fails for sqlite.
# Removing unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
if db.backend_name == "sqlite3":
db.delete_unique('courseware_studentmodule', ['student_id', 'module_id', 'module_type'])
# Adding unique constraint on 'StudentModule', fields ['course_id', 'module_state_key', 'student']
db.create_unique('courseware_studentmodule', ['student_id', 'module_id', 'course_id'])
def backwards(self, orm):
# Removing unique constraint on 'StudentModule', fields ['student_id', 'module_state_key', 'course_id']
db.delete_unique('courseware_studentmodule', ['student_id', 'module_id', 'course_id'])
# Deleting field 'StudentModule.course_id'
db.delete_column('courseware_studentmodule', 'course_id')
# Adding unique constraint on 'StudentModule', fields ['module_id', 'student']
db.create_unique('courseware_studentmodule', ['module_id', 'student_id'])
# Adding unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
db.create_unique('courseware_studentmodule', ['student_id', 'module_id', 'module_type'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courseware.studentmodule': {
'Meta': {'unique_together': "(('course_id', 'student', 'module_state_key'),)", 'object_name': 'StudentModule'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['courseware']
|
Anik1199/Kernel_taoshan | refs/heads/cm-12.1 | scripts/build-all.py | 1182 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
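# Illustrative example (assumed tree): a kernel containing
# arch/arm/configs/msm8960_defconfig would make scan_configs() return
# {'msm8960': 'arch/arm/configs/msm8960_defconfig'}.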
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
|
idovear/odoo | refs/heads/master | addons/account/wizard/account_subscription_generate.py | 347 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_subscription_generate(osv.osv_memory):
_name = "account.subscription.generate"
_description = "Subscription Compute"
_columns = {
'date': fields.date('Generate Entries Before', required=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
}
def action_generate(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
sub_line_obj = self.pool.get('account.subscription.line')
moves_created=[]
for data in self.read(cr, uid, ids, context=context):
line_ids = sub_line_obj.search(cr, uid, [('date', '<', data['date']), ('move_id', '=', False)], context=context)
moves = sub_line_obj.move_create(cr, uid, line_ids, context=context)
moves_created.extend(moves)
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_form')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['domain'] = str([('id','in',moves_created)])
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
google/filament | refs/heads/main | third_party/libgtest/googletest/test/gtest_test_utils.py | 64 | # Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing and Mocking Framework."""
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
import os
import sys
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
IS_OS2 = os.name == 'os2'
import atexit
import shutil
import tempfile
import unittest as _test_module
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN or IS_OS2) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary "%s". Please make sure to provide\n'
'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.' % path)
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
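# Illustrative example: on POSIX, GetExitStatus(os.system('exit 3')) is 3,
# while a child terminated by a signal yields -1 here.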
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True if and only if the child process has been
terminated by a signal.
signal               Signal that terminated the child process.
exited True if and only if the child process exited
normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
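# Hedged usage sketch (the command below is an assumption):
#
#   p = Subprocess(['echo', 'hi'])
#   p.exited     # -> True on a normal exit
#   p.exit_code  # -> 0
#   p.output     # -> combined stdout/stderr, e.g. 'hi\n'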
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
jorgemarsal/DistributedR | refs/heads/master | platform/worker/scripts/generate_diagrams.py | 3 | import re
import pdb
import textwrap
import sys
from parse_request_trace import *
class JsSequenceDiagramGenerator:
def __init__(self):
self.master_ts_template = 'Note left of MASTER: %s\n'
self.worker_ts_template = 'Note right of WORKER: %s\n'
self.worker_template = 'MASTER->WORKER: "%s"\n'
self.master_template = 'WORKER->MASTER: "%s"\n'
def save_file(self,tasks):
interactions = ''
first_ts = None
for t in tasks:
if 'WorkerRequestLogger' in t['component']:
interactions += self.master_ts_template %('%s'%(t['timestamp']))
interactions += self.master_template %(r'%s\n%s'%(t['type'], t['detailed_message'].encode('string_escape')))
elif 'MasterRequestLogger' in t['component']:
interactions += self.worker_ts_template %('%s'%(t['timestamp']))
interactions += self.worker_template %(r'%s\n%s'%(t['type'], t['detailed_message'].encode('string_escape')))
else:
pdb.set_trace()
raise Exception('Unknown component')
return interactions
class MscSequenceDiagramGenerator:
def __init__(self):
self.master_template = 'a=>b: [label="%s", ident="left"];\n'
self.worker_template = 'a<=b: [label="%s", ident="left"];\n'
self.template = '''msc {
hscale="auto";
a [label="MASTER"], b [label="WORKER"];
%s
}'''
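    # Escape quotes, braces and brackets (plus control characters) so labels
    # survive mscgen's parser.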
def escape_string(self, string):
return string.encode('string_escape').replace('"', '\\"').replace('{', '\\{').replace('}', '\\}').replace('[', '\\[').replace(']', '\\]')
def save_file(self,tasks):
interactions = ''
first_ts = None
for t in tasks:
if 'WorkerRequestLogger' in t['component']:
interactions += self.master_template %(r'%s\n%s'%(t['type'], self.escape_string(t['detailed_message'])))
elif 'MasterRequestLogger' in t['component']:
interactions += self.worker_template %(r'%s\n%s'%(t['type'], self.escape_string(t['detailed_message'])))
else:
pdb.set_trace()
raise Exception('Unknown component')
return self.template%(interactions)
if __name__ == "__main__":
r = RequestTraceParser()
r.parse(sys.argv[1], excludes=['HELLO', 'HELLOREPLY'])
j = JsSequenceDiagramGenerator()
js_file_contents = j.save_file(r.get_all())
open(sys.argv[2],'w').write(js_file_contents)
m = MscSequenceDiagramGenerator()
msc_file_contents = m.save_file(r.get_all())
open(sys.argv[3],'w').write(msc_file_contents)
|
thoughtpalette/thoughts.thoughtpalette.com | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/foxpro.py | 335 | # -*- coding: utf-8 -*-
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
    FoxPro syntax allows all keywords and function names to be shortened
    to four characters. Shortened forms are not recognized by this lexer.
*New in Pygments 1.6.*
"""
name = 'FoxPro'
aliases = ['Clipper', 'XBase']
filenames = ['*.PRG', '*.prg']
    mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
# Square brackets may be used for array indices
# and for string literal. Look for arrays
# before matching string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
(r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
(r'(^\s*\*|&&|&&).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}
|
kanagasabapathi/python-for-android | refs/heads/master | python-modules/twisted/twisted/internet/test/test_address.py | 56 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
import re
from twisted.trial import unittest
from twisted.internet.address import IPv4Address, UNIXAddress
class AddressTestCaseMixin(object):
def test_addressComparison(self):
"""
        Test that two different address instances sharing the same
        properties are considered equal.
"""
self.assertEquals(self.buildAddress(), self.buildAddress())
def _stringRepresentation(self, stringFunction):
"""
Verify that the string representation of an address object conforms to a
simple pattern (the usual one for Python object reprs) and contains
values which accurately reflect the attributes of the address.
"""
addr = self.buildAddress()
pattern = "".join([
"^",
"([^\(]+Address)", # class name,
"\(", # opening bracket,
"([^)]+)", # arguments,
"\)", # closing bracket,
"$"
])
stringValue = stringFunction(addr)
m = re.match(pattern, stringValue)
self.assertNotEquals(
None, m,
"%s does not match the standard __str__ pattern "
"ClassName(arg1, arg2, etc)" % (stringValue,))
self.assertEquals(addr.__class__.__name__, m.group(1))
args = [x.strip() for x in m.group(2).split(",")]
self.assertEquals(
args,
[argSpec[1] % (getattr(addr, argSpec[0]),) for argSpec in self.addressArgSpec])
def test_str(self):
"""
C{str} can be used to get a string representation of an address instance
containing information about that address.
"""
self._stringRepresentation(str)
def test_repr(self):
"""
C{repr} can be used to get a string representation of an address
instance containing information about that address.
"""
self._stringRepresentation(repr)
class IPv4AddressTestCaseMixin(AddressTestCaseMixin):
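    # (attribute name, format spec) pairs used to validate repr()/str() output.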
addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))
class IPv4AddressTCPTestCase(unittest.TestCase, IPv4AddressTestCaseMixin):
def buildAddress(self):
return IPv4Address("TCP", "127.0.0.1", 0)
class IPv4AddressUDPTestCase(unittest.TestCase, IPv4AddressTestCaseMixin):
def buildAddress(self):
return IPv4Address("UDP", "127.0.0.1", 0)
class UNIXAddressTestCase(unittest.TestCase, AddressTestCaseMixin):
addressArgSpec = (("name", "%r"),)
def setUp(self):
self._socketAddress = self.mktemp()
def buildAddress(self):
return UNIXAddress(self._socketAddress)
|
eleonrk/SickRage | refs/heads/master | sickchill/show/recommendations/recommended.py | 2 | # coding=utf-8
#
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
"""
Recommend shows based on lists from indexers
"""
from __future__ import print_function, unicode_literals
import os
import posixpath
import sickbeard
from sickbeard import helpers
from sickchill.helper.encoding import ek
class RecommendedShow(object):
"""
Base class for show recommendations
"""
def __init__(self, show_id, title, indexer, indexer_id, cache_subfolder='recommended',
rating=None, votes=None, image_href=None, image_src=None):
"""
Create a show recommendation
:param show_id: as provided by the list provider
:param title: of the show as displayed in the recommended show page
        :param indexer: the indexer this show is mapped to
:param indexer_id: a mapped indexer_id for indexer
:param cache_subfolder: to store images
:param rating: of the show in percent
:param votes: number of votes
:param image_href: the href when clicked on the show image (poster)
:param image_src: the url to the "cached" image (poster)
"""
self.show_id = show_id
self.title = title
self.indexer = indexer
self.indexer_id = indexer_id
self.cache_subfolder = cache_subfolder
self.rating = rating
self.votes = votes
self.image_href = image_href
self.image_src = image_src
# Check if the show is currently already in the db
self.show_in_list = self.indexer_id in {show.indexerid for show in sickbeard.showList if show.indexerid}
self.session = helpers.make_session()
def cache_image(self, image_url):
"""
        Store a cached copy of the image in the cache dir
:param image_url: Source URL
"""
if not self.cache_subfolder:
return
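        # Build the web-relative image path first; the same basename is then
        # mirrored on disk under sickbeard.CACHE_DIR.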
self.image_src = ek(posixpath.join, 'images', self.cache_subfolder, ek(os.path.basename, image_url))
path = ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, 'images', self.cache_subfolder))
if not ek(os.path.exists, path):
ek(os.makedirs, path)
full_path = ek(posixpath.join, path, ek(os.path.basename, image_url))
if not ek(os.path.isfile, full_path):
helpers.download_file(image_url, full_path, session=self.session)
|
pdellaert/ansible | refs/heads/devel | test/integration/targets/module_utils/library/test_env_override.py | 170 | #!/usr/bin/python
# Most of these names are only available via PluginLoader so pylint doesn't
# know they exist
# pylint: disable=no-name-in-module
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.json_utils import data
from ansible.module_utils.mork import data as mork_data
results = {"json_utils": data, "mork": mork_data}
AnsibleModule(argument_spec=dict()).exit_json(**results)
|
wnesl/gnuradio-IA | refs/heads/master | gnuradio-core/src/examples/pfb/synth_filter.py | 17 | #!/usr/bin/env python
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = gr.sig_source_c(fs, gr.GR_SIN_WAVE, fi, 1)
sigs.append(s)
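    # Synthesizer prototype filter: args are gain, sample rate, cutoff at half
    # of one channel's width, 100 Hz transition width, 100 dB attenuation.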
taps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps)/nchans)
filtbank = gr.pfb_synthesizer_ccf(nchans, taps)
head = gr.head(gr.sizeof_gr_complex, N)
snk = gr.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = scipy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
pylab.show()
if __name__ == "__main__":
main()
|
antin/Open-Knesset | refs/heads/master | committees/migrations/0010_auto__add_field_committee_description.py | 14 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Committee.description'
db.add_column('committees_committee', 'description', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Committee.description'
db.delete_column('committees_committee', 'description')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'chaired_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'replacing_in_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"})
},
'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'committees.protocolpart': {
'Meta': {'object_name': 'ProtocolPart'},
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['committees.CommitteeMeeting']"}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'protocol_parts'", 'null': 'True', 'to': "orm['persons.Person']"})
},
'committees.topic': {
'Meta': {'object_name': 'Topic'},
'committees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['committees.Committee']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'editors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'editing_topics'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meetings': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'where': ('django.db.models.fields.TextField', [], {}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
},
'laws.vote': {
'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'links.link': {
'Meta': {'object_name': 'Link'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
'links.linktype': {
'Meta': {'object_name': 'LinkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['committees']
|
mistydemeo/gyp | refs/heads/master | test/copies/gyptest-default.py | 264 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using the build tool default.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('copies.gyp', chdir='src')
test.relocate('src', 'relocate/src')
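# Relocating the tree before building helps catch generators that bake in
# absolute source paths.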
test.build('copies.gyp', chdir='relocate/src')
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
test.built_file_must_match('copies-out/file2',
'file2 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file3',
'file3 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file4',
'file4 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/directory/subdir/file5',
'file5 contents\n',
chdir='relocate/src')
test.built_file_must_match('copies-out/subdir/file6',
'file6 contents\n',
chdir='relocate/src')
test.pass_test()
|
CUCWD/edx-platform | refs/heads/master | openedx/core/djangoapps/catalog/migrations/0001_initial.py | 40 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CatalogIntegration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('internal_api_url', models.URLField(help_text='API root to be used for server-to-server requests (e.g., https://catalog-internal.example.com/api/v1/).', verbose_name='Internal API URL')),
('cache_ttl', models.PositiveIntegerField(default=0, help_text='Specified in seconds. Enable caching of API responses by setting this to a value greater than 0.', verbose_name='Cache Time To Live')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
AkA84/edx-platform | refs/heads/master | cms/djangoapps/contentstore/tests/test_export_git.py | 189 | """
Test the ability to export courses to xml from studio
"""
import copy
import os
import shutil
import subprocess
from uuid import uuid4
from django.conf import settings
from django.test.utils import override_settings
from .utils import CourseTestCase
import contentstore.git_export_utils as git_export_utils
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestExportGit(CourseTestCase):
"""
Tests pushing a course to a git repository
"""
def setUp(self):
"""
Setup test course, user, and url.
"""
super(TestExportGit, self).setUp()
self.course_module = modulestore().get_course(self.course.id)
self.test_url = reverse_course_url('export_git', self.course.id)
def make_bare_repo_with_course(self, repo_name):
"""
Make a local bare repo suitable for exporting to in
tests
"""
# Build out local bare repo, and set course git url to it
repo_dir = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR)
os.mkdir(repo_dir)
self.addCleanup(shutil.rmtree, repo_dir)
bare_repo_dir = '{0}/{1}.git'.format(
os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR),
repo_name
)
os.mkdir(bare_repo_dir)
self.addCleanup(shutil.rmtree, bare_repo_dir)
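        # A bare repository (no working tree) is required as a push target.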
subprocess.check_output(['git', '--bare', 'init', ], cwd=bare_repo_dir)
self.populate_course()
self.course_module.giturl = 'file://{}'.format(bare_repo_dir)
modulestore().update_item(self.course_module, self.user.id)
def test_giturl_missing(self):
"""
Test to make sure an appropriate error is displayed
if course hasn't set giturl.
"""
response = self.client.get(self.test_url)
self.assertEqual(200, response.status_code)
self.assertIn(
('giturl must be defined in your '
'course settings before you can export to git.'),
response.content
)
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertEqual(200, response.status_code)
self.assertIn(
('giturl must be defined in your '
'course settings before you can export to git.'),
response.content
)
def test_course_export_failures(self):
"""
Test failed course export response.
"""
self.course_module.giturl = 'foobar'
modulestore().update_item(self.course_module, self.user.id)
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertIn('Export Failed:', response.content)
def test_exception_translation(self):
"""
Regression test for making sure errors are properly stringified
"""
self.course_module.giturl = 'foobar'
modulestore().update_item(self.course_module, self.user.id)
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertNotIn('django.utils.functional.__proxy__', response.content)
def test_course_export_success(self):
"""
Test successful course export response.
"""
self.make_bare_repo_with_course('test_repo')
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertIn('Export Succeeded', response.content)
def test_repo_with_dots(self):
"""
Regression test for a bad directory pathing of repo's that have dots.
"""
self.make_bare_repo_with_course('test.repo')
response = self.client.get('{}?action=push'.format(self.test_url))
self.assertIn('Export Succeeded', response.content)
def test_dirty_repo(self):
"""
Add additional items not in the repo and make sure they aren't
there after the export. This allows old content to removed
in the repo.
"""
repo_name = 'dirty_repo1'
self.make_bare_repo_with_course(repo_name)
git_export_utils.export_to_git(self.course.id,
self.course_module.giturl, self.user)
# Make arbitrary change to course to make diff
self.course_module.matlab_api_key = 'something'
modulestore().update_item(self.course_module, self.user.id)
# Touch a file in the directory, export again, and make sure
# the test file is gone
repo_dir = os.path.join(
os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR),
repo_name
)
test_file = os.path.join(repo_dir, 'test.txt')
open(test_file, 'a').close()
self.assertTrue(os.path.isfile(test_file))
git_export_utils.export_to_git(self.course.id,
self.course_module.giturl, self.user)
self.assertFalse(os.path.isfile(test_file))
|
dyoung418/tensorflow | refs/heads/master | tensorflow/tools/ci_build/copy_binary.py | 31 | #!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically copy TensorFlow binaries
#
# Usage:
# ./tensorflow/tools/ci_build/copy_binary.py --filename
# tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-cp35-cp35m-manylinux1_x86_64.whl
# --new_py_ver 36
#
"""Copy binaries of TensorFlow for different python versions."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import shutil
import subprocess
import zipfile
UNZIP_CMD = "/usr/bin/unzip"
ZIP_CMD = "/usr/bin/zip"
SED_CMD = "/bin/sed"
TF_NIGHTLY_REGEX = r"(.+)tf_nightly(|_gpu)-(\d\.\d\.\d.dev[\d]{0,8})-(.+)\.whl"
BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
    raise RuntimeError("%s not found." % filename)
def copy_binary(directory, origin_tag, new_tag, version, gpu=False):
"""Rename and copy binaries for different python versions.
Arguments:
    directory: string path of the directory containing the wheel
    origin_tag: str of the old python version tag
    new_tag: str of the new python version tag
    version: str version of the package
    gpu: bool, whether this is a GPU build
"""
print("Rename and copy binaries with %s to %s." % (origin_tag, new_tag))
if gpu:
package = "tf_nightly_gpu"
else:
package = "tf_nightly"
origin_binary = BINARY_STRING_TEMPLATE % (package, version, origin_tag)
new_binary = BINARY_STRING_TEMPLATE % (package, version, new_tag)
zip_ref = zipfile.ZipFile(directory + origin_binary, "r")
zip_ref.extractall()
zip_ref.close()
old_py_ver = re.search(r"(cp\d\d-cp\d\d)", origin_tag).group(1)
new_py_ver = re.search(r"(cp\d\d-cp\d\d)", new_tag).group(1)
subprocess.check_call(
"%s -i s/%s/%s/g %s-%s.dist-info/WHEEL" % (SED_CMD, old_py_ver,
new_py_ver, package, version),
shell=True)
zout = zipfile.ZipFile(directory + new_binary, "w", zipfile.ZIP_DEFLATED)
zip_these_files = [
"%s-%s.dist-info" % (package, version),
"%s-%s.data" % (package, version)
]
for dirname in zip_these_files:
for root, _, files in os.walk(dirname):
for filename in files:
zout.write(os.path.join(root, filename))
zout.close()
for dirname in zip_these_files:
shutil.rmtree(dirname)
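# Worked example (editorial sketch, mirroring the usage shown at the top of
# this file): for --filename
#   tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-cp35-cp35m-manylinux1_x86_64.whl
# and --new_py_ver 36, TF_NIGHTLY_REGEX yields directory "tf_nightly/",
# gpu "_gpu", version "1.4.0.dev20170914", and origin_tag
# "cp35-cp35m-manylinux1_x86_64". str.replace then rewrites every "cp35" to
# "cp36", so copy_binary() emits
#   tf_nightly_gpu-1.4.0.dev20170914-cp36-cp36m-manylinux1_x86_64.whl
# after patching the dist-info WHEEL metadata via sed.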
def main():
"""This script copies binaries.
Requirements:
filename: The path to the whl file
AND
    new_py_ver: The new two-digit python version (e.g. 27 or 36)
Raises:
RuntimeError: If the whl file was not found
"""
parser = argparse.ArgumentParser(description="Cherry picking automation.")
# Arg information
parser.add_argument(
"--filename", help="path to whl file we are copying", required=True)
parser.add_argument(
"--new_py_ver", help="two digit py version eg. 27 or 33", required=True)
args = parser.parse_args()
# Argument checking
check_existence(args.filename)
regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)
directory = regex_groups.group(1)
gpu = regex_groups.group(2)
version = regex_groups.group(3)
origin_tag = regex_groups.group(4)
old_py_ver = re.search(r"(cp\d\d)", origin_tag).group(1)
# Create new tags
new_tag = origin_tag.replace(old_py_ver, "cp" + args.new_py_ver)
# Copy the binary with the info we have
copy_binary(directory, origin_tag, new_tag, version, gpu)
if __name__ == "__main__":
main()
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/tangible/crafting/base/shared_base_repair.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/crafting/base/shared_base_repair.iff"
result.attribute_template_id = 2
result.stfName("crafting","base_repair_tool")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
tlksio/tlksio | refs/heads/develop | env/lib/python3.4/site-packages/pymongo/ismaster.py | 19 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse a response to the 'ismaster' command."""
import itertools
from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE
def _get_server_type(doc):
"""Determine the server type from an ismaster response."""
if not doc.get('ok'):
return SERVER_TYPE.Unknown
if doc.get('isreplicaset'):
return SERVER_TYPE.RSGhost
elif doc.get('setName'):
if doc.get('hidden'):
return SERVER_TYPE.RSOther
elif doc.get('ismaster'):
return SERVER_TYPE.RSPrimary
elif doc.get('secondary'):
return SERVER_TYPE.RSSecondary
elif doc.get('arbiterOnly'):
return SERVER_TYPE.RSArbiter
else:
return SERVER_TYPE.RSOther
elif doc.get('msg') == 'isdbgrid':
return SERVER_TYPE.Mongos
else:
return SERVER_TYPE.Standalone
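# Hand-built examples (editorial sketch): _get_server_type({'ok': 1,
# 'setName': 'rs0', 'ismaster': True}) follows the setName branch and
# returns SERVER_TYPE.RSPrimary; {'ok': 1, 'msg': 'isdbgrid'} identifies a
# mongos router; and a failed response such as {'ok': 0} maps to
# SERVER_TYPE.Unknown.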
class IsMaster(object):
__slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')
def __init__(self, doc):
"""Parse an ismaster response from the server."""
self._server_type = _get_server_type(doc)
self._doc = doc
self._is_writable = self._server_type in (
SERVER_TYPE.RSPrimary,
SERVER_TYPE.Standalone,
SERVER_TYPE.Mongos)
self._is_readable = (
self.server_type == SERVER_TYPE.RSSecondary
or self._is_writable)
@property
def document(self):
"""The complete ismaster command response document.
.. versionadded:: 3.4
"""
return self._doc.copy()
@property
def server_type(self):
return self._server_type
@property
def all_hosts(self):
"""List of hosts, passives, and arbiters known to this server."""
return set(imap(common.clean_node, itertools.chain(
self._doc.get('hosts', []),
self._doc.get('passives', []),
self._doc.get('arbiters', []))))
@property
def tags(self):
"""Replica set member tags or empty dict."""
return self._doc.get('tags', {})
@property
def primary(self):
"""This server's opinion about who the primary is, or None."""
if self._doc.get('primary'):
return common.partition_node(self._doc['primary'])
else:
return None
@property
def replica_set_name(self):
"""Replica set name or None."""
return self._doc.get('setName')
@property
def max_bson_size(self):
return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)
@property
def max_message_size(self):
return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)
@property
def max_write_batch_size(self):
return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)
@property
def min_wire_version(self):
return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)
@property
def max_wire_version(self):
return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)
@property
def set_version(self):
return self._doc.get('setVersion')
@property
def election_id(self):
return self._doc.get('electionId')
@property
def is_writable(self):
return self._is_writable
@property
def is_readable(self):
return self._is_readable
@property
def me(self):
me = self._doc.get('me')
if me:
return common.clean_node(me)
@property
def last_write_date(self):
return self._doc.get('lastWrite', {}).get('lastWriteDate')
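# Usage sketch (editorial; uses a hand-built response document rather than a
# live server): IsMaster({'ok': 1, 'ismaster': True, 'setName': 'rs0',
# 'hosts': ['a:27017', 'b:27017']}) reports server_type SERVER_TYPE.RSPrimary
# and is_writable True, and all_hosts holds the (host, port) pairs produced
# by common.clean_node -- roughly {('a', 27017), ('b', 27017)}.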
|
DolphinDream/sverchok | refs/heads/master | node_scripts/SNLite_templates/utils/shape_key_bake.py | 2 | """
in verts v .=[] n=0
in edges s .=[] n=0
in pols s .=[] n=0
in active s .=0 n=2
in frame s .=1 n=2
"""
import bpy
import bmesh
from mathutils import Vector
# unwrap the extra outer list bracket from the node inputs
def okok(a):
if a:
return a[0]
else:
return []
verts = okok(verts)
edges = okok(edges)
pols = okok(pols)
# main function
def make():
# find mesh or create
if not 'sv_shape_key' in bpy.data.meshes:
mesh = bpy.data.meshes.new(name='sv_shape_key')
mesh.from_pydata(verts,edges,pols)
else:
mesh = bpy.data.meshes['sv_shape_key']
# find object or create
if not 'sv_shape_key' in bpy.data.objects:
obj = bpy.data.objects.new("sv_shape_key", mesh)
        bpy.context.scene.collection.objects.link(obj)
else:
obj = bpy.data.objects['sv_shape_key']
if not 'sv_shape_key' in bpy.context.scene.collection.objects:
bpy.context.scene.collection.objects.link(obj)
# shapekeys adding
# make shape key basis at first
if not obj.data.shape_keys:
sk0 = obj.shape_key_add(name='Basis')
else:
# name of new shape from frame number
kn = "sverchok %d"%frame
key = bpy.data.shape_keys[obj.data.shape_keys.name]
key.use_relative = False
# check for equal length of vertices
if len(obj.data.vertices) != len(verts):
return
# current frame to add
if kn not in key.key_blocks:
sk = obj.shape_key_add(name=kn)
for i,v in enumerate(verts):
key.key_blocks[kn].data[i].co = Vector(v)
if active and verts:
make()
#bpy.data.shape_keys[obj.data.shape_keys.name].key_blocks[kn].data[:].co |
orangeYao/twiOpinion | refs/heads/master | testGui/readJson.py | 1 | import re
import json
import sys
from pprint import pprint
from random import randint
from nltk import PorterStemmer
from nltk import FreqDist
from nltk.corpus import stopwords
stop = stopwords.words('english')
from guess_language import guessLanguage #in local
from optparse import OptionParser
parser = OptionParser()
wanted_keys = ['statuses_count', 'followers_count', 'friends_count', 'screen_name', 'created_at', 'location']
def is_ascii(text):  # True means plain ASCII; False means the text needs handling
if isinstance(text, unicode):
try:
text.encode('ascii')
except UnicodeEncodeError:
return False
else:
try:
text.decode('ascii')
except UnicodeDecodeError:
return False
return True
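# Quick illustration (editorial note): is_ascii(u"hello") is True, while
# is_ascii(u"h\xe9llo") is False because the accented character cannot be
# encoded as ASCII; plain byte strings are checked with decode('ascii')
# instead.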
def filter(text):
        # Remove stop words, punctuation and numbers; lowercase; then stem each word
return_list = []
for i in re.split("[,. \-!?:_'%$/#@&;\n\d]+", text):
j = i.lower()
if not is_ascii(j):
j = j.encode('ascii','ignore')
#print j
if len(j) > 1 and (j not in stop) and (len(j) > 3):
k = PorterStemmer().stem_word(j)
if isinstance(k, unicode):
k = k.encode('ascii','ignore')
if (not k.isdigit()):
return_list.append(k)
return return_list
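# Worked example (editorial sketch; exact stems depend on the installed NLTK
# data): filter("The RUNNING dogs, running fast!") splits on the punctuation
# class, lowercases, drops the stop word "the" and any token of length <= 3,
# and stems the rest, giving roughly ['run', 'dog', 'run', 'fast'].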
def main(fileNameIn):
count = 0;
f = open(fileNameIn[0:-5] + "_Fetched.txt", 'w')
f2 = open(fileNameIn[0:-5] + "_Fetched.json", 'w')
for line in open(fileNameIn, 'r'):
try:
loaded = json.loads(line)
tweet = loaded.get("text")
except ValueError:
#print "valueError cateched!"
continue
if type(tweet) is unicode:
tweet = re.sub(r'RT @.*?: ' ,'',tweet)
tweet = re.sub(r'http\S+', '', tweet)
tweet = re.sub(r'\n', '', tweet)
if guessLanguage(tweet) == 'en':
if is_ascii(tweet):
f.write(tweet + '\n')
count += 1
f2_dict = {}
f2_dict["text"] = tweet
for key in wanted_keys:
f2_dict[key] = loaded["user"].get(key)
json.dump(f2_dict, f2)
f2.write('\n')
return count
|
msegado/edx-platform | refs/heads/master | common/djangoapps/student/tests/test_reset_password.py | 34 | """
Test the various password reset flows
"""
import json
import re
import unittest
from django.core.cache import cache
from django.conf import settings
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, base36_to_int, int_to_base36
from mock import Mock, patch
import ddt
from student.views import password_reset, password_reset_confirm_wrapper, SETTING_CHANGE_INITIATED
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from util.testing import EventTestMixin
from .test_microsite import fake_microsite_get_value
@ddt.ddt
class ResetPasswordTests(EventTestMixin, TestCase):
""" Tests that clicking reset password sends email, and doesn't activate the user
"""
request_factory = RequestFactory()
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD_PREFIX
self.user_bad_passwd.save()
def uidb36_to_uidb64(self, uidb36=None):
""" Converts uidb36 into uidb64 """
return force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36 or self.uidb36))))
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""Tests password reset behavior for user with password marked UNUSABLE_PASSWORD_PREFIX"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""Now test the exception cases with of reset_password called with invalid email."""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
# Note: even if the email is bad, we return a successful response code
# This prevents someone potentially trying to "brute-force" find out which
# emails are and aren't registered with edX
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
""" Try (and fail) resetting password 30 times in a row on an non-existant email address """
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
        # then the rate limiter should kick in and give an HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': 'thisdoesnotexist@foo.com'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_reset_password_email(self, send_email):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
obj = json.loads(good_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
(subject, msg, from_addr, to_addrs) = send_email.call_args[0]
self.assertIn("Password reset", subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", msg)
self.assertEquals(from_addr, settings.DEFAULT_FROM_EMAIL)
self.assertEquals(len(to_addrs), 1)
self.assertIn(self.user.email, to_addrs)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
#test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', msg).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
Tests that the right url protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), (None, 'edX'))
@ddt.unpack
def test_reset_password_email_domain(self, domain_override, platform_name, send_email):
"""
        Tests that the right url domain and platform name are included in
the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=domain_override)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}"
if domain_override:
reset_msg = reset_msg.format(domain_override)
else:
reset_msg = reset_msg.format(settings.SITE_NAME)
self.assertIn(reset_msg, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
@patch('django.core.mail.send_mail')
def test_reset_password_email_microsite(self, send_email):
"""
        Tests that the right url domain and platform name are included in
the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.user = self.user
password_reset(req)
_, msg, from_addr, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at openedx.localhost"
self.assertIn(reset_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
self.assertEqual(from_addr, "no-reply@fakeuniversity.com")
@patch('student.views.password_reset_confirm')
def test_reset_password_bad_token(self, reset_confirm):
"""Tests bad token and uidb36 in password reset"""
bad_reset_req = self.request_factory.get('/password_reset_confirm/NO-OP/')
password_reset_confirm_wrapper(bad_reset_req, 'NO', 'OP')
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb64'], self.uidb36_to_uidb64('NO'))
self.assertEquals(confirm_kwargs['token'], 'OP')
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
@patch('student.views.password_reset_confirm')
def test_reset_password_good_token(self, reset_confirm):
"""Tests good token and uidb36 in password reset"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb64'], self.uidb36_to_uidb64())
self.assertEquals(confirm_kwargs['token'], self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@patch('student.views.password_reset_confirm')
@patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
def test_reset_password_good_token_microsite(self, reset_confirm):
"""Tests password reset confirmation page for micro site"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['extra_context']['platform_name'], 'Fake University')
@patch('student.views.password_reset_confirm')
def test_reset_password_with_reused_password(self, reset_confirm):
"""Tests good token and uidb36 in password reset"""
good_reset_req = self.request_factory.get('/password_reset_confirm/{0}-{1}/'.format(self.uidb36, self.token))
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['uidb64'], self.uidb36_to_uidb64())
self.assertEquals(confirm_kwargs['token'], self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
|
thomazs/geraldo | refs/heads/master | site/newsite/django_1_0/django/contrib/humanize/templatetags/humanize.py | 40 | from django.utils.translation import ungettext, ugettext as _
from django.utils.encoding import force_unicode
from django import template
from django.template import defaultfilters
from datetime import date
import re
register = template.Library()
def ordinal(value):
"""
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer.
"""
try:
value = int(value)
except ValueError:
return value
t = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
if value % 100 in (11, 12, 13): # special case
return u"%d%s" % (value, t[0])
return u'%d%s' % (value, t[value % 10])
ordinal.is_safe = True
register.filter(ordinal)
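# Example values (editorial note): ordinal(1) == u'1st', ordinal(22) == u'22nd',
# and the special case above makes ordinal(111) == u'111th' rather than u'111st'.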
def intcomma(value):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = force_unicode(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
intcomma.is_safe = True
register.filter(intcomma)
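# Example (editorial note): intcomma(4500000) returns u'4,500,000'; each
# recursive call inserts one comma until the regex no longer matches.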
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
value = int(value)
if value < 1000000:
return value
if value < 1000000000:
new_value = value / 1000000.0
return ungettext('%(value).1f million', '%(value).1f million', new_value) % {'value': new_value}
if value < 1000000000000:
new_value = value / 1000000000.0
return ungettext('%(value).1f billion', '%(value).1f billion', new_value) % {'value': new_value}
if value < 1000000000000000:
new_value = value / 1000000000000.0
return ungettext('%(value).1f trillion', '%(value).1f trillion', new_value) % {'value': new_value}
return value
intword.is_safe = False
register.filter(intword)
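# Example (editorial note): intword(1200000) returns u'1.2 million', while
# intword(999999) falls below the threshold and comes back unchanged.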
def apnumber(value):
"""
For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style.
"""
try:
value = int(value)
except ValueError:
return value
if not 0 < value < 10:
return value
return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'), _('seven'), _('eight'), _('nine'))[value-1]
apnumber.is_safe = True
register.filter(apnumber)
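# Example (editorial note): apnumber(5) returns u'five' per AP style, while
# apnumber(10) falls outside 1-9 and is returned unchanged.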
def naturalday(value, arg=None):
"""
    For date values that are tomorrow, today or yesterday compared to the
    present day, returns a representative string. Otherwise, returns a string
formatted according to settings.DATE_FORMAT.
"""
try:
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object
return value
except ValueError:
# Date arguments out of range
return value
delta = value - date.today()
if delta.days == 0:
return _(u'today')
elif delta.days == 1:
return _(u'tomorrow')
elif delta.days == -1:
return _(u'yesterday')
return defaultfilters.date(value, arg)
register.filter(naturalday)
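# Example (editorial note): if today is date(2008, 9, 1), naturalday(
# date(2008, 9, 2)) returns u'tomorrow', while naturalday(date(2008, 8, 1))
# falls back to defaultfilters.date() formatting.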
|
julia2288-cmis/julia2288-cmis-cs2 | refs/heads/master | conditionals.py | 1 | import random
import math
print "This is just like snakes and ladders. If you press enter, computer will roll the dice. You have to go forward with the given number. There will be some traps in few steps. Good luck!"
def steps(total):
dice = raw_input("Click enter to roll the dice.")
if dice == "":
print
n = random.randint(1,6)
print n
total += int(n)
out = """
"You are in number {}.
""".format(total)
print out
return total
def traps():
    total = steps(0)
    if total == float(1):
        print "You stepped on a snake and the snake bit you. You will start from the beginning again."
        return steps(0)
    elif total == float(5):
        print "You found an escalator in front of you. You will skip 4 steps to the front."
        return abs(float(total + 4))
    elif total == float(6):
        print "You found a U.F.O., you will transfer to a random place."
        return steps(random.random() * 10)
    elif total == float(52):
        print "You stepped on a snake and the snake bit you. You will start from the beginning again."
        return steps(0)
    elif total == float(84):
        print "You stepped on a snake and the snake bit you. You will start from the beginning again."
        return steps(0)
    else:
        return steps(total)
def traps2(total):
    if total == float(3):
        print "You stepped on a banana peel. You will slip 3 steps back."
        return abs(float(total - 3))
    elif total == float(28):
        print "You found an escalator in front of you. You will skip 4 steps to the front."
        return abs(float(total + 4))
    elif total == float(8):
        print "You found an escalator in front of you. You will skip 4 steps to the front."
        return abs(float(total + 4))
    elif total == float(78):
        print "You found an escalator in front of you. You will skip 4 steps to the front."
        return abs(float(total + 4))
    elif total == float(38):
        print "You found an escalator in front of you. You will skip 4 steps to the front."
        return abs(float(total + 4))
    elif total == float(32):
        print "You found an escalator in front of you. You will skip 4 steps to the front."
        return abs(float(total + 4))
    else:
        return steps(total)
def end(total):
    if total >= 100:
        return True
    else:
        return False
def main():
    total = traps()
    if total == 50:
        print "Half way!"
    elif total >= 90:
        print "Almost there!"
main()
|
Kiiv/CouchPotatoServer | refs/heads/develop | libs/tornado/web.py | 13 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
from io import BytesIO
from tornado.concurrent import Future, is_future
from tornado import escape
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.util import import_object, ObjectDict, raise_exc_info, unicode_type, _websocket_mask
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Subclass this class and define `get()` or `post()` to make a handler.
If you want to support more methods than the standard GET/HEAD/POST, you
should override the class variable ``SUPPORTED_METHODS`` in your
`RequestHandler` subclass.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(br"[\x00-\x1f]")
def _convert_header_value(self, value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
value = value.encode('utf-8')
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if (len(value) > 4000 or
RequestHandler._INVALID_HEADER_CHAR_RE.search(value)):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments, strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
morsel[k] = v
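    # Usage sketch (editorial; extra attribute names follow the stdlib
    # Morsel documentation linked above): self.set_cookie("theme", "dark",
    # expires_days=30, httponly=True) stores the value for 30 days; only the
    # max_age keyword is rewritten to the 'max-age' Morsel key.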
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies).
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
return create_signed_value(self.application.settings["cookie_secret"],
name, value, version=version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
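    # Round-trip sketch (editorial): after self.set_secure_cookie("user",
    # "alice") on one request, a later request's self.get_secure_cookie("user")
    # returns b"alice" if the signature and timestamp validate, and None
    # otherwise.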
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
utf8(url)))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if not isinstance(chunk, (bytes, unicode_type, dict)):
raise TypeError("write() only accepts bytes, unicode, and dict objects")
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
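    # Example (editorial note): self.write({"status": "ok"}) serializes the
    # dict as JSON and switches Content-Type to application/json, whereas
    # self.write("<p>ok</p>") appends UTF-8 bytes under the default
    # text/html content type.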
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers, chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine(self.request.version,
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice. May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
self.finish()
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
        .. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
"""
return None
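    # An override sketch (the per-user ``prefs`` mapping is hypothetical):
    # map a stored preference to a Locale and fall back to Accept-Language
    # by returning None.
    #
    #     def get_user_locale(self):
    #         user = self.current_user
    #         if user and "locale" in user.prefs:
    #             return locale.get(user.prefs["locale"])
    #         return None  # fall back to get_browser_locale()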
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is a cached version of `get_current_user`, which you can
override to set the user based on, e.g., a cookie. If that
method is not overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
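    # A common override sketch (the cookie name "user" is illustrative):
    # read the user from a signed cookie set at login time.
    # get_secure_cookie returns None when the cookie is absent or fails
    # validation, so current_user stays falsy and @authenticated redirects
    # to the login url.
    #
    #     class BaseHandler(RequestHandler):
    #         def get_current_user(self):
    #             return self.get_secure_cookie("user")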
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
           The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
                raise ValueError("unknown xsrf cookie version %d" %
                                 output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
return None, None, None
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
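    # A settings sketch (the ``cookie_secret`` value is illustrative):
    # enabling ``xsrf_cookies`` makes check_xsrf_cookie run for every
    # unsafe method, so every POST form must embed this hidden field.
    #
    #     application = Application(handlers, xsrf_cookies=True,
    #                               cookie_secret="__change_me__")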
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
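    # A usage sketch (paths are illustrative): with ``static_path``
    # configured, static_url("css/site.css") yields something like
    # "/static/css/site.css?v=<hash>", letting browsers cache aggressively
    # while still picking up changed files.
    #
    #     class HomeHandler(RequestHandler):
    #         def get(self):
    #             self.write('<link rel="stylesheet" href="%s"/>'
    #                        % self.static_url("css/site.css"))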
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
etag = self._headers.get("Etag")
inm = utf8(self.request.headers.get("If-None-Match", ""))
return bool(etag and inm and inm.find(etag) >= 0)
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
self._handle_request_exception(e)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return self.request.method + " " + self.request.uri + \
" (" + self.request.remote_ip + ")"
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish()
return
self.log_exception(*sys.exc_info())
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is unnecessary if the method is also decorated with
``@gen.coroutine`` (it is legal but unnecessary to use the two
decorators together, in which case ``@asynchronous`` must be
first).
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example::
class MyRequestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. versionadded:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
"""
# Delay the IOLoop import because it's not available on app engine.
from tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if isinstance(result, Future):
# If @asynchronous is used with @gen.coroutine, (but
# not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``), in which case the next method will not be called
      until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
There is a subtle interaction between ``data_received`` and asynchronous
    ``prepare``: The first call to ``data_received`` may occur at any point
after the call to ``prepare`` has returned *or yielded*.
"""
if not issubclass(cls, RequestHandler):
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
cls._stream_request_body = True
return cls
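# A streaming-upload sketch (the handler name is hypothetical):
# data_received runs as each chunk arrives, and the regular verb method
# runs once the body is complete.
#
#     @stream_request_body
#     class UploadHandler(RequestHandler):
#         def prepare(self):
#             self.received = 0
#         def data_received(self, chunk):
#             self.received += len(chunk)
#         def put(self):
#             self.write("received %d bytes" % self.received)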
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
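# A usage sketch for the slash decorators (route and handler names are
# illustrative): pair the decorator with a pattern that matches both forms
# so the redirect has a handler to land on.
#
#     class DirHandler(RequestHandler):
#         @addslash
#         def get(self):
#             self.write("canonical trailing-slash form")
#
#     application = Application([(r"/dir/?", DirHandler)])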
class Application(httputil.HTTPServerConnectionDelegate):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.instance().start()
The constructor for this class takes in a list of `URLSpec` objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
The request class can be specified as either a class object or a
(fully-qualified) name.
Each tuple can contain additional elements, which correspond to the
    arguments to the `URLSpec` constructor. (Prior to Tornado 3.2,
    only tuples of two or three elements were allowed.)
A dictionary may be passed as the third element of the tuple,
which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
"""
def __init__(self, handlers=None, default_host="", transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
# Automatically reload modified modules
if self.settings.get('autoreload'):
from tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.instance().start()`` to start the server.
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, (tuple, list)):
assert len(spec) in (2, 3, 4)
spec = URLSpec(*spec)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = request.host.lower().split(':')[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def start_request(self, connection):
# Modern HTTPServer interface
return _RequestDispatcher(self, connection)
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = _RequestDispatcher(self, None)
dispatcher.set_request(request)
return dispatcher.execute()
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class _RequestDispatcher(httputil.HTTPMessageDelegate):
def __init__(self, application, connection):
self.application = application
self.connection = connection
self.request = None
self.chunks = []
self.handler_class = None
self.handler_kwargs = None
self.path_args = []
self.path_kwargs = {}
def headers_received(self, start_line, headers):
self.set_request(httputil.HTTPServerRequest(
connection=self.connection, start_line=start_line, headers=headers))
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def set_request(self, request):
self.request = request
self._find_handler()
self.stream_request_body = _has_stream_request_body(self.handler_class)
def _find_handler(self):
# Identify the handler to use as soon as we have the request.
# Save url path arguments for later.
app = self.application
handlers = app._get_host_handlers(self.request)
if not handlers:
self.handler_class = RedirectHandler
self.handler_kwargs = dict(url="http://" + app.default_host + "/")
return
for spec in handlers:
match = spec.regex.match(self.request.path)
if match:
self.handler_class = spec.handler_class
self.handler_kwargs = spec.kwargs
if spec.regex.groups:
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups, we want to use either groups
# or groupdict but not both.
if spec.regex.groupindex:
self.path_kwargs = dict(
(str(k), _unquote_or_none(v))
for (k, v) in match.groupdict().items())
else:
self.path_args = [_unquote_or_none(s)
for s in match.groups()]
return
if app.settings.get('default_handler_class'):
self.handler_class = app.settings['default_handler_class']
self.handler_kwargs = app.settings.get(
'default_handler_args', {})
else:
self.handler_class = ErrorHandler
self.handler_kwargs = dict(status_code=404)
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future.
self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
        determined automatically from ``status_code``, but can be set
        when using a non-standard numeric code.
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
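# A usage sketch (``entry_id`` is illustrative): the log_message
# placeholders are filled from the extra positional arguments, and
# ``reason`` supplies a status line for non-standard codes.
#
#     raise HTTPError(404, "entry %d not found", entry_id)
#     raise HTTPError(599, reason="Network Timeout")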
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will end
(calling `RequestHandler.finish` if it hasn't already been called),
but the outgoing response will not be modified and the error-handling
methods (including `RequestHandler.write_error`) will not be called.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video), but this handler should not be used with
files that are too large to fit comfortably in memory.
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
    class method. Instance methods may use the attributes ``self.path``,
    ``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified, content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
        ``root`` is the configured path for the `StaticFileHandler`,
        and ``absolute_path`` is the result of `get_absolute_path`.
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
root = os.path.abspath(root)
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
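    # An override sketch (the database layer is hypothetical): serving
    # content from somewhere other than the filesystem only requires
    # returning bytes (or an iterator of bytes) for the "path" produced by
    # get_absolute_path; a full replacement would also override
    # get_content_size, get_modified_time, get_absolute_path, and
    # validate_absolute_path as described in the class docstring.
    #
    #     class DBFileHandler(StaticFileHandler):
    #         @classmethod
    #         def get_content(cls, abspath, start=None, end=None):
    #             data = blob_store.fetch(abspath)  # hypothetical store
    #             return data[start:end]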
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
return mime_type
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
        cacheable for that amount of time or 0 to mark the resource as
        cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
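    # A CDN-style override sketch (the host is hypothetical): prepend an
    # absolute prefix while reusing the base class's version hashing.
    #
    #     class CDNStaticFileHandler(StaticFileHandler):
    #         @classmethod
    #         def make_static_url(cls, settings, path, include_version=True):
    #             url = super(CDNStaticFileHandler, cls).make_static_url(
    #                 settings, path, include_version)
    #             return "https://cdn.example.com" + url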
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
        ``static_url_prefix`` removed. The return value should be a
        filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += b', Accept-Encoding'
else:
headers['Vary'] = b'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
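# A usage sketch (the handler name is hypothetical): GET and HEAD requests
# without a logged-in user are redirected to the login url; other methods
# receive a 403.
#
#     class AccountHandler(RequestHandler):
#         @authenticated
#         def get(self):
#             self.write("hello %s" % escape.xhtml_escape(self.current_user))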
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Overridden in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Returns a JavaScript string that will be embedded in the page."""
return None
def javascript_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def embedded_css(self):
"""Returns a CSS string that will be embedded in the page."""
return None
def css_files(self):
"""Returns a list of CSS files required by this module."""
return None
def html_head(self):
"""Returns a CSS string that will be put in the <head/> element"""
return None
def html_body(self):
"""Returns an HTML string that will be put in the <body/> element"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
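# A minimal custom module sketch (names are illustrative): register the
# class via the ``ui_modules`` application setting and invoke it from a
# template with {% module Entry(entry) %}.
#
#     class Entry(UIModule):
#         def render(self, entry):
#             return self.render_string("module-entry.html", entry=entry)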
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any groups
in the regex will be passed in to the handler's get/post/etc
methods as arguments.
* ``handler``: `RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`Application.reverse_url`.
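
        Example (an illustrative sketch; ``UserHandler`` is a hypothetical
        `RequestHandler` subclass)::

            URLSpec(r"/user/([0-9]+)", UserHandler, name="user")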
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
if isinstance(handler, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
handler = import_object(handler)
self.handler_class = handler
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a), plus=False))
return self._path % tuple(converted_args)
url = URLSpec
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (currently 0; reserved for future key rotation features)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
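        # Illustrative example of the resulting layout (hypothetical values;
        # the trailing signature depends on the secret):
        #     2|1:0|10:1300000000|3:foo|8:aGVsbG8=|<64 hex chars>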
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2|1:0",
format_field(timestamp),
format_field(name),
format_field(value),
b''])
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def decode_signed_value(secret, name, value, max_age_days=31, clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
# Figure out what version this is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
value = utf8(value)
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value, max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value, max_age_days, clock)
else:
return None
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
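    # Illustrative example (hypothetical input):
    #     _consume_field(b"3:foo|rest") == (b"foo", b"rest")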
rest = value[2:] # remove version number
try:
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, rest = _consume_field(rest)
except ValueError:
return None
passed_sig = rest
signed_string = value[:-len(passed_sig)]
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def _unquote_or_none(s):
"""None-safe wrapper around url_unescape to handle unamteched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return escape.url_unescape(s, encoding=None, plus=False)
|
etkirsch/legends-of-erukar | refs/heads/master | erukar/content/conditions/negative/Blinded.py | 1 | from erukar.system.engine import Condition
class Blinded(Condition):
IsTemporary = True
Duration = 4 # In ticks, where a tick is 5 seconds
Incapacitates = False
Noun = 'Blindness'
Participle = 'Blinding'
Description = 'Reduces Acuity to Zero'
def modify_acuity(self):
return -self.target.acuity
|
ytanay/thinglang | refs/heads/master | thinglang/symbols/symbol_map.py | 1 | import collections
from typing import List
from thinglang.lexer.values.identifier import Identifier, GenericIdentifier
from thinglang.symbols.merged_symbol import MergedSymbol
from thinglang.symbols.symbol import Symbol
from thinglang.utils import collection_utils
class SymbolMap(object):
"""
Describes a symbol map - the public fields (members and methods) of a ThingDefinition.
Each SymbolMap also has an index number, by which it is known to the runtime.
"""
def __init__(self, members: List[Symbol], methods: List[Symbol], name: Identifier, extends: Identifier, generics: List[Identifier], convention, member_offset: int=0, method_offset: int=0):
self.members, self.methods, self.name, self.extends, self.generics, self.convention, self.member_offset, self.method_offset = \
members, self.merge_method_symbols(methods), name, extends, generics or [], convention, member_offset, method_offset
self.lookup = {
symbol.name: symbol for symbol in self.members + self.methods
}
assert len(self.methods) + len(self.members) == len(self.lookup), 'Thing definition contains colliding elements'
assert {x.convention for x in self.lookup.values()} == {self.convention}, 'Inconsistent calling conventions identified'
def serialize(self) -> dict:
"""
Serialize this symbol map (and its symbols) into a dict
"""
return {
"name": self.name,
"extends": self.extends,
"generics": self.generics,
"offsets": {
"members": self.member_offset,
"methods": self.method_offset
},
"convention": Symbol.serialize_convention(self.convention),
"symbols": collection_utils.flatten([x.serialize() for x in self.lookup.values()])
}
@classmethod
def from_serialized(cls, data: dict) -> 'SymbolMap':
"""
Reads a serialized symbol map and returns a new SymbolMap object.
Additionally, deserializes its symbols into Symbol objects
"""
symbols = [Symbol.load(elem) for elem in data['symbols']]
members = [symbol for symbol in symbols if symbol.kind == Symbol.MEMBER]
methods = [symbol for symbol in symbols if symbol.kind == Symbol.METHOD]
extends = Symbol.load_identifier(data['extends']) if data['extends'] else None
return cls(members=members,
methods=methods,
name=Identifier(data['name']),
extends=extends,
generics=[Identifier(x) for x in data['generics']],
convention=Symbol.serialize_convention(data['convention']),
member_offset=data['offsets']['members'],
method_offset=data['offsets']['methods'])
@classmethod
def from_thing(cls, thing, extends: 'SymbolMap') -> 'SymbolMap':
"""
        Creates a new SymbolMap from a ThingDefinition
        :param thing: the source ThingDefinition
        :param extends: optionally, the symbol map from which this thing inherits
"""
member_offset, method_offset = 0, 0
if extends is not None:
member_offset, method_offset = len(extends.members) + extends.member_offset, len(extends.methods) + extends.method_offset
members = [elem.symbol().update_index(member_offset + index) for index, elem in enumerate(thing.members)]
methods = [elem.symbol().update_index(method_offset + index) for index, elem in enumerate(thing.methods)]
return cls(members,
methods,
thing.name,
thing.extends,
thing.generics,
Symbol.BYTECODE,
member_offset=member_offset,
method_offset=method_offset)
def parameterize(self, parameters: dict) -> 'SymbolMap':
"""
Creates a new SymbolMap, replacing the generic parameters in this SymbolMap with determined values
:param parameters: a mapping of generic name -> resolved name
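
        Example (an illustrative sketch): for a thing with generics ``[T]``,
        calling ``parameterize({Identifier('T'): Identifier('number')})``
        returns a copy whose symbols use ``number`` wherever ``T`` appeared.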
"""
assert set(parameters.keys()) == set(self.generics), 'Partial parameterization is not allowed'
return SymbolMap(
[x.parameterize(parameters) for x in self.members],
[x.parameterize(parameters) for x in self.methods],
GenericIdentifier(self.name, tuple([parameters[x] for x in self.generics])),
self.extends,
[],
self.convention,
self.member_offset,
self.method_offset)
def __getitem__(self, item: Identifier) -> Symbol:
"""
Returns a symbol from this map
"""
return self.lookup[item]
def __contains__(self, item: Identifier) -> bool:
"""
Checks if a symbol identified by `item` exists
"""
return item in self.lookup
def __iter__(self):
"""
Iterates over all the fields of this symbol map
"""
return iter(self.lookup.values())
def __repr__(self):
return f'SymbolMap({self.name})'
@staticmethod
@collection_utils.drain()
def merge_method_symbols(methods):
method_symbols = collections.defaultdict(list)
for method_symbol in methods:
method_symbols[method_symbol.name].append(method_symbol)
for symbol_name, symbols in method_symbols.items():
yield symbols.pop() if len(symbols) == 1 else MergedSymbol(symbols)
|
mapzen/TileStache | refs/heads/integration-1 | TileStache/Goodies/Providers/PostGeoJSON.py | 11 | """ Provider that returns GeoJSON data responses from PostGIS queries.
Note:
The built-in TileStache Vector provider (new in version 1.9.0) offers a more
complete method of generating vector tiles, and supports many kinds of data
sources not available in PostGeoJSON such as shapefiles. PostGeoJSON will
continue to be provided and supported in TileStache, but future development
of vector support will be concentrated on the mainline Vector provider, not
this one.
More information:
http://tilestache.org/doc/TileStache.Vector.html
Anyway.
This is an example of a provider that does not return an image, but rather
queries a database for raw data and replies with a string of GeoJSON. For
example, it's possible to retrieve data for locations of OpenStreetMap points
of interest based on a query with a bounding box intersection.
Read more about the GeoJSON spec at: http://geojson.org/geojson-spec.html
Many Polymaps (http://polymaps.org) examples use GeoJSON vector data tiles,
which can be effectively created using this provider.
Keyword arguments:
dsn:
Database connection string suitable for use in psycopg2.connect().
See http://initd.org/psycopg/docs/module.html#psycopg2.connect for more.
query:
PostGIS query with a "!bbox!" placeholder for the tile bounding box.
Note that the table *must* use the web spherical mercaotr projection
900913. Query should return an id column, a geometry column, and other
columns to be placed in the GeoJSON "properties" dictionary.
See below for more on 900913.
clipping:
Boolean flag for optionally clipping the output geometries to the bounds
    of the enclosing tile. Defaults to false. This results in incomplete
geometries, dramatically smaller file sizes, and improves performance
and compatibility with Polymaps (http://polymaps.org).
id_column:
    Name of id column in output, defaults to "id". This determines which query
result column is placed in the GeoJSON "id" field.
geometry_column:
Name of geometry column in output, defaults to "geometry". This determines
which query result column is reprojected to lat/lon and output as a list
of geographic coordinates.
indent:
Number of spaces to indent output GeoJSON response. Defaults to 2.
Skip all indenting with a value of zero.
precision:
Number of decimal places of precision for output geometry. Defaults to 6.
Default should be appropriate for almost all street-mapping situations.
A smaller value can help cut down on output file size for lower-zoom maps.
Example TileStache provider configuration:
"points-of-interest":
{
"provider":
{
"class": "TileStache.Goodies.Providers.PostGeoJSON.Provider",
"kwargs":
{
"dsn": "dbname=geodata user=postgres",
"query": "SELECT osm_id, name, way FROM planet_osm_point WHERE way && !bbox! AND name IS NOT NULL",
"id_column": "osm_id", "geometry_column": "way",
"indent": 2
}
}
}
Caveats:
Currently only databases in the 900913 (google) projection are usable,
though this is the default setting for OpenStreetMap imports from osm2pgsql.
The "!bbox!" query placeholder (see example below) must be lowercase, and
expands to:
ST_SetSRID(ST_MakeBox2D(ST_MakePoint(ulx, uly), ST_MakePoint(lrx, lry)), 900913)
You must support the "900913" SRID in your PostGIS database for now.
For populating the internal PostGIS spatial_ref_sys table of projections,
this seems to work:
INSERT INTO spatial_ref_sys
(srid, auth_name, auth_srid, srtext, proj4text)
VALUES
(
900913, 'spatialreference.org', 900913,
'PROJCS["Popular Visualisation CRS / Mercator",GEOGCS["Popular Visualisation CRS",DATUM["Popular_Visualisation_Datum",SPHEROID["Popular Visualisation Sphere",6378137,0,AUTHORITY["EPSG","7059"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6055"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4055"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],AUTHORITY["EPSG","3785"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over'
);
"""
from re import compile
from copy import copy as _copy
from binascii import unhexlify as _unhexlify
try:
from json import JSONEncoder
except ImportError:
from simplejson import JSONEncoder
try:
from shapely.wkb import loads as _loadshape
from shapely.geometry import Polygon
from shapely.geos import TopologicalError
from psycopg2 import connect as _connect
from psycopg2.extras import RealDictCursor
except ImportError:
# At least it should be possible to build the documentation.
pass
from TileStache.Core import KnownUnknown
from TileStache.Geography import getProjectionByName
def row2feature(row, id_field, geometry_field):
""" Convert a database row dict to a feature dict.
"""
feature = {'type': 'Feature', 'properties': _copy(row)}
geometry = feature['properties'].pop(geometry_field)
feature['geometry'] = _loadshape(_unhexlify(geometry))
feature['id'] = feature['properties'].pop(id_field)
return feature
def _p2p(xy, projection):
""" Convert a simple (x, y) coordinate to a (lon, lat) position.
"""
loc = projection.projLocation(_Point(*xy))
return loc.lon, loc.lat
class _InvisibleBike(Exception): pass
def shape2geometry(shape, projection, clip):
""" Convert a Shapely geometry object to a GeoJSON-suitable geometry dict.
"""
if clip:
try:
shape = shape.intersection(clip)
except TopologicalError:
raise _InvisibleBike("Clipping shape resulted in a topological error")
if shape.is_empty:
raise _InvisibleBike("Clipping shape resulted in a null geometry")
geom = shape.__geo_interface__
if geom['type'] == 'Point':
geom['coordinates'] = _p2p(geom['coordinates'], projection)
elif geom['type'] in ('MultiPoint', 'LineString'):
geom['coordinates'] = [_p2p(c, projection)
for c in geom['coordinates']]
elif geom['type'] in ('MultiLineString', 'Polygon'):
geom['coordinates'] = [[_p2p(c, projection)
for c in cs]
for cs in geom['coordinates']]
elif geom['type'] == 'MultiPolygon':
geom['coordinates'] = [[[_p2p(c, projection)
for c in cs]
for cs in ccs]
for ccs in geom['coordinates']]
return geom
class _Point:
""" Local duck for (x, y) points.
"""
def __init__(self, x, y):
self.x = x
self.y = y
class SaveableResponse:
""" Wrapper class for JSON response that makes it behave like a PIL.Image object.
TileStache.getTile() expects to be able to save one of these to a buffer.
"""
def __init__(self, content, indent=2, precision=2):
self.content = content
self.indent = indent
self.precision = precision
def save(self, out, format):
if format != 'JSON':
raise KnownUnknown('PostGeoJSON only saves .json tiles, not "%s"' % format)
indent = None
if int(self.indent) > 0:
indent = self.indent
encoded = JSONEncoder(indent=indent).iterencode(self.content)
float_pat = compile(r'^-?\d+\.\d+$')
precision = 6
if int(self.precision) > 0:
precision = self.precision
format = '%.' + str(precision) + 'f'
for atom in encoded:
if float_pat.match(atom):
out.write(format % float(atom))
else:
out.write(atom)
class Provider:
"""
"""
def __init__(self, layer, dsn, query, clipping=False, id_column='id', geometry_column='geometry', indent=2, precision=6):
self.layer = layer
self.dbdsn = dsn
self.query = query
self.mercator = getProjectionByName('spherical mercator')
self.geometry_field = geometry_column
self.id_field = id_column
self.indent = indent
self.precision = precision
self.clipping = clipping
def getTypeByExtension(self, extension):
""" Get mime-type and format by file extension.
This only accepts "json".
"""
if extension.lower() != 'json':
raise KnownUnknown('PostGeoJSON only makes .json tiles, not "%s"' % extension)
return 'application/json', 'JSON'
def renderTile(self, width, height, srs, coord):
""" Render a single tile, return a SaveableResponse instance.
"""
nw = self.layer.projection.coordinateLocation(coord)
se = self.layer.projection.coordinateLocation(coord.right().down())
ul = self.mercator.locationProj(nw)
lr = self.mercator.locationProj(se)
bbox = 'ST_SetSRID(ST_MakeBox2D(ST_MakePoint(%.6f, %.6f), ST_MakePoint(%.6f, %.6f)), 900913)' % (ul.x, ul.y, lr.x, lr.y)
clip = self.clipping and Polygon([(ul.x, ul.y), (lr.x, ul.y), (lr.x, lr.y), (ul.x, lr.y)]) or None
db = _connect(self.dbdsn).cursor(cursor_factory=RealDictCursor)
db.execute(self.query.replace('!bbox!', bbox))
rows = db.fetchall()
db.close()
response = {'type': 'FeatureCollection', 'features': []}
for row in rows:
feature = row2feature(row, self.id_field, self.geometry_field)
try:
geom = shape2geometry(feature['geometry'], self.mercator, clip)
except _InvisibleBike:
# don't output this geometry because it's empty
pass
else:
feature['geometry'] = geom
response['features'].append(feature)
return SaveableResponse(response, self.indent, self.precision)
|
takahashiminoru/ryu | refs/heads/master | ryu/services/protocols/bgp/rtconf/vrfs.py | 4 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Running or runtime configuration related to Virtual Routing and Forwarding
tables (VRFs).
"""
import abc
import logging
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.services.protocols.bgp.utils import validation
from ryu.services.protocols.bgp.base import get_validator
from ryu.services.protocols.bgp.rtconf.base import BaseConf
from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
from ryu.services.protocols.bgp.rtconf.base import ConfWithId
from ryu.services.protocols.bgp.rtconf.base import ConfWithIdListener
from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
from ryu.services.protocols.bgp.rtconf.base import ConfWithStatsListener
from ryu.services.protocols.bgp.rtconf.base import MAX_NUM_EXPORT_RT
from ryu.services.protocols.bgp.rtconf.base import MAX_NUM_IMPORT_RT
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.base import validate
from ryu.services.protocols.bgp.rtconf.base import validate_med
from ryu.services.protocols.bgp.rtconf.base import validate_soo_list
LOG = logging.getLogger('bgpspeaker.rtconf.vrfs')
# Configuration setting names.
ROUTE_DISTINGUISHER = 'route_dist'
IMPORT_RTS = 'import_rts'
EXPORT_RTS = 'export_rts'
VRF_NAME = 'vrf_name'
VRF_DESC = 'vrf_desc'
VRF_RF = 'route_family'
IMPORT_MAPS = 'import_maps'
# Two supported VRF route-families
VRF_RF_IPV6 = 'ipv6'
VRF_RF_IPV4 = 'ipv4'
SUPPORTED_VRF_RF = (VRF_RF_IPV4, VRF_RF_IPV6)
# Default configuration values.
DEFAULT_VRF_NAME = 'no-vrf-name'
DEFAULT_VRF_DESC = 'no-vrf-desc'
@validate(name=IMPORT_RTS)
def validate_import_rts(import_rts):
if not isinstance(import_rts, list):
raise ConfigTypeError(conf_name=IMPORT_RTS, conf_value=import_rts)
if not (len(import_rts) <= MAX_NUM_IMPORT_RT):
raise ConfigValueError(desc='Max. import RT is limited to %s' %
MAX_NUM_IMPORT_RT)
if not all(validation.is_valid_ext_comm_attr(rt) for rt in import_rts):
raise ConfigValueError(conf_name=IMPORT_RTS, conf_value=import_rts)
# Check if we have duplicates
unique_rts = set(import_rts)
if len(unique_rts) != len(import_rts):
raise ConfigValueError(desc='Duplicate value provided %s' %
(import_rts))
return import_rts
@validate(name=EXPORT_RTS)
def validate_export_rts(export_rts):
if not isinstance(export_rts, list):
raise ConfigTypeError(conf_name=EXPORT_RTS, conf_value=export_rts)
if not (len(export_rts) <= MAX_NUM_EXPORT_RT):
        raise ConfigValueError(desc='Max. export RT is limited to %s' %
MAX_NUM_EXPORT_RT)
if not all(validation.is_valid_ext_comm_attr(rt) for rt in export_rts):
raise ConfigValueError(conf_name=EXPORT_RTS, conf_value=export_rts)
# Check if we have duplicates
unique_rts = set(export_rts)
if len(unique_rts) != len(export_rts):
raise ConfigValueError(desc='Duplicate value provided in %s' %
(export_rts))
return export_rts
@validate(name=ROUTE_DISTINGUISHER)
def validate_rd(route_dist):
if not validation.is_valid_route_dist(route_dist):
raise ConfigValueError(conf_name=ROUTE_DISTINGUISHER,
conf_value=route_dist)
return route_dist
@validate(name=VRF_RF)
def validate_vrf_rf(vrf_rf):
if vrf_rf not in SUPPORTED_VRF_RF:
        raise ConfigValueError(desc='Given VRF route family %s is not '
                               'supported.' % vrf_rf)
return vrf_rf
class VrfConf(ConfWithId, ConfWithStats):
"""Class that encapsulates configurations for one VRF."""
VRF_CHG_EVT = 'vrf_chg_evt'
VALID_EVT = frozenset([VRF_CHG_EVT])
REQUIRED_SETTINGS = frozenset([ROUTE_DISTINGUISHER,
IMPORT_RTS,
EXPORT_RTS])
OPTIONAL_SETTINGS = frozenset(
[VRF_NAME, MULTI_EXIT_DISC, SITE_OF_ORIGINS, VRF_RF, IMPORT_MAPS]
)
def __init__(self, **kwargs):
"""Create an instance of VRF runtime configuration."""
super(VrfConf, self).__init__(**kwargs)
def _init_opt_settings(self, **kwargs):
super(VrfConf, self)._init_opt_settings(**kwargs)
# We do not have valid default MED value.
# If no MED attribute is provided then we do not have to use MED.
# If MED attribute is provided we have to validate it and use it.
med = kwargs.pop(MULTI_EXIT_DISC, None)
if med and validate_med(med):
self._settings[MULTI_EXIT_DISC] = med
# We do not have valid default SOO value.
# If no SOO attribute is provided then we do not have to use SOO.
# If SOO attribute is provided we have to validate it and use it.
soos = kwargs.pop(SITE_OF_ORIGINS, None)
if soos and validate_soo_list(soos):
self._settings[SITE_OF_ORIGINS] = soos
        # Currently we only support VRF for IPv4 and IPv6, with IPv4 as default
vrf_rf = kwargs.pop(VRF_RF, VRF_RF_IPV4)
if vrf_rf and validate_vrf_rf(vrf_rf):
self._settings[VRF_RF] = vrf_rf
import_maps = kwargs.pop(IMPORT_MAPS, [])
self._settings[IMPORT_MAPS] = import_maps
# =========================================================================
# Required attributes
# =========================================================================
@property
def route_dist(self):
return self._settings[ROUTE_DISTINGUISHER]
# =========================================================================
# Optional attributes with valid defaults.
# =========================================================================
@property
def import_rts(self):
return list(self._settings[IMPORT_RTS])
@property
def export_rts(self):
return list(self._settings[EXPORT_RTS])
@property
def soo_list(self):
soos = self._settings.get(SITE_OF_ORIGINS)
if soos:
soos = list(soos)
else:
soos = []
return soos
@property
def multi_exit_disc(self):
"""Returns configured value of MED, else None.
This configuration does not have default value.
"""
return self._settings.get(MULTI_EXIT_DISC)
@property
def route_family(self):
"""Returns configured route family for this VRF
This configuration does not change.
"""
return self._settings.get(VRF_RF)
@property
def rd_rf_id(self):
return VrfConf.create_rd_rf_id(self.route_dist, self.route_family)
@property
def import_maps(self):
return self._settings.get(IMPORT_MAPS)
@staticmethod
def create_rd_rf_id(route_dist, route_family):
return route_dist, route_family
@staticmethod
def vrf_rf_2_rf(vrf_rf):
if vrf_rf == VRF_RF_IPV4:
return RF_IPv4_UC
elif vrf_rf == VRF_RF_IPV6:
return RF_IPv6_UC
else:
raise ValueError('Unsupported VRF route family given %s' % vrf_rf)
@staticmethod
def rf_2_vrf_rf(route_family):
if route_family == RF_IPv4_UC:
return VRF_RF_IPV4
elif route_family == RF_IPv6_UC:
return VRF_RF_IPV6
else:
raise ValueError('No supported mapping for route family '
'to vrf_route_family exists for %s' %
route_family)
@property
def settings(self):
"""Returns a copy of current settings.
As some of the attributes are themselves containers, we clone the
settings to provide clones for those containers as well.
"""
# Shallow copy first
cloned_setting = self._settings.copy()
# Don't want clone to link to same RT containers
cloned_setting[IMPORT_RTS] = self.import_rts
cloned_setting[EXPORT_RTS] = self.export_rts
cloned_setting[SITE_OF_ORIGINS] = self.soo_list
return cloned_setting
@classmethod
def get_opt_settings(cls):
self_confs = super(VrfConf, cls).get_opt_settings()
self_confs.update(VrfConf.OPTIONAL_SETTINGS)
return self_confs
@classmethod
def get_req_settings(cls):
self_confs = super(VrfConf, cls).get_req_settings()
self_confs.update(VrfConf.REQUIRED_SETTINGS)
return self_confs
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(VrfConf, cls).get_valid_evts()
self_valid_evts.update(VrfConf.VALID_EVT)
return self_valid_evts
def update(self, **kwargs):
"""Updates this `VrfConf` settings.
Notifies listeners if any settings changed. Returns `True` if update
        was successful. This VRF's route family, id and route dist settings
cannot be updated/changed.
"""
# Update inherited configurations
super(VrfConf, self).update(**kwargs)
vrf_id = kwargs.get(ConfWithId.ID)
vrf_rd = kwargs.get(ROUTE_DISTINGUISHER)
vrf_rf = kwargs.get(VRF_RF)
if (vrf_id != self.id or
vrf_rd != self.route_dist or
vrf_rf != self.route_family):
raise ConfigValueError(desc='id/route-distinguisher/route-family'
' do not match configured value.')
# Validate and update individual settings
new_imp_rts, old_imp_rts = \
self._update_import_rts(**kwargs)
export_rts_changed = self._update_export_rts(**kwargs)
soos_list_changed = self._update_soo_list(**kwargs)
med_changed = self._update_med(**kwargs)
re_export_needed = (export_rts_changed or
soos_list_changed or
med_changed)
import_maps = kwargs.get(IMPORT_MAPS, [])
re_import_needed = self._update_importmaps(import_maps)
# If we did have any change in value of any settings, we notify
# listeners
if (new_imp_rts is not None or
old_imp_rts is not None or
re_export_needed or re_import_needed):
evt_value = (
new_imp_rts,
old_imp_rts,
import_maps,
re_export_needed,
re_import_needed
)
self._notify_listeners(VrfConf.VRF_CHG_EVT, evt_value)
return True
def _update_import_rts(self, **kwargs):
import_rts = kwargs.get(IMPORT_RTS)
get_validator(IMPORT_RTS)(import_rts)
curr_import_rts = set(self._settings[IMPORT_RTS])
import_rts = set(import_rts)
if not import_rts.symmetric_difference(curr_import_rts):
return (None, None)
# Get the difference between current and new RTs
new_import_rts = import_rts - curr_import_rts
old_import_rts = curr_import_rts - import_rts
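        # Illustrative example (hypothetical RT strings):
        #     current = {'65000:1', '65000:2'}, new = {'65000:2', '65000:3'}
        #     -> new_import_rts = {'65000:3'}, old_import_rts = {'65000:1'}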
# Update current RTs and notify listeners.
self._settings[IMPORT_RTS] = import_rts
return (new_import_rts, old_import_rts)
def _update_export_rts(self, **kwargs):
export_rts = kwargs.get(EXPORT_RTS)
get_validator(EXPORT_RTS)(export_rts)
curr_export_rts = set(self._settings[EXPORT_RTS])
if curr_export_rts.symmetric_difference(export_rts):
# Update current RTs and notify listeners.
self._settings[EXPORT_RTS] = list(export_rts)
return True
return False
def _update_soo_list(self, **kwargs):
soo_list = kwargs.get(SITE_OF_ORIGINS, [])
get_validator(SITE_OF_ORIGINS)(soo_list)
curr_soos = set(self.soo_list)
# If given list is different from existing settings, we update it
if curr_soos.symmetric_difference(soo_list):
self._settings[SITE_OF_ORIGINS] = soo_list[:]
return True
return False
def _update_med(self, **kwargs):
multi_exit_disc = kwargs.get(MULTI_EXIT_DISC, None)
if multi_exit_disc:
get_validator(MULTI_EXIT_DISC)(multi_exit_disc)
if multi_exit_disc != self.multi_exit_disc:
self._settings[MULTI_EXIT_DISC] = multi_exit_disc
return True
return False
def _update_importmaps(self, import_maps):
if set(self._settings[IMPORT_MAPS]).symmetric_difference(import_maps):
self._settings[IMPORT_MAPS] = import_maps
return True
return False
def __repr__(self):
return ('<%s(route_dist: %r, import_rts: %r, export_rts: %r, '
'soo_list: %r)>' % (self.__class__.__name__,
self.route_dist, self.import_rts,
self.export_rts, self.soo_list))
def __str__(self):
return ('VrfConf-%s' % (self.route_dist))
class VrfsConf(BaseConf):
"""Container for all VRF configurations."""
ADD_VRF_CONF_EVT, REMOVE_VRF_CONF_EVT = range(2)
VALID_EVT = frozenset([ADD_VRF_CONF_EVT, REMOVE_VRF_CONF_EVT])
def __init__(self):
super(VrfsConf, self).__init__()
self._vrfs_by_rd_rf = {}
self._vrfs_by_id = {}
def _init_opt_settings(self, **kwargs):
pass
@property
def vrf_confs(self):
"""Returns a list of configured `VrfConf`s
"""
return list(self._vrfs_by_rd_rf.values())
@property
def vrf_interested_rts(self):
interested_rts = set()
for vrf_conf in self._vrfs_by_id.values():
interested_rts.update(vrf_conf.import_rts)
return interested_rts
def update(self, **kwargs):
raise NotImplementedError('Use either add/remove_vrf_conf'
' methods instead.')
def add_vrf_conf(self, vrf_conf):
if vrf_conf.rd_rf_id in self._vrfs_by_rd_rf.keys():
raise RuntimeConfigError(
desc='VrfConf with rd_rf %s already exists'
% str(vrf_conf.rd_rf_id)
)
if vrf_conf.id in self._vrfs_by_id:
raise RuntimeConfigError(
desc='VrfConf with id %s already exists' % str(vrf_conf.id)
)
self._vrfs_by_rd_rf[vrf_conf.rd_rf_id] = vrf_conf
self._vrfs_by_id[vrf_conf.id] = vrf_conf
self._notify_listeners(VrfsConf.ADD_VRF_CONF_EVT, vrf_conf)
def remove_vrf_conf(self, route_dist=None, vrf_id=None,
vrf_rf=None):
"""Removes any matching `VrfConf` for given `route_dist` or `vrf_id`
        Parameters:
- `route_dist`: (str) route distinguisher of a configured VRF
- `vrf_id`: (str) vrf ID
- `vrf_rf`: (str) route family of the VRF configuration
If only `route_dist` is given, removes `VrfConf`s for all supported
        address families for this `route_dist`. If `vrf_rf` is given, then only
removes `VrfConf` for that specific route family. If only `vrf_id` is
given, matching `VrfConf` will be removed.
"""
if route_dist is None and vrf_id is None:
raise RuntimeConfigError(desc='To delete supply route_dist or id.')
# By default we remove all VRFs for given Id or RD
vrf_rfs = SUPPORTED_VRF_RF
# If asked to delete specific route family vrf conf.
if vrf_rf:
            vrf_rfs = (vrf_rf,)
        # For every VRF route family asked to be deleted, we collect the
        # removed VrfConfs
removed_vrf_confs = []
for route_family in vrf_rfs:
if route_dist is not None:
rd_rf_id = VrfConf.create_rd_rf_id(route_dist, route_family)
vrf_conf = self._vrfs_by_rd_rf.pop(rd_rf_id, None)
if vrf_conf:
self._vrfs_by_id.pop(vrf_conf.id, None)
removed_vrf_confs.append(vrf_conf)
else:
vrf_conf = self._vrfs_by_id.pop(vrf_id, None)
if vrf_conf:
                    self._vrfs_by_rd_rf.pop(vrf_conf.rd_rf_id, None)
removed_vrf_confs.append(vrf_conf)
# We do not raise any exception if we cannot find asked VRF.
for vrf_conf in removed_vrf_confs:
self._notify_listeners(VrfsConf.REMOVE_VRF_CONF_EVT, vrf_conf)
return removed_vrf_confs
def get_vrf_conf(self, route_dist, vrf_rf, vrf_id=None):
if route_dist is None and vrf_id is None:
raise RuntimeConfigError(desc='To get VRF supply route_dist '
'or vrf_id.')
vrf = None
if route_dist is not None and vrf_id is not None:
vrf1 = self._vrfs_by_id.get(vrf_id)
rd_rf_id = VrfConf.create_rd_rf_id(route_dist, vrf_rf)
vrf2 = self._vrfs_by_rd_rf.get(rd_rf_id)
if vrf1 is not vrf2:
raise RuntimeConfigError(desc='Given VRF ID (%s) and RD (%s)'
' are not of same VRF.' %
(vrf_id, route_dist))
vrf = vrf1
elif route_dist is not None:
rd_rf_id = VrfConf.create_rd_rf_id(route_dist, vrf_rf)
vrf = self._vrfs_by_rd_rf.get(rd_rf_id)
else:
vrf = self._vrfs_by_id.get(vrf_id)
return vrf
@property
def vrfs_by_rd_rf_id(self):
return dict(self._vrfs_by_rd_rf)
@classmethod
    def get_valid_evts(cls):
        self_valid_evts = super(VrfsConf, cls).get_valid_evts()
self_valid_evts.update(VrfsConf.VALID_EVT)
return self_valid_evts
def __repr__(self):
return '<%s(%r)>' % (self.__class__.__name__, self._vrfs_by_id)
@property
def settings(self):
return [vrf.settings for vrf in self._vrfs_by_id.values()]
class VrfConfListener(ConfWithIdListener, ConfWithStatsListener):
"""Base listener for various VRF configuration change event."""
def __init__(self, vrf_conf):
super(VrfConfListener, self).__init__(vrf_conf)
vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
def on_chg_vrf_conf(self, evt):
raise NotImplementedError('This method should be overridden')
class VrfsConfListener(BaseConfListener):
"""Base listener for VRF container change events."""
def __init__(self, vrfs_conf):
super(VrfsConfListener, self).__init__(vrfs_conf)
vrfs_conf.add_listener(VrfsConf.ADD_VRF_CONF_EVT, self.on_add_vrf_conf)
vrfs_conf.add_listener(VrfsConf.REMOVE_VRF_CONF_EVT,
self.on_remove_vrf_conf)
@abc.abstractmethod
def on_add_vrf_conf(self, evt):
raise NotImplementedError('This method should be overridden')
@abc.abstractmethod
def on_remove_vrf_conf(self, evt):
raise NotImplementedError('This method should be overridden')
|
marcore/edx-platform | refs/heads/master | openedx/core/djangoapps/credit/tests/test_verification_access.py | 14 | """
Tests for in-course reverification user partition creation.
This should really belong to the verify_student app,
but we can't move it there because it's in the LMS and we're
currently applying these rules on publish from Studio.
In the future, this functionality should be a course transformation
defined in the verify_student app, and these tests should be moved
into verify_student.
"""
from mock import patch
from nose.plugins.attrib import attr
from django.conf import settings
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.partition_schemes import VerificationPartitionScheme
from openedx.core.djangoapps.credit.verification_access import update_verification_partitions
from openedx.core.djangoapps.credit.signals import on_pre_publish
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls_range
from xmodule.partitions.partitions import Group, UserPartition
@attr('shard_2')
class CreateVerificationPartitionTest(ModuleStoreTestCase):
"""
Tests for applying verification access rules.
"""
# Run the tests in split modulestore
# While verification access will work in old-Mongo, it's not something
# we're committed to supporting, since this feature is meant for use
# in new courses.
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@patch.dict(settings.FEATURES, {"ENABLE_COURSEWARE_INDEX": False})
def setUp(self):
super(CreateVerificationPartitionTest, self).setUp()
# Disconnect the signal receiver -- we'll invoke the update code ourselves
SignalHandler.pre_publish.disconnect(receiver=on_pre_publish)
self.addCleanup(SignalHandler.pre_publish.connect, receiver=on_pre_publish)
# Create a dummy course with a single verification checkpoint
# Because we need to check "exam" content surrounding the ICRV checkpoint,
# we need to create a fairly large course structure, with multiple sections,
# subsections, verticals, units, and items.
self.course = CourseFactory()
self.sections = [
ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section A'),
ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section B'),
]
self.subsections = [
ItemFactory.create(parent=self.sections[0], category='sequential', display_name='Test Subsection A 1'),
ItemFactory.create(parent=self.sections[0], category='sequential', display_name='Test Subsection A 2'),
ItemFactory.create(parent=self.sections[1], category='sequential', display_name='Test Subsection B 1'),
ItemFactory.create(parent=self.sections[1], category='sequential', display_name='Test Subsection B 2'),
]
self.verticals = [
ItemFactory.create(parent=self.subsections[0], category='vertical', display_name='Test Unit A 1 a'),
ItemFactory.create(parent=self.subsections[0], category='vertical', display_name='Test Unit A 1 b'),
ItemFactory.create(parent=self.subsections[1], category='vertical', display_name='Test Unit A 2 a'),
ItemFactory.create(parent=self.subsections[1], category='vertical', display_name='Test Unit A 2 b'),
ItemFactory.create(parent=self.subsections[2], category='vertical', display_name='Test Unit B 1 a'),
ItemFactory.create(parent=self.subsections[2], category='vertical', display_name='Test Unit B 1 b'),
ItemFactory.create(parent=self.subsections[3], category='vertical', display_name='Test Unit B 2 a'),
ItemFactory.create(parent=self.subsections[3], category='vertical', display_name='Test Unit B 2 b'),
]
self.icrv = ItemFactory.create(parent=self.verticals[0], category='edx-reverification-block')
self.sibling_problem = ItemFactory.create(parent=self.verticals[0], category='problem')
def test_creates_user_partitions(self):
self._update_partitions()
# Check that a new user partition was created for the ICRV block
self.assertEqual(len(self.course.user_partitions), 1)
partition = self.course.user_partitions[0]
self.assertEqual(partition.scheme.name, "verification")
self.assertEqual(partition.parameters["location"], unicode(self.icrv.location))
# Check that the groups for the partition were created correctly
self.assertEqual(len(partition.groups), 2)
self.assertItemsEqual(
[g.id for g in partition.groups],
[
VerificationPartitionScheme.ALLOW,
VerificationPartitionScheme.DENY,
]
)
@patch.dict(settings.FEATURES, {"ENABLE_COURSEWARE_INDEX": False})
def test_removes_deleted_user_partitions(self):
self._update_partitions()
# Delete the reverification block, then update the partitions
self.store.delete_item(
self.icrv.location,
ModuleStoreEnum.UserID.test,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self._update_partitions()
# Check that the user partition was marked as inactive
self.assertEqual(len(self.course.user_partitions), 1)
partition = self.course.user_partitions[0]
self.assertFalse(partition.active)
self.assertEqual(partition.scheme.name, "verification")
@patch.dict(settings.FEATURES, {"ENABLE_COURSEWARE_INDEX": False})
def test_preserves_partition_id_for_verified_partitions(self):
self._update_partitions()
partition_id = self.course.user_partitions[0].id
self._update_partitions()
new_partition_id = self.course.user_partitions[0].id
self.assertEqual(partition_id, new_partition_id)
@patch.dict(settings.FEATURES, {"ENABLE_COURSEWARE_INDEX": False})
def test_preserves_existing_user_partitions(self):
# Add other, non-verified partition to the course
self.course.user_partitions = [
UserPartition(
id=0,
name='Cohort user partition',
scheme=UserPartition.get_scheme('cohort'),
description='Cohorted user partition',
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name='Random user partition',
scheme=UserPartition.get_scheme('random'),
description='Random user partition',
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
]
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
# Update the verification partitions.
# The existing partitions should still be available
self._update_partitions()
partition_ids = [p.id for p in self.course.user_partitions]
self.assertEqual(len(partition_ids), 3)
self.assertIn(0, partition_ids)
self.assertIn(1, partition_ids)
def test_multiple_reverification_blocks(self):
# Add an additional ICRV block in another section
other_icrv = ItemFactory.create(parent=self.verticals[3], category='edx-reverification-block')
self._update_partitions()
# Expect that both ICRV blocks have corresponding partitions
self.assertEqual(len(self.course.user_partitions), 2)
partition_locations = [p.parameters.get("location") for p in self.course.user_partitions]
self.assertIn(unicode(self.icrv.location), partition_locations)
self.assertIn(unicode(other_icrv.location), partition_locations)
# Delete the first ICRV block and update partitions
icrv_location = self.icrv.location
self.store.delete_item(
self.icrv.location,
ModuleStoreEnum.UserID.test,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self._update_partitions()
# Expect that the correct partition is marked as inactive
self.assertEqual(len(self.course.user_partitions), 2)
partitions_by_loc = {
p.parameters["location"]: p
for p in self.course.user_partitions
}
self.assertFalse(partitions_by_loc[unicode(icrv_location)].active)
self.assertTrue(partitions_by_loc[unicode(other_icrv.location)].active)
def test_query_counts_with_no_reverification_blocks(self):
# Delete the ICRV block, so the number of ICRV blocks is zero
self.store.delete_item(
self.icrv.location,
ModuleStoreEnum.UserID.test,
revision=ModuleStoreEnum.RevisionOption.published_only
)
# 2 calls: get the course (definitions + structures)
# 2 calls: look up ICRV blocks in the course (definitions + structures)
with check_mongo_calls_range(max_finds=4, max_sends=2):
self._update_partitions(reload_items=False)
def test_query_counts_with_one_reverification_block(self):
# One ICRV block created in the setup method
# Additional call to load the ICRV block
with check_mongo_calls_range(max_finds=5, max_sends=3):
self._update_partitions(reload_items=False)
def test_query_counts_with_multiple_reverification_blocks(self):
# Total of two ICRV blocks (one created in setup method)
# Additional call to load each ICRV block
ItemFactory.create(parent=self.verticals[3], category='edx-reverification-block')
with check_mongo_calls_range(max_finds=6, max_sends=3):
self._update_partitions(reload_items=False)
def _update_partitions(self, reload_items=True):
"""Update user partitions in the course descriptor, then reload the content. """
update_verification_partitions(self.course.id) # pylint: disable=no-member
# Reload each component so we can see the changes
if reload_items:
self.course = self.store.get_course(self.course.id) # pylint: disable=no-member
self.sections = [self._reload_item(section.location) for section in self.sections]
self.subsections = [self._reload_item(subsection.location) for subsection in self.subsections]
self.verticals = [self._reload_item(vertical.location) for vertical in self.verticals]
self.icrv = self._reload_item(self.icrv.location)
self.sibling_problem = self._reload_item(self.sibling_problem.location)
def _reload_item(self, location):
"""Safely reload an item from the moduelstore. """
try:
return self.store.get_item(location)
except ItemNotFoundError:
return None
@attr('shard_2')
class WriteOnPublishTest(ModuleStoreTestCase):
"""
Verify that updates to the course descriptor's
user partitions are written automatically on publish.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@patch.dict(settings.FEATURES, {"ENABLE_COURSEWARE_INDEX": False})
def setUp(self):
super(WriteOnPublishTest, self).setUp()
# Create a dummy course with an ICRV block
self.course = CourseFactory()
self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
self.subsection = ItemFactory.create(parent=self.section, category='sequential', display_name='Test Subsection')
self.vertical = ItemFactory.create(parent=self.subsection, category='vertical', display_name='Test Unit')
self.icrv = ItemFactory.create(parent=self.vertical, category='edx-reverification-block')
# Mark the course as credit
CreditCourse.objects.create(course_key=self.course.id, enabled=True) # pylint: disable=no-member
@patch.dict(settings.FEATURES, {"ENABLE_COURSEWARE_INDEX": False})
def test_can_write_on_publish_signal(self):
# Sanity check -- initially user partitions should be empty
self.assertEqual(self.course.user_partitions, [])
# Make and publish a change to a block, which should trigger the publish signal
with self.store.bulk_operations(self.course.id): # pylint: disable=no-member
self.icrv.display_name = "Updated display name"
self.store.update_item(self.icrv, ModuleStoreEnum.UserID.test)
self.store.publish(self.icrv.location, ModuleStoreEnum.UserID.test)
# Within the test, the course pre-publish signal should have fired synchronously
# Since the course is marked as credit, the in-course verification partitions
# should have been created.
# We need to verify that these changes were actually persisted to the modulestore.
retrieved_course = self.store.get_course(self.course.id) # pylint: disable=no-member
self.assertEqual(len(retrieved_course.user_partitions), 1)
|
PythonCharmers/bokeh | refs/heads/master | examples/charts/file/blaze_input.py | 37 | from os.path import dirname, join
from blaze import Data
from bokeh.sampledata import iris
from bokeh.charts import Line, show, output_file
bbvalues = Data(join(dirname(iris.__file__), 'iris.csv'))
columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
result = bbvalues[columns]
output_file("blaze_input.html")
line = Line(
result, title="Line Chart",
ylabel='Petals', notebook=True, legend="top_left"
)
show(line) |
vladimiroff/asuras | refs/heads/master | asuras/player.py | 1 | from vehicle.types.normal import NormalVehicle
class Player:
def __init__(self, items_layer, *sprite_groups):
self.vehicle = NormalVehicle((320, 240), items_layer, *sprite_groups)
|
postlund/home-assistant | refs/heads/dev | tests/components/geonetnz_volcano/test_config_flow.py | 7 | """Define tests for the GeoNet NZ Volcano config flow."""
from datetime import timedelta
from homeassistant import data_entry_flow
from homeassistant.components.geonetnz_volcano import config_flow
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_UNIT_SYSTEM,
)
async def test_duplicate_error(hass, config_entry):
"""Test that errors are shown when duplicates are added."""
conf = {CONF_LATITUDE: -41.2, CONF_LONGITUDE: 174.7, CONF_RADIUS: 25}
config_entry.add_to_hass(hass)
flow = config_flow.GeonetnzVolcanoFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=conf)
assert result["errors"] == {"base": "identifier_exists"}
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.GeonetnzVolcanoFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_step_import(hass):
"""Test that the import step works."""
conf = {
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: timedelta(minutes=4),
}
flow = config_flow.GeonetnzVolcanoFlowHandler()
flow.hass = hass
result = await flow.async_step_import(import_config=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "-41.2, 174.7"
assert result["data"] == {
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: 240.0,
}
async def test_step_user(hass):
"""Test that the user step works."""
hass.config.latitude = -41.2
hass.config.longitude = 174.7
conf = {CONF_RADIUS: 25}
flow = config_flow.GeonetnzVolcanoFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "-41.2, 174.7"
assert result["data"] == {
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: 300.0,
}
|
viblo/pymunk | refs/heads/master | examples/newtons_cradle.py | 1 | """A screensaver version of Newton's Cradle with an interactive mode.
"""
__docformat__ = "reStructuredText"
import os
import random
import sys
description = """
---- Newton's Cradle ----
A screensaver version of Newton's Cradle with an interactive mode
/s - Run in fullscreen screensaver mode
/p #### - Display a preview of the screensaver using a window handler
/i - Interactive mode
"""
if len(sys.argv) < 2:
print(description)
sys.exit()
is_interactive = False
display_flags = 0
if sys.argv[1] == "/p": # preview mode
os.environ["SDL_VIDEODRIVER"] = "windib"
os.environ["SDL_WINDOWID"] = sys.argv[2]
display_size = (100, 100)
is_interactive = False
### We must set OS env before the pygame imports..
import pygame
if sys.argv[1] == "/s": # fullscreen screensaver mode
display_size = (0, 0)
is_interactive = False
display_flags = (
display_flags | pygame.FULLSCREEN
) # FULLSCREEN) # | DOUBLEBUF | HWSURFACE )
elif sys.argv[1] == "/i": # interactive
display_size = (600, 600)
is_interactive = True
import pymunk as pm
from pymunk import Vec2d
def drawcircle(image, colour, origin, radius, width=0):
if width == 0:
pygame.draw.circle(image, colour, origin, int(radius))
else:
if radius > 65534 / 5:
radius = 65534 / 5
circle = pygame.Surface(
[radius * 2 + width, radius * 2 + width]
).convert_alpha()
circle.fill([0, 0, 0, 0])
pygame.draw.circle(
circle,
colour,
[circle.get_width() / 2, circle.get_height() / 2],
radius + (width / 2),
)
if int(radius - (width / 2)) > 0:
pygame.draw.circle(
circle,
[0, 0, 0, 0],
[circle.get_width() / 2, circle.get_height() / 2],
abs(int(radius - (width / 2))),
)
image.blit(
circle,
[
origin[0] - (circle.get_width() / 2),
origin[1] - (circle.get_height() / 2),
],
)
def reset_bodies(space):
for body in space.bodies:
body.position = Vec2d(*body.start_position)
body.force = 0, 0
body.torque = 0
body.velocity = 0, 0
body.angular_velocity = 0
color = pygame.Color(
random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)
)
for shape in space.shapes:
shape.color = color
def main():
pygame.init()
screen = pygame.display.set_mode(display_size, display_flags)
width, height = screen.get_size()
def to_pygame(p):
"""Small hack to convert pymunk to pygame coordinates"""
return int(p.x), int(-p.y + height)
def from_pygame(p):
return to_pygame(p)
clock = pygame.time.Clock()
running = True
font = pygame.font.Font(None, 16)
### Physics stuff
space = pm.Space()
space.gravity = (0.0, -1900.0)
space.damping = 0.999 # to prevent it from blowing up.
mouse_body = pm.Body(body_type=pm.Body.KINEMATIC)
bodies = []
for x in range(-100, 150, 50):
x += width / 2
offset_y = height / 2
mass = 10
radius = 25
moment = pm.moment_for_circle(mass, 0, radius, (0, 0))
body = pm.Body(mass, moment)
body.position = (x, -125 + offset_y)
body.start_position = Vec2d(*body.position)
shape = pm.Circle(body, radius)
shape.elasticity = 0.9999999
space.add(body, shape)
bodies.append(body)
pj = pm.PinJoint(space.static_body, body, (x, 125 + offset_y), (0, 0))
space.add(pj)
reset_bodies(space)
selected = None
if not is_interactive:
pygame.time.set_timer(pygame.USEREVENT + 1, 70000) # apply force
pygame.time.set_timer(pygame.USEREVENT + 2, 120000) # reset
pygame.event.post(pygame.event.Event(pygame.USEREVENT + 1))
pygame.mouse.set_visible(False)
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
pygame.image.save(screen, "newtons_cradle.png")
if event.type == pygame.USEREVENT + 1:
r = random.randint(1, 4)
for body in bodies[0:r]:
body.apply_impulse_at_local_point((-6000, 0))
if event.type == pygame.USEREVENT + 2:
reset_bodies(space)
elif (
event.type == pygame.KEYDOWN
and event.key == pygame.K_r
and is_interactive
):
reset_bodies(space)
elif (
event.type == pygame.KEYDOWN
and event.key == pygame.K_f
and is_interactive
):
r = random.randint(1, 4)
for body in bodies[0:r]:
body.apply_impulse_at_local_point((-6000, 0))
elif event.type == pygame.MOUSEBUTTONDOWN and is_interactive:
                if selected is not None:
space.remove(selected)
p = from_pygame(Vec2d(*event.pos))
hit = space.point_query_nearest(p, 0, pm.ShapeFilter())
                if hit is not None:
shape = hit.shape
rest_length = mouse_body.position.get_distance(shape.body.position)
ds = pm.DampedSpring(
mouse_body, shape.body, (0, 0), (0, 0), rest_length, 1000, 10
)
space.add(ds)
selected = ds
elif event.type == pygame.MOUSEBUTTONUP and is_interactive:
                if selected is not None:
space.remove(selected)
selected = None
elif event.type == pygame.KEYDOWN:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
running = False
mpos = pygame.mouse.get_pos()
p = from_pygame(Vec2d(*mpos))
mouse_body.position = p
### Clear screen
screen.fill(pygame.Color("black"))
### Draw stuff
for c in space.constraints:
pv1 = c.a.position + c.anchor_a
pv2 = c.b.position + c.anchor_b
p1 = to_pygame(pv1)
p2 = to_pygame(pv2)
pygame.draw.aalines(screen, pygame.Color("lightgray"), False, [p1, p2])
for ball in space.shapes:
p = to_pygame(ball.body.position)
drawcircle(screen, ball.color, p, int(ball.radius), 0)
# pygame.draw.circle(screen, ball.color, p, int(ball.radius), 0)
### Update physics
fps = 50
iterations = 25
dt = 1.0 / float(fps) / float(iterations)
        for x in range(iterations):  # sub-stepping gives a more stable simulation
space.step(dt)
### Flip screen
if is_interactive:
screen.blit(
font.render(
"fps: " + str(clock.get_fps()), True, pygame.Color("white")
),
(0, 0),
)
screen.blit(
font.render(
"Press left mouse button and drag to interact",
True,
pygame.Color("darkgrey"),
),
(5, height - 35),
)
screen.blit(
font.render(
"Press R to reset, any other key to quit",
True,
pygame.Color("darkgrey"),
),
(5, height - 20),
)
pygame.display.flip()
clock.tick(fps)
if __name__ == "__main__":
sys.exit(main())
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/matplotlib/colors.py | 4 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*.
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of colors called
a colormap. Colormapping typically involves two steps: a data array is first
mapped onto the range 0-1 using an instance of :class:`Normalize` or of a
subclass; then this number in the 0-1 range is mapped to a color using an
instance of a subclass of :class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all the built-in
colormap instances, but is also useful for making custom colormaps, and
:class:`ListedColormap`, which is used for generating a custom colormap from a
list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single color
specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic built-in colors, you can use a single letter
- b: blue
- g: green
- r: red
- c: cyan
- m: magenta
- y: yellow
- k: black
- w: white
Gray shades can be given as a string encoding a float in the 0-1 range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify the
color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B* are in
the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse'
are supported.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
import warnings
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
cnames = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksage': '#598556',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsage': '#BCECAC',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sage': '#87AE73',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
# add british equivs
for k, v in list(six.iteritems(cnames)):
if k.find('gray') >= 0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given an rgb or rgba sequence of 0-1 floats, return the hex string'
a = '#%02x%02x%02x' % tuple([int(np.round(val * 255)) for val in rgb[:3]])
return a
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, six.string_types):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16) / 255.0 for n in (s[1:3], s[3:5], s[5:7])])
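# A minimal round-trip sketch for the two helpers above; the sample triple
# is an arbitrary assumption.
def _hex_roundtrip_demo():
    hx = rgb2hex((0.2, 0.4, 0.6))  # -> '#336699'
    return hex2color(hx)  # -> back to floats in the 0-1 range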
class ColorConverter(object):
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b': (0.0, 0.0, 1.0),
'g': (0.0, 0.5, 0.0),
'r': (1.0, 0.0, 0.0),
'c': (0.0, 0.75, 0.75),
'm': (0.75, 0, 0.75),
'y': (0.75, 0.75, 0),
'k': (0.0, 0.0, 0.0),
'w': (1.0, 1.0, 1.0), }
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a string representation of a float, like '0.4',
indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
        # Gray must be given as a string so a single gray level is not
        # confused with an RGB or RGBA sequence.
try:
return self.cache[arg]
except KeyError:
pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try:
return self.cache[arg]
except KeyError:
pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
argl = arg.lower()
color = self.colors.get(argl, None)
if color is None:
str1 = cnames.get(argl, argl)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(argl)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = (fl,)*3
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4' % len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
raise ValueError(
                        'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError) as exc:
raise ValueError(
'to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
In addition, if *arg* is "none" (case-insensitive),
then (0,0,0,0) will be returned.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if arg.lower() == 'none':
return (0.0, 0.0, 0.0, 0.0)
except AttributeError:
pass
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if any(float(x) < 0 or x > 1 for x in arg):
raise ValueError(
                            'number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], alpha
if len(arg) == 3:
r, g, b = arg
if any(float(x) < 0 or x > 1 for x in arg):
raise ValueError(
                            'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'length of rgba sequence should be either 3 or 4')
else:
r, g, b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r, g, b, alpha
except (TypeError, ValueError) as exc:
raise ValueError(
'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
nc = len(c)
except TypeError:
raise ValueError(
"Cannot convert argument type %s to rgba array" % type(c))
try:
if nc == 0 or c.lower() == 'none':
return np.zeros((0, 4), dtype=np.float)
except AttributeError:
pass
try:
# Single value? Put it in an array with a single row.
return np.array([self.to_rgba(c, alpha)], dtype=np.float)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):
if (c.ravel() > 1).any() or (c.ravel() < 0).any():
raise ValueError(
"number in rgba sequence is outside 0-1 range")
result = np.asarray(c, np.float)
if alpha is not None:
if alpha > 1 or alpha < 0:
raise ValueError("alpha must be in 0-1 range")
result[:, 3] = alpha
return result
# This alpha operation above is new, and depends
# on higher levels to refrain from setting alpha
# to values other than None unless there is
# intent to override any existing alpha values.
# It must be some other sequence of color specs.
result = np.zeros((nc, 4), dtype=np.float)
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha)
return result
colorConverter = ColorConverter()
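# Usage sketch for the module-level converter (illustrative values only,
# not part of the upstream module):
def _color_converter_demo():
    gray = colorConverter.to_rgb('0.5')  # -> (0.5, 0.5, 0.5)
    red = colorConverter.to_rgba('red', alpha=0.3)  # named color + alpha
    arr = colorConverter.to_rgba_array(['r', '#00FF00', (0.0, 0.0, 1.0)])
    return gray, red, arr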
def makeMappingArray(N, data, gamma=1.0):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
    sawtooth), where y0 represents the value of y for values of x
    less than or equal to the given x, and y1 is the value to be used
    for x greater than it. The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
Alternatively, data can be a function mapping values between 0 - 1
to 0 - 1.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
if six.callable(data):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=np.float), 0, 1)
return lut
    try:
        adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertible to an array")
shape = adata.shape
if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x) - x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N - 1)
lut = np.zeros((N,), np.float)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
lut[1:-1] = distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1]
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
return np.clip(lut, 0.0, 1.0)
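# Minimal sketch of the (x, y0, y1) format described above: a ramp that
# jumps from 1 back to 0 at x=0.5 (values chosen arbitrarily).
def _mapping_array_demo():
    data = [(0.0, 0.0, 0.0), (0.5, 1.0, 0.0), (1.0, 1.0, 1.0)]
    return makeMappingArray(8, data)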
class Colormap(object):
"""
Baseclass for all scalar to RGBA mappings.
Typically Colormap instances are used to convert data values (floats) from
the interval ``[0, 1]`` to the RGBA color that the respective Colormap
represents. For scaling of data into the ``[0, 1]`` interval see
:class:`matplotlib.colors.Normalize`. It is worth noting that
:class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this
``data->normalize->map-to-color`` processing chain.
"""
def __init__(self, name, N=256):
r"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of rgb quantization levels.
"""
self.name = name
self.N = int(N) # ensure that N is always int
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = self.N
self._i_over = self.N + 1
self._i_bad = self.N + 2
self._isinit = False
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: :class:`matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
"""
Parameters
----------
X : scalar, ndarray
The data value(s) to convert to RGBA.
For floats, X should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, X should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float, None
Alpha must be a scalar between 0 and 1, or None.
bytes : bool
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be uint8s in the interval
``[0, 255]``.
Returns
-------
        Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
# See class docstring for arg/kwarg documentation.
if not self._isinit:
self._init()
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.array(X, copy=True) # Copy here to avoid side effects.
mask_bad = xma.mask # Mask will be used below.
xa = xma.filled() # Fill to avoid infs, etc.
del xma
# Calculations with native byteorder are faster, and avoid a
# bug that otherwise can occur with putmask when the last
# argument is a numpy scalar.
if not xa.dtype.isnative:
xa = xa.byteswap().newbyteorder()
if xa.dtype.kind == "f":
# Treat 1.0 as slightly less than 1.
vals = np.array([1, 0], dtype=xa.dtype)
almost_one = np.nextafter(*vals)
cbook._putmask(xa, xa == 1.0, almost_one)
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
xa *= self.N
np.clip(xa, -1, self.N, out=xa)
# ensure that all 'under' values will still have negative
# value after casting to int
cbook._putmask(xa, xa < 0.0, -1)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
cbook._putmask(xa, xa > self.N - 1, self._i_over)
cbook._putmask(xa, xa < 0, self._i_under)
if mask_bad is not None:
if mask_bad.shape == xa.shape:
cbook._putmask(xa, mask_bad, self._i_bad)
elif mask_bad:
xa.fill(self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut.copy() # Don't let alpha modify original _lut.
if alpha is not None:
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
if bytes:
alpha = int(alpha * 255)
if (lut[-1] == 0).all():
lut[:-1, -1] = alpha
# All zeros is taken as a flag for the default bad
# color, which is no color--fully transparent. We
# don't want to override this.
else:
lut[:, -1] = alpha
# If the bad value is set to have a color, then we
# override its alpha just as for any other value.
rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
if vtype == 'scalar':
rgba = tuple(rgba[0, :])
return rgba
def set_bad(self, color='k', alpha=None):
"""Set color to be used for masked values.
"""
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_under(self, color='k', alpha=None):
"""Set color to be used for low out-of-range values.
Requires norm.clip = False
"""
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_over(self, color='k', alpha=None):
"""Set color to be used for high out-of-range values.
Requires norm.clip = False
"""
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def _init(self):
"""Generate the lookup table, self._lut"""
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit:
self._init()
return (np.alltrue(self._lut[:, 0] == self._lut[:, 1]) and
np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
raise NotImplementedError()
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256, gamma=1.0):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table. Entries for alpha are optional.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:meth:`LinearSegmentedColormap.from_list`
Static method; factory function for generating a
smoothly-varying LinearSegmentedColormap.
:func:`makeMappingArray`
For information about making a mapping array.
"""
# True only if all colors in map are identical; needed for contouring.
self.monochrome = False
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
self._gamma = gamma
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(
self.N, self._segmentdata['red'], self._gamma)
self._lut[:-3, 1] = makeMappingArray(
self.N, self._segmentdata['green'], self._gamma)
self._lut[:-3, 2] = makeMappingArray(
self.N, self._segmentdata['blue'], self._gamma)
if 'alpha' in self._segmentdata:
self._lut[:-3, 3] = makeMappingArray(
self.N, self._segmentdata['alpha'], 1)
self._isinit = True
self._set_extremes()
def set_gamma(self, gamma):
"""
Set a new gamma value and regenerate color map.
"""
self._gamma = gamma
self._init()
@staticmethod
def from_list(name, colors, N=256, gamma=1.0):
"""
Make a linear segmented colormap with *name* from a sequence
of *colors* which evenly transitions from colors[0] at val=0
to colors[-1] at val=1. *N* is the number of rgb quantization
levels.
Alternatively, a list of (value, color) tuples can be given
to divide the range unevenly.
"""
if not cbook.iterable(colors):
raise ValueError('colors must be iterable')
if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \
not cbook.is_string_like(colors[0]):
# List of value, color pairs
vals, colors = list(zip(*colors))
else:
vals = np.linspace(0., 1., len(colors))
cdict = dict(red=[], green=[], blue=[], alpha=[])
for val, color in zip(vals, colors):
r, g, b, a = colorConverter.to_rgba(color)
cdict['red'].append((val, r, r))
cdict['green'].append((val, g, g))
cdict['blue'].append((val, b, b))
cdict['alpha'].append((val, a, a))
return LinearSegmentedColormap(name, cdict, N, gamma)
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
return LinearSegmentedColormap(self.name, self._segmentdata, lutsize)
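# Usage sketch (illustrative, not part of the upstream module): building a
# three-point colormap with the from_list factory and sampling it.
def _linear_segmented_demo():
    cmap = LinearSegmentedColormap.from_list(
        'demo', ['#000000', 'red', '#FFFFFF'], N=64)
    return cmap(0.5)  # RGBA tuple at the midpoint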
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name='from_list', N=None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 or Nx4 floating point array
(*N* rgb or rgba values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are
# identical; needed for contouring.
if N is None:
N = len(self.colors)
else:
if (cbook.is_string_like(self.colors) and
cbook.is_hashable(self.colors)):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
                    del self.colors[N:]
else:
try:
gray = float(self.colors)
except TypeError:
pass
else:
self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgba = colorConverter.to_rgba_array(self.colors)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3] = rgba
self._isinit = True
self._set_extremes()
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
colors = self(np.linspace(0, 1, lutsize))
return ListedColormap(colors, name=self.name)
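# Usage sketch (illustrative values): three discrete colors, sampled at the
# ends and the middle of the 0-1 range.
def _listed_demo():
    cmap = ListedColormap(['r', 'g', 'b'], name='rgb3')
    return cmap([0.0, 0.5, 1.0])  # one RGBA row per input value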
class Normalize(object):
"""
A class which, when called, can normalize data into
the ``[0.0, 1.0]`` interval.
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are initialized from the
minimum and maximum value respectively of the first input
processed. That is, *__call__(A)* calls *autoscale_None(A)*.
If *clip* is *True* and the given value falls outside the range,
the returned value will be 0 or 1, whichever is closer.
Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
@staticmethod
def process_value(value):
"""
Homogenize the input *value* for easy and efficient normalization.
*value* can be a scalar or sequence.
Returns *result*, *is_scalar*, where *result* is a
masked array matching *value*. Float dtypes are preserved;
integer types with two bytes or smaller are converted to
np.float32, and larger types are converted to np.float.
Preserving float32 when possible, and using in-place operations,
can greatly improve speed for large arrays.
Experimental; we may want to add an option to force the
use of float32.
"""
if cbook.iterable(value):
is_scalar = False
result = ma.asarray(value)
if result.dtype.kind == 'f':
# this is overkill for lists of floats, but required
# to support pd.Series as input until we can reliable
# determine if result and value share memory in all cases
# (list, tuple, deque, ndarray, Series, ...)
result = result.copy()
elif result.dtype.itemsize > 2:
result = result.astype(np.float)
else:
result = result.astype(np.float32)
else:
is_scalar = True
result = ma.array([value]).astype(np.float)
return result, is_scalar
def __call__(self, value, clip=None):
"""
Normalize *value* data in the ``[vmin, vmax]`` interval into
the ``[0.0, 1.0]`` interval and return it. *clip* defaults
to *self.clip* (which defaults to *False*). If not already
initialized, *vmin* and *vmax* are initialized using
*autoscale_None(value)*.
"""
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
# use np.asarray so data passed in as an ndarray subclass are
# interpreted as an ndarray. See issue #6622.
resdat = np.asarray(result.data)
resdat -= vmin
resdat /= (vmax - vmin)
result = np.ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin = float(self.vmin)
vmax = float(self.vmax)
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
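# Usage sketch for the linear norm above (arbitrary sample values):
def _normalize_demo():
    norm = Normalize(vmin=0.0, vmax=10.0)
    scaled = norm([0.0, 2.5, 10.0])  # -> masked array [0.0, 0.25, 1.0]
    return norm.inverse(scaled)  # -> back to [0.0, 2.5, 10.0]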
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
result = ma.masked_less_equal(result, 0, copy=False)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= resdat <= 0
cbook._putmask(resdat, mask, 1)
np.log(resdat, resdat)
resdat -= np.log(vmin)
resdat /= (np.log(vmax) - np.log(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax / vmin), val)
else:
return vmin * pow((vmax / vmin), value)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
A = ma.masked_less_equal(A, 0, copy=False)
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is not None and self.vmax is not None:
return
A = ma.masked_less_equal(A, 0, copy=False)
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
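# Usage sketch (assumed sample values): decades map to equal steps.
def _lognorm_demo():
    norm = LogNorm(vmin=1.0, vmax=1000.0)
    return norm([1.0, 10.0, 100.0, 1000.0])  # -> [0, 1/3, 2/3, 1]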
class SymLogNorm(Normalize):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
    Since the logarithm of values close to zero tends toward negative
    infinity, there is a need to have a range around zero that is
    linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
def __init__(self, linthresh, linscale=1.0,
vmin=None, vmax=None, clip=False):
"""
*linthresh*:
The range within which the plot is linear (to
avoid having the plot go to infinity around zero).
*linscale*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range. Defaults to 1.
"""
Normalize.__init__(self, vmin, vmax, clip)
self.linthresh = float(linthresh)
self._linscale_adj = (linscale / (1.0 - np.e ** -1))
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = self._transform(result.data)
resdat -= self._lower
resdat /= (self._upper - self._lower)
if is_scalar:
result = result[0]
return result
def _transform(self, a):
"""
Inplace transformation.
"""
masked = np.abs(a) > self.linthresh
sign = np.sign(a[masked])
log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))
log *= sign * self.linthresh
a[masked] = log
a[~masked] *= self._linscale_adj
return a
def _inv_transform(self, a):
"""
Inverse inplace Transformation.
"""
masked = np.abs(a) > (self.linthresh * self._linscale_adj)
sign = np.sign(a[masked])
exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)
exp *= sign * self.linthresh
a[masked] = exp
a[~masked] /= self._linscale_adj
return a
def _transform_vmin_vmax(self):
"""
Calculates vmin and vmax in the transformed system.
"""
vmin, vmax = self.vmin, self.vmax
arr = np.array([vmax, vmin]).astype(np.float)
self._upper, self._lower = self._transform(arr)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
val = ma.asarray(value)
val = val * (self._upper - self._lower) + self._lower
return self._inv_transform(val)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
self._transform_vmin_vmax()
def autoscale_None(self, A):
""" autoscale only None-valued vmin or vmax """
if self.vmin is not None and self.vmax is not None:
pass
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
self._transform_vmin_vmax()
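# Usage sketch (assumed values): the result is symmetric about 0.5, with
# the -linthresh..linthresh band treated linearly.
def _symlognorm_demo():
    norm = SymLogNorm(linthresh=1.0, vmin=-100.0, vmax=100.0)
    return norm([-100.0, 0.0, 100.0])  # -> [0.0, 0.5, 1.0]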
class PowerNorm(Normalize):
"""
Normalize a given value to the ``[0, 1]`` interval with a power-law
scaling. This will clip any negative data points to 0.
"""
def __init__(self, gamma, vmin=None, vmax=None, clip=False):
Normalize.__init__(self, vmin, vmax, clip)
self.gamma = gamma
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
res_mask = result.data < 0
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
resdat = result.data
resdat -= vmin
np.power(resdat, gamma, resdat)
resdat /= (vmax - vmin) ** gamma
result = np.ma.array(resdat, mask=result.mask, copy=False)
result[res_mask] = 0
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return ma.power(val, 1. / gamma) * (vmax - vmin) + vmin
else:
return pow(value, 1. / gamma) * (vmax - vmin) + vmin
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
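# Usage sketch (assumed values): gamma=2 maps the midpoint to 0.25.
def _powernorm_demo():
    norm = PowerNorm(gamma=2.0, vmin=0.0, vmax=10.0)
    return norm([0.0, 5.0, 10.0])  # -> [0.0, 0.25, 1.0]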
class BoundaryNorm(Normalize):
"""
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
"""
def __init__(self, boundaries, ncolors, clip=False):
"""
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped
to -1 if low and ncolors if high; these are converted
to valid indices by
:meth:`Colormap.__call__` .
If clip == True, out-of-range values
are mapped to 0 if low and ncolors-1 if high.
"""
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N - 1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
xx, is_scalar = self.process_value(value)
mask = ma.getmaskarray(xx)
xx = np.atleast_1d(xx.filled(self.vmax + 1))
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
max_col = self.Ncmap - 1
else:
max_col = self.Ncmap
iret = np.zeros(xx.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx >= b] = i
if self._interp:
scalefac = float(self.Ncmap - 1) / (self.N - 2)
iret = (iret * scalefac).astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = max_col
ret = ma.array(iret, mask=mask)
if is_scalar:
ret = int(ret[0]) # assume python scalar
return ret
    def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
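# Usage sketch (assumed boundaries): values fall into three bins and come
# back as integer color indices rather than 0-1 floats.
def _boundarynorm_demo():
    norm = BoundaryNorm([0.0, 1.0, 2.0, 4.0], ncolors=3)
    return norm([0.5, 1.5, 3.0])  # -> indices [0, 1, 2]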
class NoNorm(Normalize):
"""
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
"""
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
def rgb_to_hsv(arr):
"""
convert float rgb values (in the range [0, 1]), in a numpy array to hsv
values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
hsv : (..., 3) ndarray
Colors converted to hsv values in range [0, 1]
"""
# make sure it is an ndarray
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=arr.shape))
in_ndim = arr.ndim
if arr.ndim == 1:
arr = np.array(arr, ndmin=2)
# make sure we don't have an int image
if arr.dtype.kind in ('iu'):
arr = arr.astype(np.float32)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
if in_ndim == 1:
out.shape = (3,)
return out
def hsv_to_rgb(hsv):
"""
convert hsv values in a numpy array to rgb values
all values assumed to be in range [0, 1]
Parameters
----------
hsv : (..., 3) array-like
All values assumed to be in range [0, 1]
Returns
-------
rgb : (..., 3) ndarray
Colors converted to RGB values in range [0, 1]
"""
hsv = np.asarray(hsv)
# check length of the last dimension, should be _some_ sort of rgb
if hsv.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=hsv.shape))
    # if we got passed a 1D array, try to treat it as
    # a single color and reshape as needed
in_ndim = hsv.ndim
if in_ndim == 1:
hsv = np.array(hsv, ndmin=2)
# make sure we don't have an int image
if hsv.dtype.kind in ('iu'):
hsv = hsv.astype(np.float32)
h = hsv[..., 0]
s = hsv[..., 1]
v = hsv[..., 2]
r = np.empty_like(h)
g = np.empty_like(h)
b = np.empty_like(h)
i = (h * 6.0).astype(np.int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
idx = i % 6 == 0
r[idx] = v[idx]
g[idx] = t[idx]
b[idx] = p[idx]
idx = i == 1
r[idx] = q[idx]
g[idx] = v[idx]
b[idx] = p[idx]
idx = i == 2
r[idx] = p[idx]
g[idx] = v[idx]
b[idx] = t[idx]
idx = i == 3
r[idx] = p[idx]
g[idx] = q[idx]
b[idx] = v[idx]
idx = i == 4
r[idx] = t[idx]
g[idx] = p[idx]
b[idx] = v[idx]
idx = i == 5
r[idx] = v[idx]
g[idx] = p[idx]
b[idx] = q[idx]
idx = s == 0
r[idx] = v[idx]
g[idx] = v[idx]
b[idx] = v[idx]
rgb = np.empty_like(hsv)
rgb[..., 0] = r
rgb[..., 1] = g
rgb[..., 2] = b
if in_ndim == 1:
rgb.shape = (3, )
return rgb
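# Round-trip sketch for the two converters above (arbitrary color):
def _hsv_roundtrip_demo():
    rgb = np.array([1.0, 0.5, 0.0])
    return hsv_to_rgb(rgb_to_hsv(rgb))  # recovers the input triple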
class LightSource(object):
"""
Create a light source coming from the specified azimuth and elevation.
Angles are in degrees, with the azimuth measured
clockwise from north and elevation up from the zero plane of the surface.
    The :meth:`shade` method is used to produce "shaded" rgb values for a
    data array. :meth:`shade_rgb` can be used to combine an rgb image with
    an elevation-based intensity map, and :meth:`hillshade` produces an
    illumination map of a surface.
"""
def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
hsv_min_sat=1, hsv_max_sat=0):
"""
        Specify the azimuth (measured clockwise from north) and altitude
(measured up from the plane of the surface) of the light source
in degrees.
Parameters
----------
azdeg : number, optional
The azimuth (0-360, degrees clockwise from North) of the light
source. Defaults to 315 degrees (from the northwest).
altdeg : number, optional
The altitude (0-90, degrees up from horizontal) of the light
source. Defaults to 45 degrees from horizontal.
Notes
-----
For backwards compatibility, the parameters *hsv_min_val*,
*hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
initialization as well. However, these parameters will only be used if
"blend_mode='hsv'" is passed into :meth:`shade` or :meth:`shade_rgb`.
See the documentation for :meth:`blend_hsv` for more details.
"""
self.azdeg = azdeg
self.altdeg = altdeg
self.hsv_min_val = hsv_min_val
self.hsv_max_val = hsv_max_val
self.hsv_min_sat = hsv_min_sat
self.hsv_max_sat = hsv_max_sat
def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
"""
Calculates the illumination intensity for a surface using the defined
azimuth and elevation for the light source.
Imagine an artificial sun placed at infinity in some azimuth and
elevation position illuminating our surface. The parts of the surface
that slope toward the sun should brighten while those sides facing away
should become darker.
Parameters
----------
elevation : array-like
A 2d array (or equivalent) of the height values used to generate an
illumination map
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topographic effects.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
fraction : number, optional
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
Returns
-------
intensity : ndarray
A 2d array of illumination values between 0-1, where 0 is
completely in shadow and 1 is completely illuminated.
"""
# Azimuth is in degrees clockwise from North. Convert to radians
# counterclockwise from East (mathematical notation).
az = np.radians(90 - self.azdeg)
alt = np.radians(self.altdeg)
# Because most image and raster GIS data has the first row in the array
# as the "top" of the image, dy is implicitly negative. This is
# consistent to what `imshow` assumes, as well.
dy = -dy
# Calculate the intensity from the illumination angle
dy, dx = np.gradient(vert_exag * elevation, dy, dx)
# The aspect is defined by the _downhill_ direction, thus the negative
aspect = np.arctan2(-dy, -dx)
slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))
intensity = (np.sin(alt) * np.sin(slope) +
np.cos(alt) * np.cos(slope) * np.cos(az - aspect))
# Apply contrast stretch
imin, imax = intensity.min(), intensity.max()
intensity *= fraction
# Rescale to 0-1, keeping range before contrast stretch
# If constant slope, keep relative scaling (i.e. flat should be 0.5,
# fully occluded 0, etc.)
if (imax - imin) > 1e-6:
# Strictly speaking, this is incorrect. Negative values should be
# clipped to 0 because they're fully occluded. However, rescaling
# in this manner is consistent with the previous implementation and
# visually appears better than a "hard" clip.
intensity -= imin
intensity /= (imax - imin)
intensity = np.clip(intensity, 0, 1, intensity)
return intensity
def shade(self, data, cmap, norm=None, blend_mode='hsv', vmin=None,
vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):
"""
Combine colormapped data values with an illumination intensity map
(a.k.a. "hillshade") of the values.
Parameters
----------
data : array-like
A 2d array (or equivalent) of the height values used to generate a
shaded map.
cmap : `~matplotlib.colors.Colormap` instance
The colormap used to color the *data* array. Note that this must be
a `~matplotlib.colors.Colormap` instance. For example, rather than
passing in `cmap='gist_earth'`, use
`cmap=plt.get_cmap('gist_earth')` instead.
norm : `~matplotlib.colors.Normalize` instance, optional
The normalization used to scale values before colormapping. If
None, the input will be linearly scaled between its min and max.
blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
The type of blending used to combine the colormapped data values
with the illumination intensity. For backwards compatibility, this
defaults to "hsv". Note that for most topographic surfaces,
"overlay" or "soft" appear more visually realistic. If a
user-defined function is supplied, it is expected to combine an
MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`)
Additional kwargs supplied to this function will be passed on to
the *blend_mode* function.
vmin : scalar or None, optional
The minimum value used in colormapping *data*. If *None* the
minimum value in *data* is used. If *norm* is specified, then this
argument will be ignored.
vmax : scalar or None, optional
The maximum value used in colormapping *data*. If *None* the
maximum value in *data* is used. If *norm* is specified, then this
argument will be ignored.
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topography.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
fraction : number, optional
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
Additional kwargs are passed on to the *blend_mode* function.
Returns
-------
rgba : ndarray
An MxNx4 array of floats ranging between 0-1.
"""
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if norm is None:
norm = Normalize(vmin=vmin, vmax=vmax)
rgb0 = cmap(norm(data))
rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode,
vert_exag=vert_exag, dx=dx, dy=dy,
fraction=fraction, **kwargs)
# Don't overwrite the alpha channel, if present.
rgb0[..., :3] = rgb1[..., :3]
return rgb0
def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv',
vert_exag=1, dx=1, dy=1, **kwargs):
"""
        Take the input RGB array (ny*nx*3) and adjust its color values
        to give the impression of a shaded relief map with a
        specified light source, using the elevation (ny*nx).
        A new RGB array (ny*nx*3) is returned.
Parameters
----------
rgb : array-like
An MxNx3 RGB array, assumed to be in the range of 0 to 1.
elevation : array-like
A 2d array (or equivalent) of the height values used to generate a
shaded map.
fraction : number
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
The type of blending used to combine the colormapped data values
with the illumination intensity. For backwards compatibility, this
defaults to "hsv". Note that for most topographic surfaces,
"overlay" or "soft" appear more visually realistic. If a
user-defined function is supplied, it is expected to combine an
MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`)
Additional kwargs supplied to this function will be passed on to
the *blend_mode* function.
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topography.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
Additional kwargs are passed on to the *blend_mode* function.
Returns
-------
shaded_rgb : ndarray
An MxNx3 array of floats ranging between 0-1.
"""
# Calculate the "hillshade" intensity.
intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction)
intensity = intensity[..., np.newaxis]
# Blend the hillshade and rgb data using the specified mode
lookup = {
'hsv': self.blend_hsv,
'soft': self.blend_soft_light,
'overlay': self.blend_overlay,
}
if blend_mode in lookup:
blend = lookup[blend_mode](rgb, intensity, **kwargs)
else:
try:
blend = blend_mode(rgb, intensity, **kwargs)
except TypeError:
msg = '"blend_mode" must be callable or one of {0}'
                raise ValueError(msg.format(list(lookup.keys())))
# Only apply result where hillshade intensity isn't masked
if hasattr(intensity, 'mask'):
mask = intensity.mask[..., 0]
for i in range(3):
blend[..., i][mask] = rgb[..., i][mask]
return blend
def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
hsv_min_val=None, hsv_min_sat=None):
"""
Take the input data array, convert to HSV values in the given colormap,
then adjust those color values to give the impression of a shaded
relief map with a specified light source. RGBA values are returned,
which can then be used to plot the shaded image with imshow.
The color of the resulting image will be darkened by moving the (s,v)
values (in hsv colorspace) toward (hsv_min_sat, hsv_min_val) in the
        shaded regions, or lightened by sliding (s,v) toward (hsv_max_sat,
        hsv_max_val) in regions that are illuminated. The default extremes are
        chosen so that completely shaded points are nearly black (s = 1, v = 0)
and completely illuminated points are nearly white (s = 0, v = 1).
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
hsv_max_sat : number, optional
The maximum saturation value that the *intensity* map can shift the
output image to. Defaults to 1.
hsv_min_sat : number, optional
The minimum saturation value that the *intensity* map can shift the
output image to. Defaults to 0.
hsv_max_val : number, optional
The maximum value ("v" in "hsv") that the *intensity* map can shift
the output image to. Defaults to 1.
hsv_min_val: number, optional
The minimum value ("v" in "hsv") that the *intensity* map can shift
the output image to. Defaults to 0.
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
# Backward compatibility...
if hsv_max_sat is None:
hsv_max_sat = self.hsv_max_sat
if hsv_max_val is None:
hsv_max_val = self.hsv_max_val
if hsv_min_sat is None:
hsv_min_sat = self.hsv_min_sat
if hsv_min_val is None:
hsv_min_val = self.hsv_min_val
# Expects a 2D intensity array scaled between -1 to 1...
intensity = intensity[..., 0]
intensity = 2 * intensity - 1
# convert to rgb, then rgb to hsv
hsv = rgb_to_hsv(rgb[:, :, 0:3])
# modify hsv values to simulate illumination.
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity > 0),
((1. - intensity) * hsv[:, :, 1] +
intensity * hsv_max_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity > 0,
((1. - intensity) * hsv[:, :, 2] +
intensity * hsv_max_val),
hsv[:, :, 2])
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity < 0),
((1. + intensity) * hsv[:, :, 1] -
intensity * hsv_min_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity < 0,
((1. + intensity) * hsv[:, :, 2] -
intensity * hsv_min_val),
hsv[:, :, 2])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
# convert modified hsv back to rgb.
return hsv_to_rgb(hsv)
def blend_soft_light(self, rgb, intensity):
"""
Combines an rgb image with an intensity map using "soft light"
blending. Uses the "pegtop" formula.
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2
def blend_overlay(self, rgb, intensity):
"""
Combines an rgb image with an intensity map using "overlay" blending.
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
low = 2 * intensity * rgb
high = 1 - 2 * (1 - intensity) * (1 - rgb)
return np.where(rgb <= 0.5, low, high)
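# Usage sketch (synthetic terrain, arbitrary parameters): producing an
# intensity map with the default northwest light source.
def _lightsource_demo():
    ls = LightSource(azdeg=315, altdeg=45)
    x = np.linspace(0, 3, 50)
    z = np.outer(np.sin(x), np.cos(x))  # a smooth bumpy surface
    return ls.hillshade(z, vert_exag=10)  # 2d array of values in [0, 1]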
def from_levels_and_colors(levels, colors, extend='neither'):
"""
A helper routine to generate a cmap and a norm instance which
behave similar to contourf's levels and colors arguments.
Parameters
----------
levels : sequence of numbers
The quantization levels used to construct the :class:`BoundaryNorm`.
    Values ``v`` are quantized to level ``i`` if
``lev[i] <= v < lev[i+1]``.
colors : sequence of colors
The fill color to use for each level. If `extend` is "neither" there
must be ``n_level - 1`` colors. For an `extend` of "min" or "max" add
one extra color, and for an `extend` of "both" add two colors.
extend : {'neither', 'min', 'max', 'both'}, optional
The behaviour when a value falls out of range of the given levels.
See :func:`~matplotlib.pyplot.contourf` for details.
Returns
-------
(cmap, norm) : tuple containing a :class:`Colormap` and a \
:class:`Normalize` instance
"""
colors_i0 = 0
colors_i1 = None
if extend == 'both':
colors_i0 = 1
colors_i1 = -1
extra_colors = 2
elif extend == 'min':
colors_i0 = 1
extra_colors = 1
elif extend == 'max':
colors_i1 = -1
extra_colors = 1
elif extend == 'neither':
extra_colors = 0
else:
raise ValueError('Unexpected value for extend: {0!r}'.format(extend))
n_data_colors = len(levels) - 1
n_expected_colors = n_data_colors + extra_colors
if len(colors) != n_expected_colors:
raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'
' n_colors == {2!r}. Got {3!r}.'
''.format(extend, len(levels), n_expected_colors,
len(colors)))
cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)
if extend in ['min', 'both']:
cmap.set_under(colors[0])
else:
cmap.set_under('none')
if extend in ['max', 'both']:
cmap.set_over(colors[-1])
else:
cmap.set_over('none')
cmap.colorbar_extend = extend
norm = BoundaryNorm(levels, ncolors=n_data_colors)
return cmap, norm
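# Minimal usage sketch (hypothetical data; assumes a matplotlib Axes is
# available as ``ax``):
#     cmap, norm = from_levels_and_colors([0, 1, 2, 4],
#                                         ['red', 'green', 'blue'])
#     ax.pcolormesh(data, cmap=cmap, norm=norm)
# values in [0, 1) map to red, [1, 2) to green and [2, 4) to blue.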
|
stevenewey/django | refs/heads/master | tests/logging_tests/logconfig.py | 609 | import logging
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
class MyHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.config = settings.LOGGING
class MyEmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
pass
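# Hypothetical settings wiring for the stubs above (dictConfig-style):
#     LOGGING = {'version': 1,
#                'handlers': {'custom': {'class': 'logconfig.MyHandler'}}}
#     EMAIL_BACKEND = 'logconfig.MyEmailBackend'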
|
rschnapka/odoo | refs/heads/7.0 | addons/l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.py | 56 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.report import report_sxw
class report_hr_salary_employee_bymonth(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_hr_salary_employee_bymonth, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_employee': self.get_employee,
'get_periods': self.get_periods,
'get_months_tol': self.get_months_tol,
'get_total': self.get_total,
})
self.context = context
self.mnths = []
self.mnths_total = []
self.total = 0.0
def get_periods(self, form):
# Get start year-month-date and end year-month-date
first_year = int(form['start_date'][0:4])
last_year = int(form['end_date'][0:4])
first_month = int(form['start_date'][5:7])
last_month = int(form['end_date'][5:7])
no_months = (last_year-first_year) * 12 + last_month - first_month + 1
current_month = first_month
current_year = first_year
# Get name of the months from integer
mnth_name = []
for count in range(0, no_months):
m = datetime.date(current_year, current_month, 1).strftime('%b')
mnth_name.append(m)
self.mnths.append(str(current_month) + '-' + str(current_year))
if current_month == 12:
current_month = 0
# roll over to the next year; assigning last_year here would break
# reports spanning more than two calendar years
current_year = current_year + 1
current_month = current_month + 1
for c in range(0, (12-no_months)):
mnth_name.append('None')
self.mnths.append('None')
return [mnth_name]
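# Illustrative example (hypothetical form values): for a report from
# '2012-11-01' to '2013-02-28' this yields
#     mnth_name == ['Nov', 'Dec', 'Jan', 'Feb', 'None', ..., 'None']
# padded with 'None' up to 12 entries, while self.mnths holds the
# matching keys ['11-2012', '12-2012', '1-2013', '2-2013', 'None', ...].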
def get_salary(self, form, emp_id, emp_salary, total_mnths):
category_id = form.get('category_id', [])
category_id = category_id and category_id[0] or False
self.cr.execute("select to_char(date_to,'mm-yyyy') as to_date ,sum(pl.total) \
from hr_payslip_line as pl \
left join hr_payslip as p on pl.slip_id = p.id \
left join hr_employee as emp on emp.id = p.employee_id \
left join resource_resource as r on r.id = emp.resource_id \
where p.state = 'done' and p.employee_id = %s and pl.category_id = %s \
group by r.name, p.date_to,emp.id",(emp_id, category_id,))
sal = self.cr.fetchall()
salary = dict(sal)
total = 0.0
cnt = 1
for month in self.mnths:
if month != 'None':
if len(month) != 7:
month = '0' + str(month)
if month in salary and salary[month]:
emp_salary.append(salary[month])
total += salary[month]
total_mnths[cnt] = total_mnths[cnt] + salary[month]
else:
emp_salary.append(0.00)
else:
emp_salary.append('')
total_mnths[cnt] = ''
cnt = cnt + 1
return emp_salary, total, total_mnths
def get_employee(self, form):
emp_salary = []
salary_list = []
total_mnths=['Total', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
emp_obj = self.pool.get('hr.employee')
emp_ids = form.get('employee_ids', [])
employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
for emp_id in employees:
emp_salary.append(emp_id.name)
total = 0.0
emp_salary, total, total_mnths = self.get_salary(form, emp_id.id, emp_salary, total_mnths)
emp_salary.append(total)
salary_list.append(emp_salary)
emp_salary = []
self.mnths_total.append(total_mnths)
return salary_list
def get_months_tol(self):
return self.mnths_total
def get_total(self):
for item in self.mnths_total:
for count in range(1, len(item)):
if item[count] == '':
continue
self.total += item[count]
return self.total
report_sxw.report_sxw('report.salary.employee.bymonth', 'hr.salary.employee.month', 'l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.rml', parser=report_hr_salary_employee_bymonth, header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
apark263/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/clip_ops_test.py | 8 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ClipTest(test.TestCase):
def DISABLED_testClipByValueGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs_1 = clip_ops.clip_by_value(inputs, 0.5, 3.5)
min_val = constant_op.constant([0.5, 0.5, 0.5, 0.5], dtype=dtypes.float32)
max_val = constant_op.constant([3.5, 3.5, 3.5, 3.5], dtype=dtypes.float32)
outputs_2 = clip_ops.clip_by_value(inputs, min_val, max_val)
with self.cached_session():
error_1 = gradient_checker.compute_gradient_error(inputs, [4], outputs_1,
[4])
self.assertLess(error_1, 1e-4)
error_2 = gradient_checker.compute_gradient_error(inputs, [4], outputs_2,
[4])
self.assertLess(error_2, 1e-4)
# ClipByValue test
def testClipByValue(self):
with self.session(use_gpu=True):
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Scalar, Scalar]
def DISABLED_testClipByValue0Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = 2
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Tensor, Scalar]
def DISABLED_testClipByValue1Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = constant_op.constant(
[2, 2, 2, 3, 3, 3], shape=[2, 3], dtype=dtype)
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Scalar, Tensor]
def DISABLED_testClipByValue2Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[4, 4, 4], [4, 5, 6]]
clip_value_min = 4
clip_value_max = constant_op.constant(
[6, 6, 6, 6, 6, 6], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Tensor, Tensor]
def DISABLED_testClipByValue3Type(self):
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16
]:
with self.cached_session(use_gpu=True):
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [5, 5, 6]]
clip_value_min = constant_op.constant(
[2, 2, 2, 5, 5, 5], shape=[2, 3], dtype=dtype)
clip_value_max = constant_op.constant(
[5, 5, 5, 7, 7, 7], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByValueBadShape(self):
with self.session(use_gpu=True):
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_value(x, -clip, clip)
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_value(x, 1.0, clip)
def testClipByValueNonFinite(self):
# TODO(b/78016351): Enable test on GPU once the bug is fixed.
with self.cached_session():
x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans_tensor = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
self.assertAllClose(np_ans, tf_ans_tensor)
@test_util.run_deprecated_v1
def testClipByNormGradientZeros(self):
with self.session(use_gpu=True):
x = array_ops.zeros([3])
b = clip_ops.clip_by_norm(x, 1.)
grad, = gradients_impl.gradients(b, x)
self.assertAllEqual(grad.eval(), [1., 1., 1.])
def testClipByNormBadShape(self):
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_norm(x, clip)
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.session(use_gpu=True):
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [0])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# ClipByGlobalNorm tests
@test_util.run_deprecated_v1
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = constant_op.constant(4.0)
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[2].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = ops.IndexedSlices(
constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormPreservesDenseShape(self):
dense_shape = (1,)
slices = ops.IndexedSlices(
constant_op.constant([1.0]),
constant_op.constant([0]),
dense_shape=dense_shape)
ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
modified_slices = ans[0]
self.assertEqual(dense_shape, slices.dense_shape)
self.assertEqual(dense_shape, modified_slices.dense_shape)
@test_util.run_deprecated_v1
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.session(use_gpu=True):
x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([0.0, 0.0])
# Norm = 0, no changes
np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormInf(self):
with self.session(use_gpu=True):
x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
self.evaluate(norm)
with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
ans[0].eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
ans[1].eval()
def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormClippedTensor(self):
# Norm clipping when average clip_norm < 0.83333333
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
with self.session(use_gpu=True):
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormReplacedWithClipByNorm(self):
# Check clip_by_average_norm(t) is the same as
# clip_by_norm(t, clip_norm * tf.to_float(tf.size(t)))
with self.session(use_gpu=True):
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
# expected answer [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
with_norm = clip_ops.clip_by_average_norm(x, clip_norm)
without_norm = clip_ops.clip_by_norm(
x, clip_norm * math_ops.to_float(array_ops.size(x)))
clip_by_average_norm_ans = self.evaluate(with_norm)
clip_by_norm_ans = self.evaluate(without_norm)
self.assertAllClose(clip_by_average_norm_ans, clip_by_norm_ans)
@test_util.run_deprecated_v1
def testClipByValueEmptyTensor(self):
# Test case for GitHub issue 19337
zero = array_ops.placeholder(dtype=dtypes.float32, shape=None)
x = clip_ops.clip_by_value(zero, zero, zero)
y = clip_ops.clip_by_value(zero, 1.0, 1.0)
z = clip_ops.clip_by_value(zero, zero, 1.0)
w = clip_ops.clip_by_value(zero, 1.0, zero)
with self.session(use_gpu=True) as sess:
sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})
if __name__ == '__main__':
test.main()
|
evaunit01/Small-encryption-project | refs/heads/master | src/encrypt_project.py | 1 | import base64
def xor(x, y):
return int(x,16)^int(y,16)
def decrypt(data, key):
# symmetric XOR transform over a hex string, one byte at a time
ans = ""
for temp in [data[a:a + 2] for a in range(0, len(data), 2)]:
# zero-pad to two hex digits; hex() drops leading zeros, which
# would otherwise shift and corrupt the rest of the stream
ans = ans + '%02x' % xor(temp, key)
return ans
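# The XOR transform is its own inverse, so the same helper both encrypts
# and decrypts (illustrative check):
#     decrypt(decrypt('41', '38'), '38') == '41'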
def encrypt(s):
# encrypt
hex_list = ['%02x' % ord(x) for x in s]  # zero-pad every char to two hex digits
hex_state = ''
for x in hex_list:
hex_state += str(x)
xor_key = "38"
a = decrypt(hex_state, xor_key)
b = base64.b64encode(a.decode('hex'))
print b
#decrypt
hex_list2 = []
ascii_list = []
q = b.decode('base64').encode('hex')
r = str(decrypt(q, xor_key))
for x in range(0, len(r), 2):
hex_list2.append("0x"+r[x:x+2])
for x in hex_list2:
ascii_list.append(int(x,0))
return "".join(chr(x) for x in ascii_list)
s = raw_input()
print encrypt(s) |
dexterx17/nodoSocket | refs/heads/master | clients/Python-2.7.6/Mac/Modules/fm/fmscan.py | 34 | # Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
LONG = "Fonts"
SHORT = "fm"
def main():
input = "Fonts.h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
return classname, listname
def makeblacklistnames(self):
return [
"OutlineMetrics", # Too complicated
"AntiTextIsAntiAliased", # XXXX Missing from library...
"AntiTextGetEnabled",
"AntiTextSetEnabled",
"AntiTextGetApplicationAware",
"AntiTextSetApplicationAware",
# These are tricky: they're not Carbon dependent or anything, but they
# exist only on 8.6 or later (both in Carbon and Classic).
# Disabling them is the easiest path.
'SetAntiAliasedTextEnabled',
'IsAntiAliasedTextEnabled',
# OS8-only
'InitFonts',
'SetFontLock',
'FlushFonts',
]
def makeblacklisttypes(self):
return [
"FMInput_ptr", # Not needed for now
"FMOutPtr", # Ditto
## "void_ptr", # Don't know how to do this right now
"FontInfo", # Ditto
]
def makerepairinstructions(self):
return [
([('Str255', '*', 'InMode')], [('Str255', '*', 'OutMode')]),
([('FMetricRecPtr', 'theMetrics', 'InMode')], [('FMetricRecPtr', 'theMetrics', 'OutMode')]),
([('short', 'byteCount', 'InMode'), ('void_ptr', 'textAddr', 'InMode'),],
[('TextBuffer', 'inText', 'InMode')]),
]
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
self.defsfile.write("kNilOptions = 0\n")
if __name__ == "__main__":
main()
|
pradeep-gr/mbed-os5-onsemi | refs/heads/master | tools/export/uvision/__init__.py | 7 | import os
from os.path import sep, normpath, join, exists
import ntpath
import copy
from collections import namedtuple
import shutil
from subprocess import Popen, PIPE
import re
from tools.arm_pack_manager import Cache
from tools.targets import TARGET_MAP
from tools.export.exporters import Exporter, apply_supported_whitelist
from tools.export.cmsis import DeviceCMSIS
cache_d = False
class DeviceUvision(DeviceCMSIS):
"""Uvision Device class, inherits CMSIS Device class
Encapsulates information necessary for uvision project targets"""
def __init__(self, target):
DeviceCMSIS.__init__(self, target)
dev_format = "$$Device:{0}${1}"
self.svd = ''
if self.debug_svd:
self.svd = dev_format.format(self.dname, self.debug_svd)
self.reg_file = dev_format.format(self.dname, self.compile_header)
self.debug_interface = self.uv_debug()
self.flash_dll = self.generate_flash_dll()
def uv_debug(self):
"""Return a namedtuple of information about uvision debug settings"""
UVDebug = namedtuple('UVDebug',['bin_loc','core_flag', 'key'])
# CortexMXn => pCMX
cpu = self.core.replace("Cortex-", "C")
cpu = cpu.replace("+", "")
cpu = cpu.replace("F", "")
cpu_flag = "p"+cpu
# Locations found in Keil_v5/TOOLS.INI
debuggers = {"st-link": ('STLink\\ST-LINKIII-KEIL_SWO.dll', 'ST-LINKIII-KEIL_SWO'),
"j-link":('Segger\\JL2CM3.dll', 'JL2CM3'),
"cmsis-dap":('BIN\\CMSIS_AGDI.dll', 'CMSIS_AGDI'),
"nulink":('NULink\\Nu_Link.dll','Nu_Link')}
res = debuggers[self.debug.lower()]
binary = res[0]
key = res[1]
return UVDebug(binary, cpu_flag, key)
def generate_flash_dll(self):
'''Flash DLL string from uvision
S = SW/JTAG Clock ID
C = CPU index in JTAG chain
P = Access Port
For the Options for Target -> Debug tab -> settings -> "Flash" tab in the dialog:
FD = RAM Start for Flash Functions
FC = RAM Size for Flash Functions
FN = Number of Flash types
FF = Flash File Name (without an extension)
FS = Start Address of the Flash Device
FL = Size of the Flash Device
FP = Full path to the Device algorithm (RTE)
Necessary to flash some targets. Info gathered from algorithms field of pdsc file.
'''
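# Illustrative result for a hypothetical single-algorithm device:
#   UL2CM3(-S0 -C0 -P0 -FD20000000 -FC1000 -FN1 -FF0MK_P1M
#          -FS08000000 -FL0100000 -FP0($$Device:SOME_DEVICE$Flash/MK_P1M.FLM))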
fl_count = 0
def get_mem_no_x(mem_str):
mem_reg = "\dx(\w+)"
m = re.search(mem_reg, mem_str)
return m.group(1) if m else None
RAMS = [(get_mem_no_x(info["start"]), get_mem_no_x(info["size"]))
for mem, info in self.target_info["memory"].items() if "RAM" in mem]
format_str = "UL2CM3(-S0 -C0 -P0 -FD{ramstart}"+" -FC{ramsize} "+"-FN{num_algos} {extra_flags})"
ramstart = ''
#Default according to Keil developer
ramsize = '1000'
if len(RAMS)>=1:
ramstart = RAMS[0][0]
extra_flags = []
for name, info in self.target_info["algorithm"].items():
if not name or not info:
continue
if int(info["default"])==0:
continue
name_reg = "\w*/([\w_]+)\.flm"
m = re.search(name_reg, name.lower())
fl_name = m.group(1) if m else None
name_flag = "-FF" + str(fl_count) + fl_name
start, size = get_mem_no_x(info["start"]), get_mem_no_x(info["size"])
rom_start_flag = "-FS"+str(fl_count)+str(start)
rom_size_flag = "-FL" + str(fl_count) + str(size)
if info["ramstart"] is not None and info["ramsize"] is not None:
ramstart = get_mem_no_x(info["ramstart"])
ramsize = get_mem_no_x(info["ramsize"])
path_flag = "-FP" + str(fl_count) + "($$Device:"+self.dname+"$"+name+")"
extra_flags.extend([name_flag, rom_start_flag, rom_size_flag, path_flag])
fl_count += 1
extra = " ".join(extra_flags)
return format_str.format(ramstart=ramstart,
ramsize=ramsize,
extra_flags=extra, num_algos=fl_count)
class Uvision(Exporter):
"""Keil Uvision class
This class encapsulates information to be contained in a Uvision
project file (.uvprojx).
The needed information can be viewed in uvision.tmpl
"""
NAME = 'uvision5'
TOOLCHAIN = 'ARM'
POST_BINARY_WHITELIST = set([
"MCU_NRF51Code.binary_hook",
"TEENSY3_1Code.binary_hook",
"LPCTargetCode.lpc_patch",
"LPC4088Code.binary_hook",
"MTSCode.combine_bins_mts_dot",
"MTSCode.combine_bins_mts_dragonfly",
"NCS36510TargetCode.ncs36510_addfib"
])
@classmethod
def is_target_supported(cls, target_name):
target = TARGET_MAP[target_name]
return apply_supported_whitelist(
cls.TOOLCHAIN, cls.POST_BINARY_WHITELIST, target) and\
DeviceCMSIS.check_supported(target_name)
#File associations within .uvprojx file
file_types = {'.cpp': 8, '.c': 1, '.s': 2,
'.obj': 3, '.o': 3, '.lib': 4,
'.ar': 4, '.h': 5, '.hpp': 5, '.sct': 4}
def uv_files(self, files):
"""An generator containing Uvision specific information about project files
Positional Arguments:
files - the location of source files
.uvprojx XML for project file:
<File>
<FileType>{{file.type}}</FileType>
<FileName>{{file.name}}</FileName>
<FilePath>{{file.loc}}</FilePath>
</File>
"""
for loc in files:
#Encapsulates the information necessary for template entry above
UVFile = namedtuple('UVFile', ['type','loc','name'])
_, ext = os.path.splitext(loc)
if ext.lower() in self.file_types:
type = self.file_types[ext.lower()]
name = ntpath.basename(normpath(loc))
yield UVFile(type, loc, name)
def format_flags(self):
"""Format toolchain flags for Uvision"""
flags = copy.deepcopy(self.flags)
# to be preprocessed with armcc
asm_flag_string = '--cpreproc --cpreproc_opts=-D__ASSERT_MSG,' + \
",".join(flags['asm_flags'])
flags['asm_flags'] = asm_flag_string
# All non-asm flags are in one template field
c_flags = list(set(flags['c_flags'] + flags['cxx_flags'] +flags['common_flags']))
# These flags are in the template to be set by the user in the IDE
template = ["--no_vla", "--cpp", "--c99"]
# Flag is invalid if set in template
# Optimizations are also set in the template
invalid_flag = lambda x: x in template or re.match("-O(\d|time)", x)
flags['c_flags'] = [flag.replace('"','\\"') for flag in c_flags if not invalid_flag(flag)]
flags['c_flags'] = " ".join(flags['c_flags'])
return flags
def format_src(self, srcs):
"""Make sources into the named tuple for use in the template"""
grouped = self.group_project_files(srcs)
for group, files in grouped.items():
grouped[group] = sorted(list(self.uv_files(files)),
key=lambda (_, __, name): name.lower())
return grouped
@staticmethod
def format_fpu(core):
"""Generate a core's FPU string"""
if core.endswith("FD"):
return "FPU3(DFPU)"
elif core.endswith("F"):
return "FPU2"
else:
return ""
def generate(self):
"""Generate the .uvproj file"""
cache = Cache(True, False)
if cache_d:
cache.cache_descriptors()
srcs = self.resources.headers + self.resources.s_sources + \
self.resources.c_sources + self.resources.cpp_sources + \
self.resources.objects + self.resources.libraries
ctx = {
'name': self.project_name,
# project_files => dict of generators - file group to generator of
# UVFile tuples defined above
'project_files': sorted(list(self.format_src(srcs).iteritems()),
key=lambda (group, _): group.lower()),
'linker_script':self.resources.linker_script,
'include_paths': '; '.join(self.resources.inc_dirs).encode('utf-8'),
'device': DeviceUvision(self.target),
}
core = ctx['device'].core
ctx['cputype'] = core.rstrip("FD")
if core.endswith("FD"):
ctx['fpu_setting'] = 3
elif core.endswith("F"):
ctx['fpu_setting'] = 2
else:
ctx['fpu_setting'] = 1
ctx['fputype'] = self.format_fpu(core)
ctx.update(self.format_flags())
self.gen_file('uvision/uvision.tmpl', ctx, self.project_name+".uvprojx")
self.gen_file('uvision/uvision_debug.tmpl', ctx, self.project_name + ".uvoptx")
@staticmethod
def build(project_name, log_name='build_log.txt', cleanup=True):
""" Build Uvision project """
# > UV4 -r -j0 -o [log_name] [project_name].uvprojx
proj_file = project_name + ".uvprojx"
cmd = ['UV4', '-r', '-j0', '-o', log_name, proj_file]
# Build the project
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
ret_code = p.returncode
# Print the log file to stdout
with open(log_name, 'r') as f:
print f.read()
# Cleanup the exported and built files
if cleanup:
os.remove(log_name)
os.remove(project_name+".uvprojx")
os.remove(project_name+".uvoptx")
# legacy .build directory cleaned if exists
if exists('.build'):
shutil.rmtree('.build')
if exists('BUILD'):
shutil.rmtree('BUILD')
# UV4 returns 0 upon success, 1 upon a warning, and other codes upon an error
if ret_code != 0 and ret_code != 1:
# Seems like something went wrong.
return -1
else:
return 0
|
ciaracdb/ACMusic | refs/heads/master | bot/players/YoutubePlayer.py | 1 | import asyncio
from bot.players.Player import Player
class YoutubePlayer(Player):
def __init__(self, client):
super().__init__(client)
self.discordPlayer = None
async def startQueue(self):
await self.next()
def songFinished(self):
coroutine = self.next()
future = asyncio.run_coroutine_threadsafe(coroutine, self.client.loop)
try:
future.result()
except Exception:
# ignore errors raised by the already-scheduled coroutine
pass
async def next(self):
if not self.queue.empty():
self.discordPlayer = await self.client.voiceUser.voiceClient.create_ytdl_player(self.queue.get(), after=self.songFinished)
self.discordPlayer.volume = 0.5
self.discordPlayer.start()
def pause(self):
self.discordPlayer.pause()
def resume(self):
self.discordPlayer.resume()
def stop(self):
self.discordPlayer.stop()
|
shaistaansari/django | refs/heads/master | tests/indexes/tests.py | 321 | from unittest import skipUnless
from django.db import connection
from django.test import TestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=("c1", "c2", "c3"),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_7ce4cc86123")
def test_index_together(self):
editor = connection.schema_editor()
index_sql = editor._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[2])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
self.assertEqual(index_sql, [])
|
ernstp/kivy | refs/heads/master | kivy/uix/abstractview.py | 44 | '''
Abstract View
=============
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
The :class:`~kivy.uix.abstractview.AbstractView` widget has an adapter property
for an adapter that mediates access to data. The adapter manages an
item_view_instance dict property that holds views for each data item,
operating as a cache.
'''
__all__ = ('AbstractView', )
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
class AbstractView(FloatLayout):
'''
View using an :class:`~kivy.adapters.adapter.Adapter` as a data provider.
'''
adapter = ObjectProperty(None)
'''The adapter can be one of several kinds of
:class:`adapters <kivy.adapters.adapter.Adapter>`. The most
common example is the :class:`~kivy.adapters.listadapter.ListAdapter` used
for managing data items in a list.
'''
|
CloudBrewery/duplicity-swiftkeys | refs/heads/master | duplicity/backends/megabackend.py | 3 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2011 Carlos Abalde <carlos.abalde@gmail.com>
# for gdocsbackend.py on which megabackend.py is based on
#
# Copyright 2013 Christian Kornacker <christian.kornacker@gmail.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import duplicity.backend
from duplicity import log
from duplicity.errors import BackendException
class MegaBackend(duplicity.backend.Backend):
"""Connect to remote store using Mega.co.nz API"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
try:
from mega import Mega
except ImportError:
raise BackendException('Mega.co.nz backend requires Mega.co.nz APIs Python Module'
'(see https://github.com/richardasaurus/mega.py).')
# Setup client instance.
self.client = Mega()
self.client.domain = parsed_url.hostname
self.__authorize(parsed_url.username, self.get_password())
# Fetch destination folder entry (and create the hierarchy if required).
folder_names = parsed_url.path[1:].split('/')
files = self.client.get_files()
parent_folder = self.client.root_id
for folder_name in folder_names:
entries = self.__filter_entries(files, parent_folder, folder_name, 'folder')
if len(entries):
# use first matching folder as new parent
parent_folder = entries.keys()[0]
else:
# create subfolder if folder doesn't exist and use its handle as parent
folder_node = self.client.create_folder(folder_name, parent_folder)
parent_folder = self.client.get_id_from_obj(folder_node)
# update filelist after creating new folder
files = self.client.get_files()
self.folder = parent_folder
def _put(self, source_path, remote_filename):
try:
self._delete(remote_filename)
except Exception:
pass
self.client.upload(source_path.get_canonical(), self.folder, dest_filename=remote_filename)
def _get(self, remote_filename, local_path):
files = self.client.get_files()
entries = self.__filter_entries(files, self.folder, remote_filename, 'file')
if len(entries):
# get first matching remote file
entry = entries.keys()[0]
self.client.download((entry, entries[entry]), dest_filename=local_path.name)
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (remote_filename, self.__get_node_name(self.folder)),
code=log.ErrorCode.backend_not_found)
def _list(self):
entries = self.client.get_files_in_node(self.folder)
return [self.client.get_name_from_file({entry:entries[entry]}) for entry in entries]
def _delete(self, filename):
files = self.client.get_files()
entries = self.__filter_entries(files, self.folder, filename, 'file')
if len(entries):
self.client.destroy(entries.keys()[0])
else:
raise BackendException("Failed to find file '%s' in remote folder '%s'"
% (filename, self.__get_node_name(self.folder)),
code=log.ErrorCode.backend_not_found)
def __get_node_name(self, handle):
"""get node name from public handle"""
files = self.client.get_files()
return self.client.get_name_from_file({handle:files[handle]})
def __authorize(self, email, password):
self.client.login(email, password)
def __filter_entries(self, entries, parent_id=None, title=None, type=None):
result = {}
type_map = { 'folder': 1, 'file': 0 }
for k, v in entries.items():
try:
if parent_id is not None:
assert(v['p'] == parent_id)
if title is not None:
assert(v['a']['n'] == title)
if type is not None:
assert(v['t'] == type_map[type])
except AssertionError:
continue
result.update({k:v})
return result
duplicity.backend.register_backend('mega', MegaBackend)
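# Hypothetical target URL for this backend (path components become nested
# Mega folders, created on demand by __init__ above):
#   mega://user@mega.co.nz/duplicity/backups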
|
coderbone/SickRage-alt | refs/heads/master | lib/requests_oauthlib/oauth1_auth.py | 49 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from oauthlib.common import extract_params
from oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER
from oauthlib.oauth1 import SIGNATURE_TYPE_BODY
from requests.compat import is_py3
from requests.utils import to_native_string
from requests.auth import AuthBase
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
if is_py3:
unicode = str
log = logging.getLogger(__name__)
# OBS!: Correct signing of requests are conditional on invoking OAuth1
# as the last step of preparing a request, or at least having the
# content-type set properly.
class OAuth1(AuthBase):
"""Signs the request using OAuth 1 (RFC5849)"""
client_class = Client
def __init__(self, client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None, verifier=None,
decoding='utf-8',
client_class=None,
force_include_body=False,
**kwargs):
try:
signature_type = signature_type.upper()
except AttributeError:
pass
client_class = client_class or self.client_class
self.force_include_body = force_include_body
self.client = client_class(client_key, client_secret, resource_owner_key,
resource_owner_secret, callback_uri, signature_method,
signature_type, rsa_key, verifier, decoding=decoding, **kwargs)
def __call__(self, r):
"""Add OAuth parameters to the request.
Parameters may be included from the body if the content-type is
urlencoded, if no content type is set a guess is made.
"""
# Overwriting url is safe here as request will not modify it past
# this point.
log.debug('Signing request %s using client %s', r, self.client)
content_type = r.headers.get('Content-Type', '')
if (not content_type and extract_params(r.body)
or self.client.signature_type == SIGNATURE_TYPE_BODY):
content_type = CONTENT_TYPE_FORM_URLENCODED
if not isinstance(content_type, unicode):
content_type = content_type.decode('utf-8')
is_form_encoded = (CONTENT_TYPE_FORM_URLENCODED in content_type)
log.debug('Including body in call to sign: %s',
is_form_encoded or self.force_include_body)
if is_form_encoded:
r.headers['Content-Type'] = CONTENT_TYPE_FORM_URLENCODED
r.url, headers, r.body = self.client.sign(
unicode(r.url), unicode(r.method), r.body or '', r.headers)
elif self.force_include_body:
# To allow custom clients to work on non form encoded bodies.
r.url, headers, r.body = self.client.sign(
unicode(r.url), unicode(r.method), r.body or '', r.headers)
else:
# Omit body data in the signing of non form-encoded requests
r.url, headers, _ = self.client.sign(
unicode(r.url), unicode(r.method), None, r.headers)
r.prepare_headers(headers)
r.url = to_native_string(r.url)
log.debug('Updated url: %s', r.url)
log.debug('Updated headers: %s', headers)
log.debug('Updated body: %r', r.body)
return r
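# Minimal usage sketch with requests (hypothetical credentials):
#   import requests
#   requests.get(url, auth=OAuth1('client_key', client_secret='secret',
#                                 resource_owner_key='token',
#                                 resource_owner_secret='token_secret'))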
|
riadnassiffe/Simulator | refs/heads/master | src/tools/ecos/cvxpy/examples/extensions/mixed_integer/noncvx_variable.py | 12 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import abc
import cvxpy
import cvxpy.interface as intf
import cvxopt
class NonCvxVariable(cvxpy.Variable):
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
super(NonCvxVariable, self).__init__(*args, **kwargs)
self.noncvx = True
self.z = cvxpy.Parameter(*self.size)
self.init_z()
self.u = cvxpy.Parameter(*self.size)
self.u.value = cvxopt.matrix(0, self.size, tc='d')
# Initializes the value of the replicant variable.
def init_z(self):
self.z.value = cvxopt.matrix(0, self.size, tc='d')
# Verify that the matrix has the same dimensions as the variable.
def validate_matrix(self, matrix):
if self.size != intf.size(matrix):
raise Exception(("The argument's dimensions must match "
"the variable's dimensions."))
# Wrapper to validate matrix.
def round(self, matrix):
self.validate_matrix(matrix)
return self._round(matrix)
# Project the matrix into the space defined by the non-convex constraint.
# Returns the updated matrix.
@abc.abstractmethod
def _round(matrix):
return NotImplemented
# Wrapper to validate matrix and update curvature.
def fix(self, matrix):
matrix = self.round(matrix)
return self._fix(matrix)
# Fix the variable so it obeys the non-convex constraint.
@abc.abstractmethod
def _fix(self, matrix):
return NotImplemented
|
yonglehou/scikit-learn | refs/heads/master | sklearn/manifold/setup.py | 198 | import os
import numpy
from numpy.distutils.misc_util import Configuration
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
kseistrup/qtile | refs/heads/develop | libqtile/widget/sep.py | 11 | # Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012, 2015 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
class Sep(base._Widget):
"""
A visible widget separator.
"""
orientations = base.ORIENTATION_BOTH
defaults = [
("padding", 2, "Padding on either side of separator."),
("linewidth", 1, "Width of separator line."),
("foreground", "888888", "Separator line colour."),
(
"size_percent",
80,
"Size as a percentage of bar size (0-100)."
),
]
def __init__(self, height_percent=None, **config):
# 'height_percent' was replaced by 'size_percent' since the widget can
# be installed in vertical bars
if height_percent is not None:
base.deprecated('height_percent kwarg or positional argument is '
'deprecated. Please use size_percent.')
config["size_percent"] = height_percent
length = config.get("padding", 2) * 2 + config.get("linewidth", 1)
base._Widget.__init__(self, length, **config)
self.add_defaults(Sep.defaults)
self.length = self.padding + self.linewidth
def draw(self):
self.drawer.clear(self.background or self.bar.background)
if self.bar.horizontal:
margin_top = (self.bar.height / float(100) *
(100 - self.size_percent)) / 2.0
self.drawer.draw_vbar(
self.foreground,
float(self.length) / 2,
margin_top,
self.bar.height - margin_top,
linewidth=self.linewidth
)
self.drawer.draw(offsetx=self.offset, width=self.length)
else:
margin_left = (self.bar.width / float(100) *
(100 - self.size_percent)) / 2.0
self.drawer.draw_hbar(
self.foreground,
margin_left,
self.bar.width - margin_left,
float(self.length) / 2,
linewidth=self.linewidth
)
self.drawer.draw(offsety=self.offset, height=self.length)
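# Hypothetical bar configuration using this widget:
#   bar.Bar([widget.WindowName(),
#            widget.Sep(padding=6, linewidth=2, size_percent=60),
#            widget.Clock()], 24)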
|
Wingless-Archangel/CustomCommand | refs/heads/master | Python Practice/learn_django/lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 514 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is any URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
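# Minimal usage sketch (hypothetical host and credentials):
#   pool = NTLMConnectionPool('EXAMPLE\\user', 'secret', authurl='/',
#                             host='intranet.example.com', port=443)
#   response = pool.urlopen('GET', '/protected/page')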
|
lidavidm/sympy | refs/heads/master | sympy/functions/special/tensor_functions.py | 4 | from __future__ import print_function, division
from sympy.core.function import Function, C
from sympy.core import S, Integer
from sympy.core.mul import prod
from sympy.utilities.iterables import (has_dups, default_sort_key)
from sympy.core.compatibility import xrange
###############################################################################
###################### Kronecker Delta, Levi-Civita etc. ######################
###############################################################################
def Eijk(*args, **kwargs):
"""
Represent the Levi-Civita symbol.
This is just compatibility wrapper to ``LeviCivita()``.
See Also
========
LeviCivita
"""
return LeviCivita(*args, **kwargs)
def eval_levicivita(*args):
"""Evaluate Levi-Civita symbol."""
from sympy import factorial
n = len(args)
return prod(
prod(args[j] - args[i] for j in xrange(i + 1, n))
/ factorial(i) for i in xrange(n))
# converting factorial(i) to int is slightly faster
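# Worked example of the product formula above, for args == (1, 2, 3):
#   i == 0: (2 - 1) * (3 - 1) / 0! == 2
#   i == 1: (3 - 2) / 1!           == 1
#   i == 2: (empty product) / 2!   == 1/2
# so the result is 2 * 1 * 1/2 == 1, the sign of an even permutation.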
class LeviCivita(Function):
"""Represent the Levi-Civita symbol.
For even permutations of indices it returns 1, for odd permutations -1, and
for everything else (a repeated index) it returns 0.
Thus it represents an alternating pseudotensor.
Examples
========
>>> from sympy import LeviCivita
>>> from sympy.abc import i, j, k
>>> LeviCivita(1, 2, 3)
1
>>> LeviCivita(1, 3, 2)
-1
>>> LeviCivita(1, 2, 2)
0
>>> LeviCivita(i, j, k)
LeviCivita(i, j, k)
>>> LeviCivita(i, j, i)
0
See Also
========
Eijk
"""
is_integer = True
@classmethod
def eval(cls, *args):
if all(isinstance(a, (int, Integer)) for a in args):
return eval_levicivita(*args)
if has_dups(args):
return S.Zero
def doit(self):
return eval_levicivita(*self.args)
class KroneckerDelta(Function):
"""The discrete, or Kronecker, delta function.
A function that takes in two integers `i` and `j`. It returns `0` if `i` and
`j` are not equal, and `1` if they are equal.
Parameters
==========
i : Number, Symbol
The first index of the delta function.
j : Number, Symbol
The second index of the delta function.
Examples
========
A simple example with integer indices::
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> KroneckerDelta(1, 2)
0
>>> KroneckerDelta(3, 3)
1
Symbolic indices::
>>> from sympy.abc import i, j, k
>>> KroneckerDelta(i, j)
KroneckerDelta(i, j)
>>> KroneckerDelta(i, i)
1
>>> KroneckerDelta(i, i + 1)
0
>>> KroneckerDelta(i, i + 1 + k)
KroneckerDelta(i, i + k + 1)
See Also
========
eval
sympy.functions.special.delta_functions.DiracDelta
References
==========
.. [1] http://en.wikipedia.org/wiki/Kronecker_delta
"""
nargs = 2
is_integer = True
@classmethod
def eval(cls, i, j):
"""
Evaluates the discrete delta function.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy.abc import i, j, k
>>> KroneckerDelta(i, j)
KroneckerDelta(i, j)
>>> KroneckerDelta(i, i)
1
>>> KroneckerDelta(i, i + 1)
0
>>> KroneckerDelta(i, i + 1 + k)
KroneckerDelta(i, i + k + 1)
# indirect doctest
"""
if (i > j) is True:
return cls(j, i)
diff = C.Abs(i - j)
if diff == 0:
return S.One
elif diff.is_number:
return S.Zero
elif i != 0 and diff.is_nonzero:
return cls(0, diff.args[0])
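        # An index known to be below the Fermi level can never equal one known
        # to be above it, so the delta vanishes.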
if i.assumptions0.get("below_fermi") and \
j.assumptions0.get("above_fermi"):
return S.Zero
if j.assumptions0.get("below_fermi") and \
i.assumptions0.get("above_fermi"):
return S.Zero
# to make KroneckerDelta canonical
# following lines will check if inputs are in order
# if not, will return KroneckerDelta with correct order
if i is not min(i, j, key=default_sort_key):
return cls(j, i)
def _eval_power(self, expt):
if expt.is_positive:
return self
        if expt.is_negative and -expt is not S.One:
return 1/self
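    # e.g. KroneckerDelta(i, j)**2 == KroneckerDelta(i, j): the delta only
    # takes the values 0 and 1, so any positive power collapses to itself.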
@property
def is_above_fermi(self):
"""
        True if the Delta can be non-zero above the Fermi level.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p, a).is_above_fermi
True
>>> KroneckerDelta(p, i).is_above_fermi
False
>>> KroneckerDelta(p, q).is_above_fermi
True
See Also
========
is_below_fermi, is_only_below_fermi, is_only_above_fermi
"""
if self.args[0].assumptions0.get("below_fermi"):
return False
if self.args[1].assumptions0.get("below_fermi"):
return False
return True
@property
def is_below_fermi(self):
"""
        True if the Delta can be non-zero below the Fermi level.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p, a).is_below_fermi
False
>>> KroneckerDelta(p, i).is_below_fermi
True
>>> KroneckerDelta(p, q).is_below_fermi
True
See Also
========
is_above_fermi, is_only_above_fermi, is_only_below_fermi
"""
if self.args[0].assumptions0.get("above_fermi"):
return False
if self.args[1].assumptions0.get("above_fermi"):
return False
return True
@property
def is_only_above_fermi(self):
"""
        True if the Delta is restricted to above the Fermi level.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p, a).is_only_above_fermi
True
>>> KroneckerDelta(p, q).is_only_above_fermi
False
>>> KroneckerDelta(p, i).is_only_above_fermi
False
See Also
========
is_above_fermi, is_below_fermi, is_only_below_fermi
"""
        return (self.args[0].assumptions0.get("above_fermi") or
                self.args[1].assumptions0.get("above_fermi") or False)
@property
def is_only_below_fermi(self):
"""
        True if the Delta is restricted to below the Fermi level.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p, i).is_only_below_fermi
True
>>> KroneckerDelta(p, q).is_only_below_fermi
False
>>> KroneckerDelta(p, a).is_only_below_fermi
False
See Also
========
is_above_fermi, is_below_fermi, is_only_above_fermi
"""
        return (self.args[0].assumptions0.get("below_fermi") or
                self.args[1].assumptions0.get("below_fermi") or False)
@property
def indices_contain_equal_information(self):
"""
        Returns True if the indices carry the same information about the
        Fermi level: both above, both below, or both unrestricted.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> q = Symbol('q')
>>> KroneckerDelta(p, q).indices_contain_equal_information
True
>>> KroneckerDelta(p, q+1).indices_contain_equal_information
True
>>> KroneckerDelta(i, p).indices_contain_equal_information
False
"""
if (self.args[0].assumptions0.get("below_fermi") and
self.args[1].assumptions0.get("below_fermi")):
return True
if (self.args[0].assumptions0.get("above_fermi")
and self.args[1].assumptions0.get("above_fermi")):
return True
        # if both indices are general this is True, otherwise False
return self.is_below_fermi and self.is_above_fermi
@property
def preferred_index(self):
"""
Returns the index which is preferred to keep in the final expression.
        The preferred index is the one with more information regarding the
        Fermi level. If both indices contain the same information, 'a' is
        preferred over 'b'.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> j = Symbol('j', below_fermi=True)
>>> p = Symbol('p')
>>> KroneckerDelta(p, i).preferred_index
i
>>> KroneckerDelta(p, a).preferred_index
a
>>> KroneckerDelta(i, j).preferred_index
i
See Also
========
killable_index
"""
if self._get_preferred_index():
return self.args[1]
else:
return self.args[0]
@property
def killable_index(self):
"""
Returns the index which is preferred to substitute in the final
expression.
        The index to substitute is the one with less information regarding the
        Fermi level. If both indices contain the same information, 'a' is
        preferred over 'b'.
Examples
========
>>> from sympy.functions.special.tensor_functions import KroneckerDelta
>>> from sympy import Symbol
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> j = Symbol('j', below_fermi=True)
>>> p = Symbol('p')
>>> KroneckerDelta(p, i).killable_index
p
>>> KroneckerDelta(p, a).killable_index
p
>>> KroneckerDelta(i, j).killable_index
j
See Also
========
preferred_index
"""
if self._get_preferred_index():
return self.args[0]
else:
return self.args[1]
def _get_preferred_index(self):
"""
Returns the index which is preferred to keep in the final expression.
        The preferred index is the one with more information regarding the
        Fermi level. If both indices contain the same information, index 0 is
        returned.
"""
if not self.is_above_fermi:
if self.args[0].assumptions0.get("below_fermi"):
return 0
else:
return 1
elif not self.is_below_fermi:
if self.args[0].assumptions0.get("above_fermi"):
return 0
else:
return 1
else:
return 0
@staticmethod
def _latex_no_arg(printer):
return r'\delta'
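# --- Hedged usage sketch (illustrative; not part of the original module) ---
# >>> from sympy import KroneckerDelta, LeviCivita, symbols
# >>> p, q = symbols('p q')
# >>> KroneckerDelta(p, q)**3       # positive powers collapse via _eval_power
# KroneckerDelta(p, q)
# >>> LeviCivita(2, 1, 3)           # one swap away from (1, 2, 3)
# -1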
|
dzipet/controlling-board | refs/heads/master | SaberTooth/src/pycomms.py | 2 | #!/usr/bin/python
# Python Standard Library Imports
import smbus
# External Imports
pass
# Custom Imports
pass
# ===========================================================================
# PyComms I2C Base Class (a rewritten Adafruit_I2C Python class clone)
# ===========================================================================
class PyComms:
    def __init__(self, address, bus=None):
        self.address = address
        # A default argument of smbus.SMBus(1) would be constructed once at
        # import time and shared across every instance; create the bus lazily.
        self.bus = bus if bus is not None else smbus.SMBus(1)
def reverseByteOrder(self, data):
# Reverses the byte order of an int (16-bit) or long (32-bit) value
# Courtesy Vishal Sapre
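        # Worked example (illustrative): 0x1234 -> dstr = '1234',
        # byteCount = 2; the first pass places 0x34 in the high byte and the
        # second places 0x12 in the low byte, yielding 0x3412.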
dstr = hex(data)[2:].replace('L','')
byteCount = len(dstr[::2])
val = 0
        for i in range(byteCount):
d = data & 0xFF
val |= (d << (8 * (byteCount - i - 1)))
data >>= 8
return val
def readBit(self, reg, bitNum):
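        # Note: returns the raw masked value (0 or 1 << bitNum) rather than a
        # normalised 0/1, so callers should treat the result as a truth value.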
b = self.readU8(reg)
data = b & (1 << bitNum)
return data
def writeBit(self, reg, bitNum, data):
b = self.readU8(reg)
if data != 0:
b = (b | (1 << bitNum))
else:
b = (b & ~(1 << bitNum))
return self.write8(reg, b)
def readBits(self, reg, bitStart, length):
# 01101001 read byte
# 76543210 bit numbers
# xxx args: bitStart=4, length=3
# 010 masked
# -> 010 shifted
b = self.readU8(reg)
mask = ((1 << length) - 1) << (bitStart - length + 1)
b &= mask
b >>= (bitStart - length + 1)
return b
def writeBits(self, reg, bitStart, length, data):
# 010 value to write
# 76543210 bit numbers
# xxx args: bitStart=4, length=3
# 00011100 mask byte
# 10101111 original value (sample)
# 10100011 original & ~mask
# 10101011 masked | value
b = self.readU8(reg)
mask = ((1 << length) - 1) << (bitStart - length + 1)
data <<= (bitStart - length + 1)
data &= mask
b &= ~(mask)
b |= data
return self.write8(reg, b)
def readBytes(self, reg, length):
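        # Reads ``length`` bytes by polling the *same* register repeatedly
        # (useful for FIFO-style registers); readBytesListU/S below instead
        # walk consecutive registers starting at ``reg``.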
output = []
i = 0
while i < length:
output.append(self.readU8(reg))
i += 1
return output
def readBytesListU(self, reg, length):
output = []
i = 0
while i < length:
output.append(self.readU8(reg + i))
i += 1
return output
def readBytesListS(self, reg, length):
output = []
i = 0
while i < length:
output.append(self.readS8(reg + i))
i += 1
return output
    def writeList(self, reg, values):
        # Writes an array of bytes using the I2C block-data protocol
        try:
            self.bus.write_i2c_block_data(self.address, reg, values)
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def write8(self, reg, value):
# Writes an 8-bit value to the specified register/address
try:
self.bus.write_byte_data(self.address, reg, value)
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readU8(self, reg):
# Read an unsigned byte from the I2C device
try:
result = self.bus.read_byte_data(self.address, reg)
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readS8(self, reg):
# Reads a signed byte from the I2C device
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127:
return result - 256
else:
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readU16(self, reg):
# Reads an unsigned 16-bit value from the I2C device
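        # Assumes big-endian register order: high byte at ``reg``, low byte
        # at ``reg + 1``.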
try:
hibyte = self.bus.read_byte_data(self.address, reg)
result = (hibyte << 8) + self.bus.read_byte_data(self.address, reg + 1)
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readS16(self, reg):
# Reads a signed 16-bit value from the I2C device
try:
hibyte = self.bus.read_byte_data(self.address, reg)
if hibyte > 127:
hibyte -= 256
result = (hibyte << 8) + self.bus.read_byte_data(self.address, reg + 1)
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
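# --- Hedged usage sketch (illustrative; not part of the original module) ---
# The register map below is hypothetical; substitute the values from your
# device's datasheet. 0x68 is shown only because it is a common default
# I2C address.
#
# dev = PyComms(0x68)
# dev.write8(0x6B, 0x00)          # e.g. clear a device's sleep bit
# raw = dev.readS16(0x3B)         # e.g. read a signed 16-bit sample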
|