hexsha (stringlengths 40 40) | size (int64 3 1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3 972) | max_stars_repo_name (stringlengths 6 130) | max_stars_repo_head_hexsha (stringlengths 40 78) | max_stars_repo_licenses (sequencelengths 1 10) | max_stars_count (int64 1 191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 24 ⌀) | max_issues_repo_path (stringlengths 3 972) | max_issues_repo_name (stringlengths 6 130) | max_issues_repo_head_hexsha (stringlengths 40 78) | max_issues_repo_licenses (sequencelengths 1 10) | max_issues_count (int64 1 116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 24 ⌀) | max_forks_repo_path (stringlengths 3 972) | max_forks_repo_name (stringlengths 6 130) | max_forks_repo_head_hexsha (stringlengths 40 78) | max_forks_repo_licenses (sequencelengths 1 10) | max_forks_count (int64 1 105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 24 ⌀) | content (stringlengths 3 1.03M) | avg_line_length (float64 1.13 941k) | max_line_length (int64 2 941k) | alphanum_fraction (float64 0 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4b21bd61473f9e05b8ff5798ab318a00a380f810 | 2,273 | py | Python | setup.py | LJunghansCode/example-dash-structure | 8d073c78be4631238b6acdc0e3755c029a18bcf0 | ["MIT"] | null | null | null | setup.py | LJunghansCode/example-dash-structure | 8d073c78be4631238b6acdc0e3755c029a18bcf0 | ["MIT"] | null | null | null | setup.py | LJunghansCode/example-dash-structure | 8d073c78be4631238b6acdc0e3755c029a18bcf0 | ["MIT"] | null | null | null | import os
from setuptools import setup, find_packages
# Package meta-data.
NAME = "dash-practice"
DESCRIPTION = "Project structure practice"
# These can be set to None if you want to omit them
URL = "None"
AUTHOR = "None"
AUTHOR_EMAIL = "None"
LICENSE = "MIT license"
REQUIRES_PYTHON = ">=3.6.0"
VERSION = None # get this from __version__.py
# What packages are required for this module to be executed?
REQUIRED = ["dash>=0.40.0", "dash-bootstrap-components>=0.3.0", "click"]
# What packages are optional?
EXTRAS = {"prod": ["mod_wsgi"]}
# get the absolute path to this file
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if "README.md" is present in your MANIFEST.in file!
try:
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# If VERSION not specified above, load the package's __version__.py module as a
# dictionary.
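# Illustrative assumption (the file itself is not shown in this listing):
# src/dash_practice/__version__.py is expected to contain a line such as
#     __version__ = "0.1.0"
# so that the exec() below fills in about["__version__"].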
about = {}
if not VERSION:
with open(
os.path.join(here, "src", "dash_practice", "__version__.py")
) as f:
exec(f.read(), about)
else:
about["__version__"] = VERSION
setup(
name=NAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
python_requires=REQUIRES_PYTHON,
install_requires=REQUIRED,
extras_require=EXTRAS,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
version=about["__version__"],
packages=find_packages("src"),
package_dir={"": "src"},
package_data={
"": [
"assets/favicon.ico",
"assets/*.css",
"assets/*.js",
"assets/font-awesome/css/*.css",
"assets/font-awesome/webfonts/*.eot",
"assets/font-awesome/webfonts/*.svg",
"assets/font-awesome/webfonts/*.ttf",
"assets/font-awesome/webfonts/*.woff",
"assets/font-awesome/webfonts/*.woff2",
]
},
scripts=["bin/run-dash_practice-prod"],
entry_points={
"console_scripts": [
"run-dash_practice-dev=dash_practice.dev_cli:main"
]
},
)
| 26.741176 | 79 | 0.647602 |
682cef3a1021d4ec98b4043185629d7000e35e52 | 3,213 | py | Python | NicBot/cogs/bot_cogs.py | nicdgonzalez/NicBot-discord.py | 3a21510c1e4e2c933f48708478ae792159324a7c | ["MIT"] | null | null | null | NicBot/cogs/bot_cogs.py | nicdgonzalez/NicBot-discord.py | 3a21510c1e4e2c933f48708478ae792159324a7c | ["MIT"] | null | null | null | NicBot/cogs/bot_cogs.py | nicdgonzalez/NicBot-discord.py | 3a21510c1e4e2c933f48708478ae792159324a7c | ["MIT"] | null | null | null | from os import getcwd, listdir
from pathlib import Path
from typing import List
import discord
from discord.ext.commands import Bot, Cog, command, is_owner
from .utils import add_cog
# Trailing `[2:]` removes the drive (C:) from the path since `Path`
# uses a lowercase drive letter and `getcwd` uses an uppercase.
path: str = str(Path(__file__).parents[0]).replace('\\', '/')[2:]
cwd: str = getcwd().replace('\\', '/')[2:]
dot_fmt: str = (
path
.replace(cwd, '')
.replace('/', '.')
.strip('.')
+ '.'
)
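# Illustrative example (hypothetical paths, assuming the process is started
# from the repository root): with cwd '/home/user/NicBot' and this file at
# '/home/user/NicBot/NicBot/cogs/bot_cogs.py', `path` ends in 'NicBot/cogs'
# and `dot_fmt` becomes 'NicBot.cogs.', ready to prefix extension names.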
class BotCogs(Cog):
def __init__(self, bot):
self.bot: Bot = bot
@Cog.listener()
async def on_ready(self):
print('Loaded > Class %s' % (self.__class__.__name__))
@command()
@is_owner()
async def load(self, ctx, extension: str):
"""Loads specified extension."""
self.bot.load_extension(dot_fmt + extension)
return await ctx.message.reply('Extension loaded!')
@command()
@is_owner()
async def unload(self, ctx, extension: str):
"""Loads specified extension."""
self.bot.unload_extension(dot_fmt + extension)
return await ctx.message.reply('Extension unloaded!')
@command()
@is_owner()
async def reload(self, ctx, extension: str = None):
"""Reloads specified extension or all if `extension` is None."""
        action: str = None
        # Template formatted at reply time so it reflects the current
        # action/extension instead of the values at definition time.
        response: str = (
            'Error {action} the extension `{extension}`. '
            'It either does not exist or has not been loaded.'
        )
if (extension is None):
async with ctx.typing():
extensions: List[str] = [
ext for ext
in listdir(path)
if not ext.startswith('_')
and ext.endswith('.py')
]
for extension in extensions:
# Remove '.py' from file name:
extension: str = extension[:-3]
name: str = dot_fmt + extension
try:
self.bot.unload_extension(name=name)
                    except Exception:
                        action = 'unloading'
                        await ctx.message.reply(
                            response.format(action=action, extension=extension)
                        )
try:
self.bot.load_extension(name=name)
                    except Exception as error:
                        action = 'loading'
                        await ctx.message.reply(
                            response.format(action=action, extension=extension)
                            + f'; {error}'
                        )
await ctx.message.reply('All Extensions reloaded!')
elif (extension is not None):
self.bot.unload_extension(dot_fmt + extension)
self.bot.load_extension(dot_fmt + extension)
await ctx.message.reply('Extension reloaded!')
@command()
@is_owner()
async def logout(self, ctx):
"""Disconnects the bot from Discord."""
mention = ctx.author.mention
await ctx.message.reply(f'Hey {mention}, I am logging out now :wave:')
await self.bot.logout()
# Required at the end of every module.
def setup(bot):
return add_cog(bot=bot, name=__name__)
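# Usage sketch (assumption; the host bot is defined elsewhere): discord.py
# loads this cog with something like bot.load_extension('NicBot.cogs.bot_cogs'),
# which imports the module and calls setup(bot).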
| 28.433628 | 78 | 0.544351 |
4c0c0851dce983dac6d44fde169b9e94809ffc1b | 117,651 | py | Python | aesara/gpuarray/elemwise.py | hs2361/aesara | 16f98e4fd69db92e0c2cde9dd97a0d005235deea | ["BSD-3-Clause"] | 1 | 2021-11-30T06:38:39.000Z | 2021-11-30T06:38:39.000Z | aesara/gpuarray/elemwise.py | fonnesbeck/aesara | 02378861f1a77135f2556018630092a09262ea76 | ["BSD-3-Clause"] | null | null | null | aesara/gpuarray/elemwise.py | fonnesbeck/aesara | 02378861f1a77135f2556018630092a09262ea76 | ["BSD-3-Clause"] | null | null | null | import copy
from io import StringIO
import numpy as np
from aesara import scalar as aes
from aesara.graph.basic import Apply
from aesara.graph.op import _NoPythonOp
from aesara.graph.utils import MethodNotDefined
from aesara.link.c.interface import HideC
from aesara.scalar import Composite, Scalar
from aesara.scalar.basic import complex_types, upgrade_to_float_no_complex
from aesara.scalar.basic_scipy import Erfcinv, Erfinv
from aesara.tensor.elemwise import CAReduceDtype, DimShuffle, Elemwise
try:
import pygpu
from pygpu import gpuarray
from pygpu.gpuarray import dtype_to_typecode
from pygpu.reduction import ReductionKernel
from pygpu.tools import ArrayArg
except ImportError:
pass
from .basic_ops import GpuKernelBase, Kernel, as_gpuarray_variable, infer_context_name
from .fp16_help import load_w, write_w
from .type import GpuArrayType, gpu_context_type
def make_argument(v, name):
return ArrayArg(np.dtype(v.type.dtype), name)
def as_C_string_const(s):
return "\n".join('"%s\\n"' % (l.replace('"', '\\"')) for l in s.split("\n"))
def get_scal(dt):
if dt == "float16":
dt = "float32"
return aes.get_scalar_type(dt)
def max_inputs_to_GpuElemwise(node_or_outputs):
"""
Compute the maximum number of inputs that fit in a kernel call.
"""
if isinstance(node_or_outputs, Apply):
outputs = node_or_outputs.outputs
else:
outputs = node_or_outputs
n_out = len(outputs)
ndim = outputs[0].type.ndim
ptr_size = 8
# Even with call32, the interface does not change, and shapes,
# strides, and offset are passed as 64-bits (8 bytes)
int_size = 8
# we take the limit from CUDA for now
nb_bytes_total = 4096
# Regardless of the number of arguments, we have:
# - The total number of elements (int)
# - The shape (int) on each dimension
fixed_size = int_size + int_size * ndim
# Each argument (input or output) has:
# - 1 pointer (ptr)
# - 1 offset (int)
# - 1 stride (int) per dimension
# Even if the tensor ends up being contiguous, code for the
# non-contiguous case still needs to be generated.
param_size = ptr_size + int_size + int_size * ndim
# Remaining for inputs
nb_bytes_for_inputs = nb_bytes_total - fixed_size - param_size * n_out
# Maximum number of inputs
max_nb_inputs = nb_bytes_for_inputs // param_size
return max_nb_inputs
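# Worked example (illustrative, not from the original source): for a single
# 2-d output (ndim=2, n_out=1), fixed_size = 8 + 8*2 = 24 bytes and each
# argument costs param_size = 8 + 8 + 8*2 = 32 bytes, which leaves
# 4096 - 24 - 32 = 4040 bytes for inputs, i.e. at most 4040 // 32 = 126
# inputs per kernel call.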
class GpuElemwise(_NoPythonOp, HideC, Elemwise):
"""
Elemwise on the GPU.
"""
params_type = gpu_context_type
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
_f16_ok = True
def __str__(self):
if self.name is not None:
return self.name
items = str(sorted(self.inplace_pattern.items()))
return f"GpuElemwise{{{self.scalar_op}}}{items}<gpuarray>"
def max_inputs(self, node_or_outputs):
return max_inputs_to_GpuElemwise(node_or_outputs)
def make_node(self, *inputs):
ctx_name = infer_context_name(*inputs)
inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]
out_info = Elemwise.get_output_info(self, GpuDimShuffle, *inputs)
inputs = out_info[2]
outputs = [
GpuArrayType(broadcastable=br, context_name=ctx_name, dtype=dtype)()
for dtype, br in zip(out_info[0], out_info[1])
]
if len(outputs) > 1:
raise NotImplementedError()
if len(inputs) > max_inputs_to_GpuElemwise(outputs):
raise NotImplementedError(
"Can not make this GpuElemwise with that much inputs"
)
# Try to generate the kernel to catch SupportCodeErrors
scal_ins = [get_scal(i.dtype) for i in inputs]
fake_node = self.scalar_op.make_node(*[i() for i in scal_ins])
try:
code = fake_node.op.c_support_code_apply(fake_node, "test")
if code:
raise SupportCodeError(code)
except MethodNotDefined:
pass
try:
support_code = fake_node.op.c_support_code()
if "struct" in support_code:
# The macro is fine, the C++ struct is not.
raise SupportCodeError(
"struct aren't supported in GpuElemwise support_code" + support_code
)
except MethodNotDefined:
pass
node = Apply(self, inputs, outputs)
return node
def get_params(self, node):
return node.inputs[0].type.context
def _get_vnames(self, node):
inps = [f"i{n}" for n, _ in enumerate(node.inputs)]
outs = [
f"o{n}" if n not in self.inplace_pattern else inps[self.inplace_pattern[n]]
for n, _ in enumerate(node.outputs)
]
return inps, outs
def _generate_op_string(self, node):
inps, outs = self._get_vnames(node)
scal_v_ins = [get_scal(i.dtype)() for i in node.inputs]
        # As float16 isn't a C type and most GPUs don't compute on it,
        # we convert the computation to float32 and let libgpuarray
        # load float16 inputs, cast them to float32, and do the reverse
        # for the output.
scalar_op = self.scalar_op
if isinstance(scalar_op, (aes.Cast, Composite)):
scalar_op = scalar_op.clone_float32()
fake_node = scalar_op.make_node(*scal_v_ins)
scal_v_out = fake_node.outputs
assert len(scal_v_out) == len(node.outputs)
try:
kop = fake_node.op.c_code(
fake_node, "elem_scalar", inps, outs, dict(fail="return;")
)
except MethodNotDefined:
raise AssertionError(
"No c code for this scalar. Can not make a GpuElemwise"
)
# If the following assert fail, then we need to update the
# code handler above.
assert "npy_float16" not in kop
support_code = ""
try:
# We accept only some c_support_code().
# This filter is done in the make_node()
support_code += fake_node.op.c_support_code()
except MethodNotDefined:
pass
for npy, ga in [
("npy_bool", "ga_bool"),
("npy_uint8", "ga_ubyte"),
("npy_uint16", "ga_ushort"),
("npy_uint32", "ga_uint"),
("npy_uint64", "ga_ulong"),
("npy_int8", "ga_byte"),
("npy_int16", "ga_short"),
("npy_int32", "ga_int"),
("npy_int64", "ga_long"),
("npy_float16", "ga_half"),
("npy_float32", "ga_float"),
("npy_float64", "ga_double"),
]:
kop = kop.replace(npy, ga)
return support_code, kop
def c_headers(self, **kwargs):
return ["<numpy_compat.h>", "<gpuarray/types.h>", "<gpuarray/elemwise.h>"]
def c_support_code_struct(self, node, name):
return "\nGpuElemwise *ge;\n"
def c_init_code_struct(self, node, name, sub):
inps, outs = self._get_vnames(node)
nargs = len(inps) + len(outs) - len(self.inplace_pattern)
support_code, kop = self._generate_op_string(node)
res = """
gpuelemwise_arg args[%(nargs)s] = {{0}};
""" % dict(
nargs=nargs
)
for n, (i, name) in enumerate(zip(node.inputs, inps)):
res += """
args[%(n)s].name = %(name)s;
args[%(n)s].typecode = %(typecode)s;
args[%(n)s].flags = GE_READ;
""" % dict(
n=n, name='"{}"'.format(name), typecode=i.type.typecode
)
p = len(inps)
for n, o in enumerate(node.outputs):
if n in self.inplace_pattern:
assert len(node.outputs) == 1
res += "\nargs[%(n)s].flags |= GE_WRITE;\n" % dict(
n=self.inplace_pattern[n]
)
else:
res += """
args[%(n)s].name = %(name)s;
args[%(n)s].typecode = %(typecode)s;
args[%(n)s].flags = GE_WRITE;
""" % dict(
n=p, name='"{}"'.format(outs[n]), typecode=o.type.typecode
)
p += 1
res += """
ge = GpuElemwise_new(%(ctx)s->ctx, %(support)s, %(kop)s, %(nargs)s, args, %(nd)s, GE_CONVERT_F16);
if (ge == NULL) {
PyErr_SetString(PyExc_RuntimeError, "Could not initialize elemwise support");
%(fail)s
}
""" % dict(
nargs=nargs,
ctx=sub["params"],
fail=sub["fail"],
support=as_C_string_const(support_code),
kop=as_C_string_const(kop),
nd=node.inputs[0].ndim,
)
return res
def c_cleanup_code_struct(self, node, name):
return """
GpuElemwise_free(ge);
"""
def c_code(self, node, name, inputs, outputs, sub):
nd = node.outputs[0].ndim
fail = sub["fail"]
initial_dims = ",".join("1" for i in range(nd))
opname = str(self.scalar_op)
ctx = sub["params"]
nargs = len(node.inputs) + len(node.outputs) - len(self.inplace_pattern)
# check that all inputs have valid dimensions
emitted_inames = {}
code = (
"""
// +1 is so that MSVC is happy when nd == 0
size_t dims[%(nd)s+1] = {%(initial_dims)s};
void *rargs[%(nargs)s] = {0};
int err;
"""
% locals()
)
for idx, iname in enumerate(inputs):
if iname in emitted_inames:
assert emitted_inames[iname] is node.inputs[idx]
continue
broadcasts = map(int, node.inputs[idx].broadcastable)
broadcasts = ", ".join(map(str, broadcasts))
nd = node.inputs[idx].ndim
code += (
"""
int broadcasts_%(iname)s[%(nd)s+1] = {%(broadcasts)s};
"""
% locals()
)
emitted_inames[iname] = node.inputs[idx]
# check that all inputs have valid dimensions
emitted_inames = {}
for idx, iname in enumerate(inputs):
code += f"rargs[{idx}] = &{iname}->ga;\n"
if iname in emitted_inames:
continue
code += (
"""
if (%(nd)s != PyGpuArray_NDIM(%(iname)s))
{
PyErr_Format(PyExc_TypeError,
"need %(nd)s dims, not %%u",
PyGpuArray_NDIM(%(iname)s));
%(fail)s;
}
for (int i = 0; i< %(nd)s; ++i)
{
dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i];
if ((!(broadcasts_%(iname)s[i] &&
PyGpuArray_DIMS(%(iname)s)[i] == 1)) &&
(dims[i] != PyGpuArray_DIMS(%(iname)s)[i]))
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" %(idx)d (indices start at 0) has shape[%%d] == %%llu"
", but the output's size on that axis is %%llu.",
i,
(unsigned long long)PyGpuArray_DIMS(%(iname)s)[i],
(unsigned long long)dims[i]
);
%(fail)s;
}
}
"""
% locals()
)
emitted_inames[iname] = True
# check that all outputs have valid dimensions
p = len(node.inputs)
for idx, oname in enumerate(outputs):
typecode = dtype_to_typecode(node.outputs[idx].dtype)
if idx not in self.inplace_pattern.keys():
code += (
"""
for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
{
Py_DECREF(%(oname)s);
%(oname)s = NULL;
}
}
if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS))
{
Py_XDECREF(%(oname)s);
%(oname)s = NULL;
}
if (NULL == %(oname)s)
{
%(oname)s = pygpu_empty(%(nd)d, dims,
%(typecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(oname)s) {
%(fail)s
}
}
rargs[%(p)s] = &%(oname)s->ga;
"""
% locals()
)
p += 1
else:
input_idx = self.inplace_pattern[idx]
iname = inputs[input_idx]
code += (
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_INCREF(%(oname)s);
for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" %(idx)d (indices start at 0), working inplace"
" on input %(input_idx)s, has shape[%%i] == %%llu"
", but the output's size on that axis is %%llu.",
i,
(unsigned long long)PyGpuArray_DIMS(%(oname)s)[i],
(unsigned long long)dims[i]
);
Py_DECREF(%(oname)s);
%(oname)s = NULL;
%(fail)s;
}
}
"""
% locals()
)
code += """
if (GpuElemwise_call(ge, rargs, GE_BROADCAST) != GA_NO_ERROR) {
PyErr_SetString(PyExc_RuntimeError, "Error in the elemwise call");
%(fail)s
}
""" % dict(
fail=sub["fail"]
)
return str(code)
def prepare_node(self, node, storage_map, compute_map, no_recycling, impl=None):
# Since we don't have a Python implementation, ignore `impl` and use C.
return super().prepare_node(
node, storage_map, compute_map, no_recycling, impl="c"
)
def c_code_cache_version(self):
ver = self.scalar_op.c_code_cache_version()
if ver:
return (10, ver)
else:
return ver
class SupportCodeError(Exception):
"""
We do not support certain things (such as the C++ complex struct).
"""
class GpuDimShuffle(DimShuffle):
"""
DimShuffle on the GPU.
"""
_f16_ok = True
c_func_name = "APPLY_SPECIFIC(gpu_dimshuffle)"
def make_node(self, input):
ctx_name = infer_context_name(input)
res = DimShuffle.make_node(self, input)
otype = GpuArrayType(
dtype=res.outputs[0].type.dtype,
broadcastable=res.outputs[0].type.broadcastable,
context_name=ctx_name,
)
input = as_gpuarray_variable(input, ctx_name)
return Apply(self, [input], [otype()])
def __str__(self):
if self.inplace:
s = "InplaceGpuDimShuffle{%s}"
else:
s = "GpuDimShuffle{%s}"
return s % (",".join(str(x) for x in self.new_order))
def perform(self, node, inp, out, params):
(input,) = inp
(storage,) = out
res = input
res = res.transpose(self.shuffle + self.drop)
shape = list(res.shape[: len(self.shuffle)])
for augm in self.augment:
shape.insert(augm, 1)
res = res.reshape(shape)
if not self.inplace:
res = res.copy()
storage[0] = res
class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype, _NoPythonOp):
"""
GpuCAReduceCuda is a Reduction along some dimensions by a scalar op.
Parameters
----------
reduce_mask
The dimensions along which to reduce. The `reduce_mask` is a tuple of
booleans (actually integers 0 or 1) that specify for each input
dimension, whether to reduce it (1) or not (0).
pre_scalar_op
If present, must be a scalar op with only 1 input. We will execute it
on the input value before reduction.
Examples
--------
When scalar_op is an `aesara.scalar.basic.Add` instance:
- reduce_mask == (1,) sums a vector to a scalar
- reduce_mask == (1,0) computes the sum of each column in a matrix
- reduce_mask == (0,1) computes the sum of each row in a matrix
- reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor.
Notes
-----
Any reduce_mask of all zeros is a sort of 'copy', and may be removed during
graph optimization.
This Op is a work in progress.
    This op was recently upgraded from just GpuSum to a general CAReduce. Not
many code cases are supported for scalar_op being anything other than
scalar.Add instances yet.
Important note: if you implement new cases for this op, be sure to
benchmark them and make sure that they actually result in a speedup.
GPUs are not especially well-suited to reduction operations so it is
quite possible that the GPU might be slower for some cases.
"""
__props__ = (
"axis",
"reduce_mask",
"dtype",
"acc_dtype",
"scalar_op",
"pre_scalar_op",
)
_f16_ok = True
verbose = 0
def __init__(
self,
scalar_op,
axis=None,
reduce_mask=None,
dtype=None,
acc_dtype=None,
pre_scalar_op=None,
):
if reduce_mask is not None:
reduce_mask = tuple(reduce_mask)
self.reduce_mask = reduce_mask
# used to make sure that calls to scalar op
# have unique name arguments
self._n_scalar_op_calls = 0
CAReduceDtype.__init__(
self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype
)
self.pre_scalar_op = pre_scalar_op
if pre_scalar_op:
assert pre_scalar_op.nin == 1
def __str__(self):
pre = ""
if self.pre_scalar_op:
pre = f"pre={self.pre_scalar_op},red="
ax = ""
if self.axis is not None:
ax = f"{{{', '.join(str(x) for x in self.axis)}}}"
return f"GpuCAReduceCuda{{{pre}{str(self.scalar_op)}}}{ax}"
def __setstate__(self, d):
self.__dict__.update(d)
# For unpickling of old ops.
if not hasattr(self, "pre_scalar_op"):
self.pre_scalar_op = None
def make_node(self, x):
x = as_gpuarray_variable(x, infer_context_name(x))
if x.type.context.kind != b"cuda":
raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices")
ret = super().make_node(x)
self = copy.copy(self)
self.axis = ret.op.axis
if self.pre_scalar_op:
# Currently we only tested pre_scalar_op that don't cause
# upcast.
assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype
if self.reduce_mask is None:
if self.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in self.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
self.reduce_mask = tuple(reduce_mask)
if x.type.ndim != len(self.reduce_mask):
raise TypeError(f"x must have rank {len(self.reduce_mask)}")
if (
"complex" in x.dtype
or "complex" in ret.outputs[0].dtype
or "complex" in self._acc_dtype(x.dtype)
):
raise NotImplementedError("We don't support complex in gpu reduction")
return Apply(
self,
[x],
[
GpuArrayType(
ret.outputs[0].dtype,
ret.outputs[0].type.broadcastable,
context_name=x.type.context_name,
)()
],
)
def supports_c_code(self, inputs):
"""
Returns True if the current op and reduce pattern has functioning C code.
"""
# If we don't even have the right method, we certainly
# don't support the C code
# (This is the test that used to be implemented by
# local_gpu_sum)
pattern = "".join(str(i) for i in self.reduce_mask)
if not hasattr(self, f"c_code_reduce_{pattern}"):
return False
# Now that this is a general reduction op, we might
# have a method for a pattern, but that pattern
# might not be implemented for the current scalar op.
# To detect this more complicated situation, we
# make fake arguments to c_code, try to run them,
# and see if NotImplementedError gets raised.
node = self.make_node(*inputs)
name = "fake_name"
inp = [f"fake_input_name_{i}" for i in range(len(inputs))]
out = [f"fake_output_name_{i}" for i in range(len(node.outputs))]
sub = {"fail": "fake failure code", "params": "fake context"}
try:
self.c_code(node, name, inp, out, sub)
if not self.gpu_kernels(node, name):
return False
except NotImplementedError:
return False
return True
def c_headers(self, **kwargs):
return ["<numpy_compat.h>", "<gpuarray/types.h>"]
def c_support_code(self, **kwargs):
return """
template <typename T>
static T ceil_intdiv(T a, T b)
{
return (a/b) + ((a % b) ? 1: 0);
}
"""
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
nd_in = node.inputs[0].type.ndim
nd_out = node.outputs[0].type.ndim
# For complex, we need to use aesara_complex* in the c code to
# have it run. But libgpuarray don't understand it.
in_dtype = node.inputs[0].type.dtype_specs()[1]
out_dtype = node.outputs[0].type.dtype_specs()[1]
gin_dtype = "npy_" + node.inputs[0].dtype
gout_dtype = "npy_" + node.outputs[0].dtype
assert nd_in - nd_out == sum(self.reduce_mask)
sio = StringIO()
fail = sub["fail"]
ctx = sub["params"]
# check input
print(
"""
if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s)
{
PyErr_Format(PyExc_TypeError,
"required nd=%(nd_in)s, got nd=%%u", PyGpuArray_NDIM(%(x)s));
%(fail)s;
}
"""
% locals(),
file=sio,
)
# It might be nice to use a property of the op class to do this,
# but tensor.elemwise.CAReduce has this exact same check so I guess
# this is OK to do
if self.scalar_op in [aes.scalar_minimum, aes.scalar_maximum]:
conds = [
f"(PyGpuArray_DIMS({x})[{i}] == 0)"
for i in range(nd_in)
if self.reduce_mask[i]
]
assert len(conds) > 0
cond = "(" + " || ".join(conds) + ")"
print(
"""
if %(cond)s
{
PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis.");
%(fail)s;
}
"""
% locals(),
file=sio,
)
#
# alloc an output if we need one
#
# check the basics of out output
print(
f"""
if ( !{z}
|| (PyGpuArray_NDIM({z}) != {nd_out})
""",
file=sio,
)
# ensure that the output has the right non-reduced dimensions
j = 0
for i in range(nd_in):
if not self.reduce_mask[i]:
print(
" || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) "
% locals(),
file=sio,
)
j += 1
print(
"""
)
{
"""
% locals(),
file=sio,
)
if nd_out > 0:
print(f"size_t new_dims[{nd_out}]; ", file=sio)
else:
print("size_t *new_dims=NULL; ", file=sio)
j = 0
for i in range(nd_in):
if not self.reduce_mask[i]:
print(
f"new_dims[{j}] = PyGpuArray_DIMS({x})[{i}];",
file=sio,
)
j += 1
out_typecode = dtype_to_typecode(gout_dtype[4:])
print(
"""
Py_XDECREF(%(z)s);
%(z)s = pygpu_empty(%(nd_out)s, new_dims,
%(out_typecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (NULL == %(z)s)
{
PyErr_Format(PyExc_RuntimeError, "Failed to allocate output");
%(fail)s;
}
}
"""
% locals(),
file=sio,
)
# \begin bracket the reduction in a check that there is
# actually work to do
if getattr(self.scalar_op, "identity", None) == 0:
zero_shp = f"GpuArray_memset(&{z}->ga, 0)"
# TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
else:
scalar_op = self.scalar_op
zero_shp = (
"""
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduceCuda not implemented when input shape is 0"
" for this scalar_op: %(scalar_op)s");
%(fail)s;
"""
% locals()
)
print(
"""
if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){
%(zero_shp)s;
}
else if (PyGpuArray_SIZE(%(z)s))
{
"""
% locals(),
file=sio,
)
#
# Now perform the reduction
#
if all(i == 1 for i in self.reduce_mask):
# check if the tensor is ccontiguous, if true, use the c_code_reduce_ccontig code.
# TODO: check if we are ccontiguous when we un-dimshuffle
# TODO: if only some dims are ccontiguous, call version with less dims.
print("if(%(x)s->ga.flags & GA_C_CONTIGUOUS){" % locals(), file=sio)
self.c_code_reduce_ccontig(sio, node, name, x, z, fail)
print("}else{", file=sio)
getattr(self, f"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}")(
sio, node, name, x, z, fail
)
print("}", file=sio)
else:
getattr(self, f"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}")(
sio, node, name, x, z, fail
)
# \end bracket the reduction ...
print(
"""
}
"""
% locals(),
file=sio,
)
return sio.getvalue()
def _makecall(
self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()
):
"""
Return a string for making a kernel call.
The return value looks something like:
.. code-block:: c
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
if (verbose)
printf("running kernel_reduce_10_%(name)s\\n");
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
void *kernel_params[] = {
(void *)&PyGpuArray_DIMS(%(x)s)[0],
(void *)&PyGpuArray_DIMS(%(x)s)[1],
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0,
(void *)&stride_A1,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0};
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
%(err_check)s
"""
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
sio = StringIO()
if pattern is None:
pattern = "".join(str(c) for c in self.reduce_mask)
ndim = len(self.reduce_mask)
nd_out = ndim - sum(self.reduce_mask)
shapes_format = f"shape=({','.join(['%llu'] * node.inputs[0].ndim)})"
shapes_data = ",".join(
[f"(size_t) PyGpuArray_DIMS({x})[{i}]" for i in range(node.inputs[0].ndim)]
)
k_var = f"kernel_reduce_{pattern}_{name}"
params = []
for i in range(ndim):
params.append(f"(void *)&PyGpuArray_DIMS({x})[{i}]")
for declaration, value in extra_dims:
print(declaration % locals(), file=sio)
params.append(value)
params.append(f"(void *){x}->ga.data")
params.append(f"(void *)&{x}->ga.offset")
for i in range(ndim):
print(
"""
ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s);
"""
% locals(),
file=sio,
)
params.append("(void *)&stride_A%(i)d" % locals())
for declaration, value in extra_strides:
print(declaration % locals(), file=sio)
params.append(value)
params.append(f"(void *){z}->ga.data")
params.append(f"(void *)&{z}->ga.offset")
for i in range(nd_out):
print(
"""
ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s);
"""
% locals(),
file=sio,
)
params.append("(void *)&stride_Z%(i)d" % locals())
kernel_params = ", ".join(params)
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
if (verbose)
printf("running kernel_reduce_%(pattern)s_%(name)s\\n");
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
void *kernel_params[] = { %(kernel_params)s };
if (verbose>1)
printf("n_threads[0]=%%lu, n_threads[1]=%%lu, "
"n_threads[2]=%%lu, n_threads=%%lu, "
"n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, "
"n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\n",
n_threads[0],n_threads[1],
n_threads[2],
n_threads[0]*n_threads[1]*
n_threads[2],
n_blocks[0],n_blocks[1],n_blocks[2],
n_blocks[0]*n_blocks[1]*n_blocks[2],
n_shared, %(shapes_data)s);
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
%(err_check)s
"""
% locals(),
file=sio,
)
return sio.getvalue()
def _k_decl(self, node, nodename, pattern=None, ndim=None, reduce_mask=None):
"""
Return a string to declare a kernel function.
The result will look something like this:
.. code-block:: c
KERNEL void kernel_reduce_110_%(nodename)s(
const ga_size d0,
const ga_size d1,
const ga_size d2,
const %(in_type)s *A,
const ga_size offset_A,
const ga_ssize sA0,
const ga_ssize sA1,
const ga_ssize sA2,
%(out_type)s * Z,
const ga_size offset_Z,
const ga_ssize sZ0)
Since the nodename is unique, we don't need to put the name
of the scalar_op in here.
"""
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
if reduce_mask is None:
reduce_mask = self.reduce_mask
if ndim is None:
ndim = len(reduce_mask)
if pattern is None:
pattern = "".join(str(i) for i in reduce_mask)
kname = f"kernel_reduce_{pattern}"
k_var = f"kernel_reduce_{pattern}_{nodename}"
params = []
sio = StringIO()
print(
f"""
KERNEL void {kname}(
""",
file=sio,
)
for i in range(ndim):
params.append("uintp")
print(
f"""
const ga_size d{i},
""",
file=sio,
)
params.append(gpuarray.GpuArray)
params.append("uintp")
print(
f"""
const {in_type} *A, const ga_size offset_A,
""",
file=sio,
)
for i in range(ndim):
params.append("intp")
print(
f"""
const ga_ssize sA{i},
""",
file=sio,
)
params.append(gpuarray.GpuArray)
params.append("uintp")
print(
f"""
{out_type} * Z, const ga_size offset_Z
""",
file=sio,
)
for i in range(ndim - sum(reduce_mask)):
params.append("intp")
print(
f"""
, const ga_ssize sZ{i}
""",
file=sio,
)
print(")", file=sio)
return sio.getvalue(), kname, params, k_var
def _k_init(self, node, nodename):
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
# We need to use aesara_complex* and not npy_complex*
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
return (
"""
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y
+ threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = 0;
"""
% locals()
)
def _assign_init(self, first_item, dtype):
"""
        This returns the initial value for myresult.
        If the scalar op has an identity value, return it.
        Otherwise, check that the scalar op is maximum or minimum
        and return first_item, which should be the first element of the
        reduction. As the maximum and minimum of the same value don't change,
        this works.
"""
if hasattr(self.scalar_op, "identity"):
return str(self.scalar_op.identity)
else:
assert isinstance(self.scalar_op, (aes.ScalarMaximum, aes.ScalarMinimum))
if self.pre_scalar_op: # TODO: multiple dtypes
# dtype = node.inputs[0].dtype
dummy_var = aes.Scalar(dtype=dtype)()
dummy_node = self.pre_scalar_op.make_node(dummy_var)
dummy_name = "assign_init_pre_scalar_op" + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
t = self.pre_scalar_op.c_code(
dummy_node, dummy_name, (first_item,), ("",), {}
)
assert t.startswith(" = ")
first_item = t[3:]
if first_item[-1] == ";":
first_item = first_item[:-1]
return first_item
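    # Illustrative behaviour (not from the original source): for a sum
    # reduction, scalar Add has identity 0, so _assign_init returns "0"; for a
    # max/min reduction there is no identity, so the expression for the first
    # element (e.g. the load of A[0]) is returned, optionally transformed by
    # pre_scalar_op.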
def _assign_reduce(self, node, name, left, right, sub, pre):
"""
Parameters
----------
node
The node argument to this op's c_code.
name
The name argument to this op's c_code.
left
A C code string identifying an lvalue.
right
A C code string identifying an expression.
sub
The sub argument to this op's c_code.
pre
If True, we will add the pre_scalar_op.c_code.
Returns
-------
str
C code to reduce left and right, assigning the result to left.
"""
(x,) = node.inputs
in_dtype = x.dtype
out_dtype = node.outputs[0].dtype
dummy_left = Scalar(dtype=out_dtype)()
dummy_right = Scalar(dtype=in_dtype)()
dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)
dummy_name = name + "_scalar_op" + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
if pre and self.pre_scalar_op:
assert left == "myresult"
dummy_node = self.pre_scalar_op.make_node(dummy_left)
dummy_name = name + "_scalar_op" + str(self._n_scalar_op_calls)
self._n_scalar_op_calls += 1
t = self.pre_scalar_op.c_code(dummy_node, dummy_name, (right,), ("",), sub)
assert t.startswith(" = ")
right = t[3:]
if right[-1] == ";":
right = right[:-1]
return self.scalar_op.c_code(
dummy_node, dummy_name, (left, right), (left,), sub
)
def _k_reduce_buf(self, z_pos, node, name, sub):
"""
WRITEME
Parameters
----------
node, name, sub
These should be passed through from the original call to c_code.
"""
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
write_out = write_w(node.outputs[0].dtype)
current_version = """
__syncthreads(); // some kernel do multiple reduction.
buf[threadNum] = myresult;
__syncthreads();
// rest of function is handled by one warp
if (threadNum < warpSize) {
//round up all the partial sums into the first `warpSize` elements
for (int i = threadNum + warpSize; i < threadCount; i += warpSize)
{
"""
current_version += (
self._assign_reduce(node, name, "myresult", "buf[i]", sub, False)
+ """
}
buf[threadNum] = myresult;
}
__syncthreads();
for (unsigned int _n = warpSize / 2; _n > 0; _n /= 2) {
if (threadNum < _n && threadNum + _n < threadCount)
"""
)
current_version += self._assign_reduce(
node, name, "buf[threadNum]", "buf[threadNum+_n]", sub, False
)
current_version += """
__syncthreads();
}
if (threadNum == 0) {
%(z_pos)s = %(write_out)s(buf[0]);
}
"""
current_version = current_version % locals()
return current_version
    # Threads must be organized so that threads with the same value of
    # threadNum % nb_reduce contribute to the same sum; nb_reduce <= warpSize.
def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce):
reduce_fct = self._assign_reduce(node, name, "myresult", "buf[i]", {}, False)
write_out = write_w(node.outputs[0].dtype)
return (
"""
__syncthreads(); // some kernel do multiple reduction.
buf[threadNum] = myresult;
__syncthreads();
// rest of function is handled by one warp
if (threadNum < %(nb_reduce)s)
{
//round up all the partial sums into the first `nb_reduce` elements
for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s)
{
%(reduce_fct)s;
}
%(z_pos)s = %(write_out)s(myresult);
}
"""
% locals()
)
def c_code_reduce_ccontig(self, sio, node, name, x, z, fail):
verbose = self.verbose
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
if getattr(self.scalar_op, "identity", None) == 0:
zero_shp = f"GpuArray_memset(&{z}->ga, 0)"
# TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
else:
zero_shp = (
"""
PyErr_Format(PyExc_NotImplementedError,
"GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op");
%(fail)s;
"""
% locals()
)
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = f"kernel_reduce_ccontig_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
{
if(PyGpuArray_SIZE(%(x)s)==0){
%(zero_shp)s;
}else{
int verbose = %(verbose)s;
size_t numEls = PyGpuArray_SIZE(%(x)s);
size_t n_threads = std::min(numEls, (size_t) 256);
size_t n_blocks = 1;
void *kernel_params[] = {(void *)&numEls,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset};
if (verbose) printf("running kernel_reduce_ccontig_%(name)s"
" n_threads=%%llu, size=%%llu, ndim=%%u\\n",
n_threads, numEls,
PyGpuArray_NDIM(%(x)s));
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads;
int err = GpuKernel_call(&%(k_var)s, 1, &n_blocks, &n_threads, n_shared, kernel_params);
%(err_check)s
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_1(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_11(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1];
n_threads[1] -= 1;
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_01X(self, sio, node, name, x, z, fail, N):
"""
Parameters
----------
N
            The number of 1s in the pattern: N=1 -> 01, N=2 -> 011, N=3 -> 0111.
            Works for N=1, 2, 3.
"""
assert N in [1, 2, 3]
verbose = self.verbose
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
makecall = self._makecall(node, name, x, z, fail)
N_pattern = "".join(["1"] * N)
param_dim = ",".join([f"PyGpuArray_DIMS({x})[{i}]" for i in range(N + 1)])
strides_dim = ",".join(
[f"PyGpuArray_STRIDES({x})[{i}]/sizeof({in_dtype})" for i in range(N + 1)]
)
threads_y = (
"""
//get as many y threads as we can fit
while (n_threads[0] * (n_threads[1]+1) <= 256)
{
if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1])
n_threads[1] += 1;
else
break;
}"""
% locals()
)
threads_z = (
"""
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
{
if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2])
n_threads[2] += 1;
else
break;
}
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
"""
% locals()
)
if len(self.reduce_mask) == 2:
threads_y = ""
threads_z = ""
if len(self.reduce_mask) == 3:
threads_z = ""
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1};
%(threads_y)s
%(threads_z)s
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_01(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 1)
def c_code_reduce_011(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 2)
def c_code_reduce_0111(self, sio, node, name, x, z, fail):
self.c_code_reduce_01X(sio, node, name, x, z, fail, 3)
def c_code_reduce_10(self, sio, node, name, x, z, fail):
verbose = self.verbose
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = f"kernel_reduce_10_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
{
int verbose = %(verbose)s;
if(PyGpuArray_STRIDES(%(x)s)[0]>
PyGpuArray_STRIDES(%(x)s)[1]){
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
size_t n_threads[3] = {32, 1, 1};
// We kindof reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s;
size_t A = 1;
size_t B = PyGpuArray_DIMS(%(x)s)[0];
size_t C = PyGpuArray_DIMS(%(x)s)[1];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
ssize_t stride_A0 = 1;
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = 1;
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
%(err_check)s
}else{
GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1};
if (verbose) {
fprintf(stderr,
"running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\n",
(unsigned long long)n_blocks[0],
(unsigned long long)n_blocks[1]);
}
assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);
size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0];
size_t dim_0 = 1;
ssize_t stride_A0 = 1;
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = 1;
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&dim_0,
(void *)&PyGpuArray_DIMS(%(x)s)[0],
(void *)&PyGpuArray_DIMS(%(x)s)[1],
(void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
%(err_check)s
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_010(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
makecall_inner = self._makecall(node, name, x, z, fail, pattern="010_inner")
pattern = "".join(str(i) for i in self.reduce_mask)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
k_var = f"kernel_reduce_010_AD_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
print(
"""
{
//int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2];
//if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16))
if (1) // if the alternative is less buggy, consider not using this branch
{
// If there are a lot of summations to do, then we can use simple parallelization -
// use each thread to do one sum.
// we might as well launch blocks of 32 threads because that's the warp size.
// we could schedule more threads if we were maxing out the gridsize below, but
// the gridsize is way more than the physical hardware and I think 32 threads
// on a huge grid is enough to fully use the hardware.
size_t n_threads[3] = {32, 1, 1};
// We kindof reshape the input implicitly to something 4D:
// the shape A,B,C -> A, B, D, E
// where C <= D*E < C+32
// where E==32
size_t A = PyGpuArray_DIMS(%(x)s)[0];
size_t B = PyGpuArray_DIMS(%(x)s)[1];
size_t C = PyGpuArray_DIMS(%(x)s)[2];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
%(err_check)s
}
else
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1};
while( (n_threads[0]*(n_threads[1]+1)<=256)
&& (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){
n_threads[1]++;
}
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1};
n_blocks[1] = std::min(
ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
(size_t)n_threads[0]),
(size_t)(4096 / n_blocks[0])
);
if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),
PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)),
PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s))
==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)
&& n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
(size_t)n_threads[0])){
if(verbose>1)
printf("n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\n",
PyGpuArray_DIMS(%(x)s)[0],4096,
ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]),
(size_t)(4096 / n_blocks[0]));
assert(n_threads[0]<=32);
%(makecall_inner)s
}else{
n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1],
(size_t) 256);
n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096);
n_blocks[1] = std::min(
PyGpuArray_DIMS(%(x)s)[2],
(size_t)(4096 / n_blocks[0])
);
%(makecall)s
}
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_0101(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_100(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
k_var = f"kernel_reduce_010_AD_{name}"
err_check = (
"""
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: %(k_var)s: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s;
}
"""
% locals()
)
# use threadIdx.x for i0
# use blockIdx.x for i1
# use blockIdx.y for i2
print(
"""
{
int verbose = %(verbose)s;
if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1};
while (n_blocks[0] * (n_blocks[1]+1) <= 4096 &&
n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2])
{
n_blocks[1] += 1;
}
%(makecall)s
}
else
{ // reuse 010_AD kernel, we transpose the 2 first dim
// See the reduction for the real 010_AD kernel for
// explanation. We do this to get coalesced read.
size_t n_threads[3] = {32, 1, 1};
size_t A = PyGpuArray_DIMS(%(x)s)[1];
size_t B = PyGpuArray_DIMS(%(x)s)[0];
size_t C = PyGpuArray_DIMS(%(x)s)[2];
size_t D = C/32;
if (32*D < C) D+= 1;
assert ((C <= 32*D) && (32*D < C+32));
// The gridsize would ideally be (A, D). But we do the following logic to make
// sure we don't ask for a grid that is too big.
size_t n_blocks[3] = {A, D, 1};
if (n_blocks[0] > 4096) n_blocks[0] = 4096;
if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
size_t n_shared = 0;
ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
void *kernel_params[] = {
(void *)&A, (void *)&B, (void *)&C, (void *)&D,
(void *)%(x)s->ga.data,
(void *)&%(x)s->ga.offset,
(void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
(void *)%(z)s->ga.data,
(void *)&%(z)s->ga.offset,
(void *)&stride_Z0, (void *)&stride_Z1};
int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
%(err_check)s
}
}
"""
% locals(),
file=sio,
)
def c_code_reduce_110(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
while (n_threads[0]*n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_001(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
while (n_blocks[0] * n_blocks[1] <= 4096)
{
if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_blocks[1] += 1;
}
n_blocks[1] -= 1;
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_101(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(
node,
name,
x,
z,
fail,
extra_dims=[("size_t one = 1;", "(void *) &one")],
extra_strides=[("ssize_t sone = 1;", "(void *) &sone")],
pattern="1011",
)
print(
"""
{
int verbose = %(verbose)s;
// size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3],
// (size_t) 256), 1, 1};
size_t n_threads[3] = {1, 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_111(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_0011(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
in_dtype = "npy_" + node.inputs[0].dtype
out_dtype = "npy_" + node.outputs[0].dtype
acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
print(
"""
{
int verbose = %(verbose)s;
size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
while (n_blocks[0] * n_blocks[1] <= 4096 &&
n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1])
{
n_blocks[1] += 1;
}
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * n_threads[1] <= 256
&& n_threads[1] < PyGpuArray_DIMS(%(x)s)[2]
&& n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200))
{
n_threads[1] += 1;
}
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_1111(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_reduce_1011(self, sio, node, name, x, z, fail):
verbose = self.verbose
makecall = self._makecall(node, name, x, z, fail)
print(
"""
{
int verbose = %(verbose)s;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
"""
% locals(),
file=sio,
)
def c_code_cache_version_apply(self, node):
version = [
24,
self.verbose,
] # the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend...
scalar_node = Apply(
self.scalar_op,
[Scalar(dtype=input.type.dtype)() for input in node.inputs],
[Scalar(dtype=output.type.dtype)() for output in node.outputs],
)
version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))
for i in node.inputs + node.outputs:
version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version())
version.extend(self.kernel_version(node))
if all(version):
return tuple(version)
else:
return ()
def gpu_kernels(self, node, nodename):
nd_in = len(self.reduce_mask)
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
assign_dtype = in_dtype
flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype)
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
load_in = load_w(in_dtype)
write_out = write_w(out_dtype)
kernels = []
if all(i == 1 for i in self.reduce_mask):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node, nodename, "myresult", load_in + "(A[i0])", {}, True
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
kname = "kernel_reduce_ccontig"
k_var = "kernel_reduce_ccontig_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
%(out_type)s *Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = ["uintp", gpuarray.GpuArray, "uintp", gpuarray.GpuArray, "uintp"]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1,):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node, nodename, "myresult", load_in + "(A[i0 * sA0])", {}, True
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
kname = "kernel_reduce_1"
k_var = "kernel_reduce_1_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
gpuarray.GpuArray,
"uintp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1])",
{},
True,
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
kname = "kernel_reduce_11"
k_var = "kernel_reduce_11_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y*blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
# 01, 011, 0111
if (
0 == self.reduce_mask[0]
and all(self.reduce_mask[1:])
and nd_in in [2, 3, 4]
):
# this kernel uses one block for each row.
# threads per block for each element per row.
N_pattern = "".join(["1"] * (nd_in - 1))
# TODO: is it faster to hardcode sA3, etc. in the later
# code, rather than have the for_* variables declare them
# and the later code use their names?
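# Illustrative sketch (assuming an add reduction): reduce_mask[i] == 1
# means axis i is reduced, so the (0, 1, 1) case handled here is roughly
#     out[i0] = x[i0, :, :].sum()      # i.e. x.sum(axis=(1, 2))
# with one CUDA block per kept index i0 and that block's threads
# cooperating on the reduced trailing axes.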
if nd_in == 2:
for_i1 = "for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)"
first_i1 = "threadIdx.x"
sA1 = "sA1"
for_i2 = "int i2=0, sA2=0;"
sA2 = "0"
first_i2 = "0"
for_i3 = "int i3=0, sA3=0;"
sA3 = "0"
first_i3 = "0"
if nd_in == 3:
for_i1 = "for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)"
first_i1 = "threadIdx.y"
sA1 = "sA1"
for_i2 = "for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)"
first_i2 = "threadIdx.x"
sA2 = "sA2"
for_i3 = "int i3=0, sA3=0;"
first_i3 = 0
sA3 = "0"
if nd_in == 4:
for_i1 = "for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)"
first_i1 = "threadIdx.z"
sA1 = "sA1"
for_i2 = "for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)"
first_i2 = "threadIdx.y"
sA2 = "sA2"
for_i3 = "for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)"
first_i3 = "threadIdx.x"
sA3 = "sA3"
reducebuf = self._k_reduce_buf("Z[i0 * sZ0]", node, nodename, sub={})
param_dim = ",".join([f"const ga_size d{i}" for i in range(nd_in)])
param_strides = ",".join([f"const ga_ssize sA{i}" for i in range(nd_in)])
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_init = self._assign_init(
load_in
+ "(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])"
% locals(),
assign_dtype,
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])",
{},
True,
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){
myresult = %(reduce_init)s;
%(for_i1)s{
%(for_i2)s{
%(for_i3)s{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contiguous
# memory (a segment of a column).
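# Illustrative sketch (assuming an add reduction): for the (1, 0) mask
# this computes a column-wise reduction, roughly Z[j] = sum(A[:, j]),
# i.e. x.sum(axis=0), which is why a c_contiguous A makes each warp
# stride down a column rather than along a row.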
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i2*sZ1]", node, nodename, sub={}
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])", assign_dtype
)
kname = "kernel_reduce_010"
k_var = "kernel_reduce_010_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]:
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(X[a * sX0 + b * sX1 + c * sX2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(X[a * sX0 + 0 * sX1 + c * sX2])", assign_dtype
)
kname = "kernel_reduce_010_AD"
k_var = "kernel_reduce_010_AD_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size A, const ga_size B, const ga_size C, const ga_size D,
const %(in_type)s *X, const ga_size offset_X,
const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
X = (const %(in_type)s *)(((char *)X)+offset_X);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = 0;
for (int a = blockIdx.x; a < A; a += gridDim.x)
{
for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y)
{
int c = i2_D * 32 + threadIdx.x;
if (c < C)
{
myresult = %(reduce_init)s;
for (int b = 0; b < B; ++b)
{
%(reduce_fct)s;
}
Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult);
}
}
}
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 1, 0):
#
# This kernel is optimized for the case where the innermost
# dimensions have the smallest stride.
# It uses one block for multiple columns (up to 32; TODO: confirm),
# and threads per block for each element per column.
# thread.x = dim 2 contiguous
# thread.y = dim 1
# block.x = dim 0
# block.y = dim 1 rest
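# Sketch of the implied mapping (assuming standard CUDA launch
# semantics): each (blockIdx.x, blockIdx.y * blockDim.x + threadIdx.x)
# pair selects one output element Z[i0, i2], and the threadIdx.y threads
# of that block then cooperate on the reduced middle axis i1.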
init = self._k_init(node, nodename)
decl, kname, params, k_var = self._k_decl(
node, nodename, pattern="010_inner"
)
reducebuf = self._k_reduce_buf_multiple(
"Z[i0 * sZ0 + i2*sZ1]", node, nodename, "blockDim.x"
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + 0 * sA1 + i2 * sA2])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x)
{
myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contiguous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf(
"Z[blockIdx.x * sZ0]", node, nodename, sub={}
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[blockIdx.x * sA2])", assign_dtype
)
kname = "kernel_reduce_110"
k_var = "kernel_reduce_110_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 0, 0):
reducebuf = self._k_reduce_buf(
"Z[i1 * sZ0 + i2 * sZ1]", node, nodename, sub={}
)
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i1 * sA1 + i2 * sA2])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x)
{
myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1, 1):
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i1 * sZ1]", node, nodename, sub={}
)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + i1 * sA1])", assign_dtype
)
kname = "kernel_reduce_001"
k_var = "kernel_reduce_001_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 0, 1, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i1 * sZ1]", node, nodename, sub={}
)
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + i1 * sA1])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (0, 1, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf(
"Z[i0 * sZ0 + i2 * sZ1]", node, nodename, sub={}
)
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[i0 * sA0 + i2 * sA2])", assign_dtype
)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 1, 1, 1):
reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
sio = StringIO()
print(
"""#include "cluda.h"
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = 0; i0 < d0; i0++)
for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1):
reducebuf = self._k_reduce_buf("Z[blockIdx.x*sZ0]", node, nodename, sub={})
reduce_fct = self._assign_reduce(
node,
nodename,
"myresult",
load_in + "(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])",
{},
True,
)
reduce_init = self._assign_init(
load_in + "(A[blockIdx.x * sA1])", assign_dtype
)
kname = "kernel_reduce_1011"
k_var = "kernel_reduce_1011_" + nodename
sio = StringIO()
print(
"""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
%(acc_type)s myresult = %(reduce_init)s;
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
"""
% locals(),
file=sio,
)
params = [
"uintp",
"uintp",
"uintp",
"uintp",
gpuarray.GpuArray,
"uintp",
"intp",
"intp",
"intp",
"intp",
gpuarray.GpuArray,
"uintp",
"intp",
]
kernels.append(
Kernel(
code=sio.getvalue(),
name=kname,
params=params,
flags=flags,
objvar=k_var,
)
)
return kernels
class GpuErfinv(Erfinv):
"""
Inverse error function for GPU.
"""
def c_headers(self, **kwargs):
return ["math_functions.h", "cublas_v2.h"]
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
if node.inputs[0].type in complex_types:
raise NotImplementedError("type not supported", type)
# NB: CUDA erfinv function (GPU op) returns NaN if x not in [-1;1],
# while `scipy.special.erfinv` (CPU op) returns an infinite (-inf if x < -1, +inf if x > 1).
# For consistency of CPU and GPU ops, we wrap the CUDA erfinv in the following conditions
# to ensure that GPU op returns the same values as CPU op.
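# For example (assuming CUDA's erfinv returns +/-inf at +/-1): an
# out-of-range input such as x = 1.5 is clamped to erfinv(1.0) = +inf,
# matching the CPU behaviour described above rather than returning NaN.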
return (
"%(z)s = (%(x)s <= -1) ? erfinv(-1.0): ((%(x)s >= 1) ? erfinv(1.0): erfinv(%(x)s));"
% locals()
)
gpu_erfinv = GpuErfinv(upgrade_to_float_no_complex, name="gpu_erfinv")
class GpuErfcinv(Erfcinv):
"""
Inverse complementary error function for GPU.
"""
def c_headers(self, **kwargs):
return ["math_functions.h", "cublas_v2.h"]
def c_code(self, node, name, inp, out, sub):
(x,) = inp
(z,) = out
if node.inputs[0].type in complex_types:
raise NotImplementedError("type not supported", type)
# NB: CUDA erfcinv function (GPU op) returns NaN if x not in [0;2],
# while `scipy.special.erfcinv` (CPU op) returns an infinite (+inf if x < 0, -inf if x > 2).
# For consistency of CPU and GPU ops, we wrap the CUDA erfcinv in the following conditions
# to ensure that GPU op returns the same values as CPU op.
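# For example (same assumption as for erfinv): erfcinv(0.0) is +inf and
# erfcinv(2.0) is -inf, so out-of-range inputs are clamped to the
# infinities described above rather than returning NaN.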
return (
"%(z)s = (%(x)s <= 0) ? erfcinv(0.0): ((%(x)s >= 2) ? erfcinv(2.0): erfcinv(%(x)s));"
% locals()
)
gpu_erfcinv = GpuErfcinv(upgrade_to_float_no_complex, name="gpu_erfcinv")
# Caching GpuCAReduceCuda
def gpu_ca_reduce_cuda(
scalar_op,
axis=None,
reduce_mask=None,
dtype=None,
acc_dtype=None,
pre_scalar_op=None,
):
key = (scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op)
if key not in gpu_ca_reduce_cuda.cache:
gpu_ca_reduce_cuda.cache[key] = GpuCAReduceCuda(
scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op
)
return gpu_ca_reduce_cuda.cache[key]
gpu_ca_reduce_cuda.cache = {}
class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):
"""
CAReduce that reuse the python code from gpuarray.
"""
def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):
if scalar_op.identity is None:
raise ValueError("No identity on scalar op")
super().__init__(scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype)
def __str__(self):
ax = ""
if self.axis is not None:
ax = f"{{{', '.join(str(x) for x in self.axis)}}}"
return f"GpuReduce{{{self.scalar_op}}}{ax}"
def make_node(self, input):
ctx_name = infer_context_name(input)
res = super().make_node(input)
input = as_gpuarray_variable(input, ctx_name)
otype = GpuArrayType(
dtype=res.outputs[0].dtype,
broadcastable=res.outputs[0].broadcastable,
context_name=ctx_name,
)
if res.op.axis is not None:
redux = []
for i in range(len(input.type.broadcastable)):
redux.append(i in res.op.axis)
# since redux is just another way to describe what is in axis
# it doesn't need to be compared in __eq__ or __hash__
res.op.redux = redux
return Apply(res.op, [input], [otype()])
def get_params(self, node):
return node.outputs[0].type.context
def prepare_node(self, node, storage_map, compute_map, impl):
# cache the kernel object
self.get_kernel_cache(node)
def get_kernel_cache(self, node):
attr = "@cache_reduction_k"
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
if not hasattr(node, attr):
acc_dtype = getattr(self, "acc_dtype", None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
if any(redux):
setattr(node, attr, self.generate_kernel(node, acc_dtype, redux))
if any(redux):
return getattr(node, attr)
def gpu_kernels(self, node, name):
if not any(getattr(self, "redux", [node.inputs[0].ndim != 0])):
# Some OpenCL compilers do not accept no-arguments empty kernels
src = '#include "cluda.h"\nKERNEL void reduk(GLOBAL_MEM float *a) { a[0] = 0; }'
params = ["float32"]
else:
k = self.get_kernel_cache(node)
_, src, _, _ = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)
nd = node.inputs[0].ndim
params = ["uint32", gpuarray.GpuArray, "uint32"]
params.extend("uint32" for _ in range(nd))
params.append(gpuarray.GpuArray)
params.append("uint32")
params.extend("int32" for _ in range(nd))
acc_dtype = getattr(self, "acc_dtype", None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
return [
Kernel(
code=src,
name="reduk",
params=params,
flags=Kernel.get_flags(
node.inputs[0].type.dtype, acc_dtype, node.outputs[0].type.dtype
),
objvar="k_reduk_" + name,
)
]
def c_code(self, node, name, inp, out, sub):
if not any(getattr(self, "redux", [node.inputs[0].ndim != 0])):
# We special case the no-reduction case since the gpu
# kernel has trouble handling it.
return """
Py_XDECREF(%(out)s);
%(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);
if (!%(out)s) {
%(fail)s
}
""" % dict(
out=out[0], inp=inp[0], fail=sub["fail"]
)
k = self.get_kernel_cache(node)
_, src, _, ls = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
acc_dtype = getattr(self, "acc_dtype", None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
input = inp[0]
output = out[0]
nd_out = node.outputs[0].ndim
code = """
size_t gs = 1;
size_t ls;
unsigned int n = 1;
unsigned int proxy_dim[%(nd_in)s];
unsigned int proxy_off;
int proxy_str[%(nd_in)s];
void *args[%(n_args)s];
PyGpuArrayObject *tmp;
int err;
""" % dict(
n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim
)
if nd_out != 0:
code += """
size_t out_dims[%(nd_out)s];
int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s;
""" % dict(
nd_out=nd_out, output=output
)
j = 0
for i in range(node.inputs[0].ndim):
if not self.redux[i]:
code += """
out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s];
if (!need_out)
need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s];
""" % dict(
j=j, i=i, input=input, output=output
)
j += 1
code += """
if (need_out) {
%(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(
output=output,
nd_out=nd_out,
fail=sub["fail"],
ctx=sub["params"],
out_type=dtype_to_typecode(node.outputs[0].type.dtype),
)
else:
code += """
if (%(output)s == NULL || %(output)s->ga.nd != 0) {
Py_XDECREF(%(output)s);
%(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(
output=output,
fail=sub["fail"],
ctx=sub["params"],
out_type=dtype_to_typecode(node.outputs[0].type.dtype),
)
if acc_dtype != node.outputs[0].type.dtype:
code += """
tmp = pygpu_empty(%(output)s->ga.nd, %(output)s->ga.dimensions,
%(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!tmp) %(fail)s
""" % dict(
output=output,
fail=sub["fail"],
ctx=sub["params"],
acc_type=dtype_to_typecode(acc_dtype),
)
else:
code += f"""
tmp = {output};
Py_INCREF(tmp);
"""
# We need the proxies since we are passing a pointer to the
# data into the call and therefore we need a real copy of the
# data in the proper type.
code += """
args[0] = &n;
args[1] = tmp->ga.data;
args[2] = &tmp->ga.offset;
""" % dict(
output=output
)
p = 3
for i in range(node.inputs[0].ndim):
code += """
proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s];
args[%(p)s] = &proxy_dim[%(i)s];
n *= %(input)s->ga.dimensions[%(i)s];
""" % dict(
i=i, p=p, input=input
)
p += 1
if not redux[i]:
code += "gs *= %(input)s->ga.dimensions[%(i)s];" % dict(
input=input, i=i
)
code += """
args[%(p)s] = %(input)s->ga.data;
proxy_off = %(input)s->ga.offset;
args[%(p)s+1] = &proxy_off;
""" % dict(
p=p, input=input
)
p += 2
for i in range(node.inputs[0].ndim):
code += """
proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s];
args[%(p)s] = &proxy_str[%(i)s];
""" % dict(
p=p, i=i, input=input
)
p += 1
code += """
if (gs == 0) gs = 1;
n /= gs;
ls = %(ls)s;
err = GpuKernel_call(&%(k_var)s, 1, &gs, &ls, 0, args);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s
}
if (%(cast_out)d) {
err = GpuArray_move(&%(output)s->ga, &tmp->ga);
Py_XDECREF(tmp);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY [cast]: %%s.",
GpuArray_error(&tmp->ga, err));
%(fail)s
}
} else {
Py_XDECREF(%(output)s);
%(output)s = tmp;
}
""" % dict(
k_var="k_reduk_" + name,
ls=ls,
fail=sub["fail"],
output=output,
input=input,
cast_out=bool(acc_dtype != node.outputs[0].type.dtype),
)
return code
def c_code_cache_version_apply(self, node):
return (4, self.kernel_version(node))
def generate_kernel(self, node, odtype, redux):
if isinstance(self.scalar_op, aes.Add):
reduce_expr = "a + b"
elif isinstance(self.scalar_op, aes.Mul):
reduce_expr = "a * b"
else:
raise NotImplementedError()
return ReductionKernel(
node.inputs[0].type.context,
odtype,
self.scalar_op.identity,
reduce_expr,
redux,
arguments=[make_argument(node.inputs[0], "a")],
init_nd=node.inputs[0].ndim,
)
def perform(self, node, inp, out, ctx):
(input,) = inp
(output,) = out
if self.axis is None:
redux = [True] * input.ndim
else:
redux = self.redux
if any(redux):
output[0] = self.get_kernel_cache(node)(input).astype(
copy=False, dtype=node.outputs[0].type.dtype
)
else:
output[0] = pygpu.gpuarray.array(
input, copy=True, dtype=node.outputs[0].type.dtype, context=ctx
)
# To allow reloading old pickled files
GpuCAReduce = GpuCAReduceCPY
main.py | Python | Bookaa/LiuD_Kaleidoscope | MIT
#!/usr/bin/env python
# refer to https://github.com/eliben/pykaleidoscope/blob/master/chapter6.py
from ctypes import CFUNCTYPE, c_double
import llvmlite.ir as ir
import llvmlite.binding as llvm
import Ast_Ks
# error type used by the codegen methods below
class CodegenError(Exception): pass
class LLVMCodeGenerator:
def __init__(self):
self.module = ir.Module()
self.builder = None
self.func_symtab = {}
def generate_code(self, node):
return self._codegen(node)
def _codegen(self, node):
if isinstance(node, Ast_Ks.ks_Module):
return self._codegen_Module(node)
if isinstance(node, Ast_Ks.ks_funcdef):
return self._codegen_funcdef(node)
if isinstance(node, Ast_Ks.ks_enclosed):
return self._codegen(node.v)
if isinstance(node, Ast_Ks.ks_ifcmd):
return self._codegen_ifcmd(node)
if isinstance(node, Ast_Ks.ks_body):
ret = None
for v in node.vlst:
ret = self._codegen(v)
return ret
if isinstance(node, Ast_Ks.ks_value):
lhs = self._codegen(node.v1)
rhs = self._codegen(node.v2)
op = node.s
if op == '+':
return self.builder.fadd(lhs, rhs, 'addtmp')
if op == '-':
return self.builder.fsub(lhs, rhs, 'subtmp')
if op == '*':
return self.builder.fmul(lhs, rhs, 'multmp')
if op == '<':
return self.builder.fcmp_unordered('<', lhs, rhs, 'cmptmp')
if op == '>':
return self.builder.fcmp_unordered('>', lhs, rhs, 'cmptmp')
if op == '|':
one = ir.Constant(ir.IntType(1), 1)
zero = ir.Constant(ir.IntType(1), 0)
return self._codegen_ifcmd_2([(lhs, one), (rhs, one)], zero)
assert False
if isinstance(node, Ast_Ks.ks_litVarname):
return self.func_symtab[node.n]
if isinstance(node, Ast_Ks.ks_litNumi):
return ir.Constant(ir.DoubleType(), float(node.i))
if isinstance(node, Ast_Ks.ks_litNumf):
return ir.Constant(ir.DoubleType(), float(node.f))
if isinstance(node, Ast_Ks.ks_funccall):
return self._codegen_funccall(node)
if isinstance(node, Ast_Ks.ks_forcmd):
return self._codegen_forcmd(node)
if isinstance(node, Ast_Ks.ks_negitem):
if isinstance(node.v, Ast_Ks.ks_litNumf):
return ir.Constant(ir.DoubleType(), -float(node.v.f))
value = self._codegen(node.v)
zero = ir.Constant(ir.DoubleType(), 0.0)
return self.builder.fsub(zero, value)
print 'no _codegen handler for', node
assert False
def _codegen_Module(self, node):
n = 0
for v in node.vlst:
if isinstance(v, Ast_Ks.ks_funcdef):
self._codegen(v)
continue
n += 1
name = '_anon%d' % n
self.func_symtab = {}
func_ty = ir.FunctionType(ir.DoubleType(),[])
func = ir.Function(self.module, func_ty, name)
bb_entry = func.append_basic_block('entry')
self.builder = ir.IRBuilder(bb_entry)
retval = self._codegen(v)
self.builder.ret(retval)
return name
def _codegen_funcdef(self, node):
self.func_symtab = {}
func = self._codegen_PrototypeAST(node)
bb_entry = func.append_basic_block('entry')
self.builder = ir.IRBuilder(bb_entry)
retval = self._codegen(node.v) # node.v is body
self.builder.ret(retval)
return func
def _codegen_ifcmd(self, node):
lst = [(node.v1, node.v2)]
for elsecl in node.vlst:
lst.append((elsecl.v1, elsecl.v2))
return self._codegen_ifcmd_1(lst, node.v4)
def _codegen_ifcmd_1(self, lst, last_else):
(cond_expr, then_expr) = lst.pop(0)
cond_val = self._codegen(cond_expr)
cmp = self.builder.icmp_unsigned('!=', cond_val, ir.Constant(ir.IntType(1), 0))
then_bb = self.builder.function.append_basic_block('then')
else_bb = ir.Block(self.builder.function, 'else')
merge_bb = ir.Block(self.builder.function, 'ifcont')
self.builder.cbranch(cmp, then_bb, else_bb)
self.builder.position_at_start(then_bb)
then_val = self._codegen(then_expr)
self.builder.branch(merge_bb)
then_bb = self.builder.block
self.builder.function.basic_blocks.append(else_bb)
self.builder.position_at_start(else_bb)
if not lst:
else_val = self._codegen(last_else)
else:
else_val = self._codegen_ifcmd_1(lst, last_else)
else_bb = self.builder.block
self.builder.branch(merge_bb)
self.builder.function.basic_blocks.append(merge_bb)
self.builder.position_at_start(merge_bb)
phi = self.builder.phi(ir.DoubleType(), 'iftmp')
phi.add_incoming(then_val, then_bb)
phi.add_incoming(else_val, else_bb)
return phi
def _codegen_ifcmd_2(self, lst, last_else):
(cond_val, then_val) = lst.pop(0)
cmp = self.builder.icmp_unsigned('!=', cond_val, ir.Constant(ir.IntType(1), 0))
then_bb = self.builder.function.append_basic_block('then')
else_bb = ir.Block(self.builder.function, 'else')
merge_bb = ir.Block(self.builder.function, 'ifcont')
self.builder.cbranch(cmp, then_bb, else_bb)
self.builder.position_at_start(then_bb)
self.builder.branch(merge_bb)
then_bb = self.builder.block
self.builder.function.basic_blocks.append(else_bb)
self.builder.position_at_start(else_bb)
if not lst:
else_val = last_else
else:
else_val = self._codegen_ifcmd_2(lst, last_else)
else_bb = self.builder.block
self.builder.branch(merge_bb)
self.builder.function.basic_blocks.append(merge_bb)
self.builder.position_at_start(merge_bb)
phi = self.builder.phi(ir.IntType(32), 'iftmp')
phi.add_incoming(then_val, then_bb)
phi.add_incoming(else_val, else_bb)
return phi
def _codegen_PrototypeAST(self, node):
funcname = node.n
func_ty = ir.FunctionType(ir.DoubleType(), [ir.DoubleType()] * len(node.vlst))
# If a function with this name already exists in the module...
if funcname in self.module.globals:
# We only allow the case in which a declaration exists and now the
# function is defined (or redeclared) with the same number of args.
existing_func = self.module[funcname]
if not isinstance(existing_func, ir.Function):
raise CodegenError('Function/Global name collision', funcname)
if not existing_func.is_declaration():
raise CodegenError('Redefinition of {0}'.format(funcname))
if len(existing_func.function_type.args) != len(func_ty.args):
raise CodegenError('Redefinition with different number of arguments')
func = self.module.globals[funcname]
else:
# Otherwise create a new function
func = ir.Function(self.module, func_ty, funcname)
# Set function argument names from AST
for i, arg in enumerate(func.args):
arg.name = node.vlst[i].n
self.func_symtab[arg.name] = arg
return func
def _codegen_funccall(self, node):
callee = node.n
callee_func = self.module.get_global(callee)
if callee_func is None or not isinstance(callee_func, ir.Function):
raise CodegenError('Call to unknown function', callee)
len_args = 0
if node.vq:
len_args = len(node.vq.vlst)
if len(callee_func.args) != len_args:
raise CodegenError('Call argument length mismatch', callee)
call_args = []
if node.vq:
call_args = [self._codegen(arg) for arg in node.vq.vlst]
return self.builder.call(callee_func, call_args, 'calltmp')
def _codegen_forcmd(self, node):
# Output this as:
# ...
# start = startexpr
# goto loop
# loop:
# variable = phi [start, loopheader], [nextvariable, loopend]
# ...
# bodyexpr
# ...
# loopend:
# step = stepexpr
# nextvariable = variable + step
# endcond = endexpr
# br endcond, loop, endloop
# outloop:
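# Concrete sketch (assuming the classic Kaleidoscope syntax accepted by
# Ast_Ks): an expression such as
#     for i = 1, i < 10, 1.0 in putchard(65)
# lowers to: start = 1 -> loop block with a phi for i -> call putchard
# -> nextvar = i + 1.0 -> branch back while (i < 10) is non-zero ->
# fall through to afterloop and return 0.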
# Emit the start expr first, without the variable in scope.
id_name = node.v1.n
start_expr = node.v1.v
if isinstance(node.v4.v, Ast_Ks.ks_forbody1):
body = node.v4.v.v
else:
body = node.v4.v
step_expr = node.v3
end_expr = node.v2
start_val = self._codegen(start_expr)
preheader_bb = self.builder.block
loop_bb = self.builder.function.append_basic_block('loop')
# Insert an explicit fall through from the current block to loop_bb
self.builder.branch(loop_bb)
self.builder.position_at_start(loop_bb)
# Start the PHI node with an entry for start
phi = self.builder.phi(ir.DoubleType(), id_name)
phi.add_incoming(start_val, preheader_bb)
# Within the loop, the variable is defined equal to the PHI node. If it
# shadows an existing variable, we have to restore it, so save it now.
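# e.g. in "def f(i) for i = 0, i < 3 in i" (a sketch, assuming this
# grammar) the loop's phi temporarily replaces the argument binding of
# "i" in func_symtab, and the saved entry is restored after the loop.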
oldval = self.func_symtab.get(id_name)
self.func_symtab[id_name] = phi
# Emit the body of the loop. This, like any other expr, can change the
# current BB. Note that we ignore the value computed by the body.
body_val = self._codegen(body)
if step_expr is None:
stepval = ir.Constant(ir.DoubleType(), 1.0)
else:
stepval = self._codegen(step_expr)
nextvar = self.builder.fadd(phi, stepval, 'nextvar')
# Compute the end condition
endcond = self._codegen(end_expr)
cmp = self.builder.icmp_unsigned('!=', endcond, ir.Constant(ir.IntType(1), 0), 'loopcond')
# Create the 'after loop' block and insert it
loop_end_bb = self.builder.block
after_bb = self.builder.function.append_basic_block('afterloop')
# Insert the conditional branch into the end of loop_end_bb
self.builder.cbranch(cmp, loop_bb, after_bb)
# New code will be inserted into after_bb
self.builder.position_at_start(after_bb)
# Add a new entry to the PHI node for the backedge
phi.add_incoming(nextvar, loop_end_bb)
# Remove the loop variable from the symbol table; if it shadowed an
# existing variable, restore that.
if oldval is None:
del self.func_symtab[id_name]
else:
self.func_symtab[id_name] = oldval
# The 'for' expression always returns 0
return ir.Constant(ir.DoubleType(), 0.0)
class KaleidoscopeEvaluator(object):
def __init__(self):
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
self.codegen = LLVMCodeGenerator()
self._add_builtins(self.codegen.module)
self.target = llvm.Target.from_default_triple()
def evaluate(self, ast):
return self.codegen.generate_code(ast)
def _add_builtins(self, module):
putchar_ty = ir.FunctionType(ir.IntType(32), [ir.IntType(32)])
putchar = ir.Function(module, putchar_ty, 'putchar')
putchard_ty = ir.FunctionType(ir.DoubleType(), [ir.DoubleType()])
putchard = ir.Function(module, putchard_ty, 'putchard')
irbuilder = ir.IRBuilder(putchard.append_basic_block('entry'))
ival = irbuilder.fptoui(putchard.args[0], ir.IntType(32), 'intcast')
irbuilder.call(putchar, [ival])
irbuilder.ret(ir.Constant(ir.DoubleType(), 0))
def generate_mandelbrot(codestr, optimize=False, llvmdump=False, asmdump=False):
e = KaleidoscopeEvaluator()
ast = Ast_Ks.Test_Parse_ks(codestr)
main_name = e.evaluate(ast)
if llvmdump:
print('======== Unoptimized LLVM IR')
print(str(e.codegen.module))
ss = str(e.codegen.module)
llvmmod = llvm.parse_assembly(ss)
# Optimize the module
if optimize:
pmb = llvm.create_pass_manager_builder()
pmb.opt_level = 2
pm = llvm.create_module_pass_manager()
pmb.populate(pm)
pm.run(llvmmod)
if llvmdump:
print('======== Optimized LLVM IR')
print(str(llvmmod))
target_machine = e.target.create_target_machine()
with llvm.create_mcjit_compiler(llvmmod, target_machine) as ee:
ee.finalize_object()
if asmdump:
print('======== Machine code')
print(target_machine.emit_assembly(llvmmod))
fptr = CFUNCTYPE(c_double)(ee.get_function_address(main_name))
result = fptr()
return result
if __name__ == '__main__':
generate_mandelbrot(Ast_Ks.s_sample_ks)
code/deep/finetune_AlexNet_ResNet/finetune_office31.py | Python | PatrickZH/transferlearning | MIT
from __future__ import print_function
import argparse
import data_loader
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import time
# Command setting
parser = argparse.ArgumentParser(description='Finetune')
parser.add_argument('-model', '-m', type=str, help='model name', default='resnet')
parser.add_argument('-batchsize', '-b', type=int, help='batch size', default=64)
parser.add_argument('-cuda', '-g', type=int, help='cuda id', default=0)
parser.add_argument('-source', '-src', type=str, default='amazon')
parser.add_argument('-target', '-tar', type=str, default='webcam')
args = parser.parse_args()
# Parameter setting
DEVICE = torch.device('cuda:' + str(args.cuda) if torch.cuda.is_available() else 'cpu')
N_CLASS = 31
LEARNING_RATE = 1e-4
BATCH_SIZE = {'src': int(args.batchsize), 'tar': int(args.batchsize)}
N_EPOCH = 100
MOMENTUM = 0.9
DECAY = 5e-4
def load_model(name='alexnet'):
if name == 'alexnet':
model = torchvision.models.alexnet(pretrained=True)
n_features = model.classifier[6].in_features
fc = torch.nn.Linear(n_features, N_CLASS)
model.classifier[6] = fc
elif name == 'resnet':
model = torchvision.models.resnet50(pretrained=True)
n_features = model.fc.in_features
fc = torch.nn.Linear(n_features, N_CLASS)
model.fc = fc
return model
def get_optimizer(model_name):
learning_rate = LEARNING_RATE
if model_name == 'alexnet':
param_group = [{'params': model.features.parameters(), 'lr': learning_rate}]
for i in range(6):
param_group += [{'params': model.classifier[i].parameters(), 'lr': learning_rate}]
param_group += [{'params': model.classifier[6].parameters(), 'lr': learning_rate * 10}]
elif model_name == 'resnet':
param_group = []
for k, v in model.named_parameters():
if not k.__contains__('fc'):
param_group += [{'params': v, 'lr': learning_rate}]
else:
param_group += [{'params': v, 'lr': learning_rate * 10}]
optimizer = optim.SGD(param_group, momentum=MOMENTUM)
return optimizer
# Schedule learning rate
def lr_schedule(optimizer, epoch):
def lr_decay(LR, n_epoch, e):
return LR / (1 + 10 * e / n_epoch) ** 0.75
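# Worked example (assuming Python 3 division): with LEARNING_RATE = 1e-4
# and N_EPOCH = 100 this gives 1e-4 at epoch 0, ~2.6e-5 at epoch 50 and
# ~1.7e-5 at epoch 100, while the final classifier layer keeps its 10x
# multiplier. Note the call in finetune() is currently commented out.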
for i in range(len(optimizer.param_groups)):
if i < len(optimizer.param_groups) - 1:
optimizer.param_groups[i]['lr'] = lr_decay(LEARNING_RATE, N_EPOCH, epoch)
else:
optimizer.param_groups[i]['lr'] = lr_decay(LEARNING_RATE, N_EPOCH, epoch) * 10
def finetune(model, dataloaders, optimizer):
since = time.time()
best_acc = 0.0
acc_hist = []
criterion = nn.CrossEntropyLoss()
for epoch in range(1, N_EPOCH + 1):
# lr_schedule(optimizer, epoch)
print('Learning rate: {:.8f}'.format(optimizer.param_groups[0]['lr']))
print('Learning rate: {:.8f}'.format(optimizer.param_groups[-1]['lr']))
for phase in ['src', 'val', 'tar']:
if phase == 'src':
model.train()
else:
model.eval()
total_loss, correct = 0, 0
for inputs, labels in dataloaders[phase]:
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'src'):
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.max(outputs, 1)[1]
if phase == 'src':
loss.backward()
optimizer.step()
total_loss += loss.item() * inputs.size(0)
correct += torch.sum(preds == labels.data)
epoch_loss = total_loss / len(dataloaders[phase].dataset)
epoch_acc = correct.double() / len(dataloaders[phase].dataset)
acc_hist.append([epoch_loss, epoch_acc])
print('Epoch: [{:02d}/{:02d}]---{}, loss: {:.6f}, acc: {:.4f}'.format(epoch, N_EPOCH, phase, epoch_loss,
epoch_acc))
if phase == 'tar' and epoch_acc > best_acc:
best_acc = epoch_acc
print()
fname = 'finetune_result' + model_name + str(LEARNING_RATE) + str(args.source) + '-' + str(args.target) + '.csv'
np.savetxt(fname, np.asarray(a=acc_hist, dtype=float), delimiter=',',
fmt='%.4f')
time_pass = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_pass // 60, time_pass % 60))
return model, best_acc, acc_hist
if __name__ == '__main__':
torch.manual_seed(10)
# Load data
root_dir = 'data/OFFICE31/'
domain = {'src': str(args.source), 'tar': str(args.target)}
dataloaders = {}
dataloaders['tar'] = data_loader.load_data(root_dir, domain['tar'], BATCH_SIZE['tar'], 'tar')
dataloaders['src'], dataloaders['val'] = data_loader.load_train(root_dir, domain['src'], BATCH_SIZE['src'], 'src')
print(len(dataloaders['src'].dataset), len(dataloaders['val'].dataset))
# Load model
model_name = str(args.model)
model = load_model(model_name).to(DEVICE)
print('Source:{}, target:{}, model: {}'.format(domain['src'], domain['tar'], model_name))
optimizer = get_optimizer(model_name)
model_best, best_acc, acc_hist = finetune(model, dataloaders, optimizer)
print('{}Best acc: {}'.format('*' * 10, best_acc))
Log_clearer.py | Python | hack-e-d/FR-ERP | MIT
import mysql.connector
import datetime
import time
from tkinter import *
def Clear_attendance():
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="",
database="attendance"
)
print(mydb)
mycursor = mydb.cursor()
sql = "truncate table attendance_log"
mycursor.execute(sql)
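# TRUNCATE is DDL in MySQL and commits implicitly, so no explicit
# mydb.commit() should be needed after this statement.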
print("The enteries of the previous day are cleared")
Clear_attendance()
tests/test_subspecies.py | Python | floralist/botaxon | MIT
# -*- coding: utf-8 -*-
import pytest
import botaxon
@pytest.mark.parametrize("test_input", ["Argyranthemum frutescens subsp. canariae"])
def test_single_subspecies(test_input):
parsed = botaxon.load(test_input)
assert isinstance(parsed, botaxon.SubSpeciesResult)
assert botaxon.load(test_input).name == test_input
@pytest.mark.parametrize("test_input", ["Salix × ampherista subsp. yamatensis"])
def test_single_subspecies_with_hybrid_marker_on_species(test_input):
parsed = botaxon.load(test_input)
assert isinstance(parsed, botaxon.SubSpeciesResult)
assert botaxon.load(test_input).name == test_input
@pytest.mark.parametrize("test_input", ["some composed species subsp. something"])
def test_single_subspecies_with_composed_species(test_input):
with pytest.raises(botaxon.InvalidSpeciesError):
botaxon.load(test_input)
| 34.6 | 84 | 0.778035 |
data/make_boxplot.py | Python | faroit/icassp_2016_slides | MIT
import numpy as np
import matplotlib.pyplot as plt
import argparse
import math
import pandas as pd
import seaborn as sns
import matplotlib as mpl
from scipy.io import loadmat
from matplotlib.transforms import BlendedGenericTransform
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Perform pitch estimation for given sensor data file.')
parser.add_argument(dest='input', type=str, default=None)
parser.add_argument(dest='output', type=str, default=None,
help='pdf')
args = parser.parse_args()
df = pd.read_pickle(args.input)
# only plot the results for k == 2
df = df.query('k == 2')
mat = loadmat('data/score_HRNMF.mat')
# adding results for HRNMF for Qa=1
df_mat = pd.DataFrame(
mat['score_HRNMF']['SDR'][0][0][:, 0], columns=['SDR']
)
df_mat['SIR'] = pd.DataFrame(
mat['score_HRNMF']['SIR'][0][0][:, 0], columns=['SIR']
)
df_mat['SAR'] = pd.DataFrame(
mat['score_HRNMF']['SAR'][0][0][:, 0], columns=['SAR']
)
df_mat['method'] = 'HRNMF'
df_mat['k'] = 2
df = df.append(df_mat, ignore_index=True)
df.method = df.method.str.upper()
# reshape data
# df = pd.melt(
# df, id_vars=['method'], value_vars=['SDR', 'SAR', 'SIR'],
# var_name='measure', value_name='score'
# )
plt.rc('text', usetex=True)
plt.rc('font', family='FiraSans')
mpl.rcParams['text.latex.preamble'] = [
r"\usepackage[sfdefault,scaled=.85]{FiraSans}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{textcomp}",
r"\usepackage[varqu,varl]{zi4}",
r"\usepackage{amsmath,amsthm}",
r"\usepackage[cmintegrals]{newtxsf}"
]
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.sans-serif'] = 'FiraSans'
mpl.rcParams['text.latex.unicode'] = 'True'
sns.set()
sns.set_context("paper")
sns.set_style(
"white", {
"font.family":
"serif", 'font.serif':
'ptmrr8re'
}
)
fig_width_pt = 244.6937 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = np.array([fig_width*2.5, fig_height*1.5])
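# Worked numbers: 244.6937 pt / 72.27 ~ 3.39 in wide, 3.39 * 0.618 ~
# 2.09 in tall, so after the 2.5x / 1.5x scaling fig_size comes out at
# roughly (8.5, 3.1) inches.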
params = {'backend': 'ps',
'axes.labelsize': 11,
'legend.fontsize': 11,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': True,
'font.family': 'sans-serif',
'font.sans-serif': 'FiraSans',
'font.size': 11,
'figure.figsize': fig_size}
plt.rcParams.update(params)
measures = ['SDR', 'SIR', 'SAR']
f, ax = plt.subplots(1, len(measures), figsize=fig_size)
meanlineprops = dict(linestyle='dotted', linewidth=1.5, color='#2E2E2E')
for i, measure in enumerate(measures):
sns.boxplot(
"k",
measure,
hue='method',
data=df,
showmeans=True,
showfliers=False,
palette=sns.color_palette('muted'),
ax=ax[i],
width=1,
meanline=True,
meanprops=meanlineprops,
)
sns.despine(top=True, right=True)
lgd = ax[1].legend(
loc='lower center',
bbox_to_anchor=(0.5, -0.22),
bbox_transform=BlendedGenericTransform(f.transFigure, ax[1].transAxes),
ncol=6
)
ax[0].legend_ = None
ax[2].legend_ = None
ax[0].get_xaxis().set_ticks([])
ax[0].set_xlabel('')
ax[1].get_xaxis().set_ticks([])
ax[1].set_xlabel('')
ax[2].get_xaxis().set_ticks([])
ax[2].set_xlabel('')
f.set_tight_layout(True)
f.savefig(
args.output,
bbox_inches='tight',
bbox_extra_artists=(lgd,),
dpi=300
)
| 28.450704 | 79 | 0.569059 |
src/cpu/minor/BaseMinorCPU.py | Python | yclin99/CS251A_final_gem5 | BSD-3-Clause
# Copyright (c) 2012-2014, 2017-2018 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from m5.objects.BaseCPU import BaseCPU
from m5.objects.DummyChecker import DummyChecker
from m5.objects.BranchPredictor import *
from m5.objects.TimingExpr import TimingExpr
from m5.objects.FuncUnit import OpClass
class MinorOpClass(SimObject):
"""Boxing of OpClass to get around build problems and provide a hook for
future additions to OpClass checks"""
type = 'MinorOpClass'
cxx_header = "cpu/minor/func_unit.hh"
cxx_class = 'gem5::MinorOpClass'
opClass = Param.OpClass("op class to match")
class MinorOpClassSet(SimObject):
"""A set of matchable op classes"""
type = 'MinorOpClassSet'
cxx_header = "cpu/minor/func_unit.hh"
cxx_class = 'gem5::MinorOpClassSet'
opClasses = VectorParam.MinorOpClass([], "op classes to be matched."
" An empty list means any class")
class MinorFUTiming(SimObject):
type = 'MinorFUTiming'
cxx_header = "cpu/minor/func_unit.hh"
cxx_class = 'gem5::MinorFUTiming'
mask = Param.UInt64(0, "mask for testing ExtMachInst")
match = Param.UInt64(0, "match value for testing ExtMachInst:"
" (ext_mach_inst & mask) == match")
suppress = Param.Bool(False, "if true, this inst. is not executed by"
" this FU")
extraCommitLat = Param.Cycles(0, "extra cycles to stall commit for"
" this inst.")
extraCommitLatExpr = Param.TimingExpr(NULL, "extra cycles as a"
" run-time evaluated expression")
extraAssumedLat = Param.Cycles(0, "extra cycles to add to scoreboard"
" retire time for this insts dest registers once it leaves the"
" functional unit. For mem refs, if this is 0, the result's time"
" is marked as unpredictable and no forwarding can take place.")
srcRegsRelativeLats = VectorParam.Cycles("the maximum number of cycles"
" after inst. issue that each src reg can be available for this"
" inst. to issue")
opClasses = Param.MinorOpClassSet(MinorOpClassSet(),
"op classes to be considered for this decode. An empty set means any"
" class")
description = Param.String('', "description string of the decoding/inst."
" class")
def minorMakeOpClassSet(op_classes):
"""Make a MinorOpClassSet from a list of OpClass enum value strings"""
def boxOpClass(op_class):
return MinorOpClass(opClass=op_class)
return MinorOpClassSet(opClasses=[ boxOpClass(o) for o in op_classes ])
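# For instance, minorMakeOpClassSet(['IntAlu', 'IntMult']) wraps each
# string in a MinorOpClass and returns a MinorOpClassSet matching integer
# ALU and multiply operations; the functional-unit classes below use this
# helper in exactly that way.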
class MinorFU(SimObject):
type = 'MinorFU'
cxx_header = "cpu/minor/func_unit.hh"
cxx_class = 'gem5::MinorFU'
opClasses = Param.MinorOpClassSet(MinorOpClassSet(), "type of operations"
" allowed on this functional unit")
opLat = Param.Cycles(1, "latency in cycles")
issueLat = Param.Cycles(1, "cycles until another instruction can be"
" issued")
timings = VectorParam.MinorFUTiming([], "extra decoding rules")
cantForwardFromFUIndices = VectorParam.Unsigned([],
"list of FU indices from which this FU can't receive and early"
" (forwarded) result")
class MinorFUPool(SimObject):
type = 'MinorFUPool'
cxx_header = "cpu/minor/func_unit.hh"
cxx_class = 'gem5::MinorFUPool'
funcUnits = VectorParam.MinorFU("functional units")
class MinorDefaultIntFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntAlu'])
timings = [MinorFUTiming(description="Int",
srcRegsRelativeLats=[2])]
opLat = 3
class MinorDefaultIntMulFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntMult'])
timings = [MinorFUTiming(description='Mul',
srcRegsRelativeLats=[0])]
opLat = 3
class MinorDefaultIntDivFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntDiv'])
issueLat = 9
opLat = 9
class MinorDefaultFloatSimdFU(MinorFU):
opClasses = minorMakeOpClassSet([
'FloatAdd', 'FloatCmp', 'FloatCvt', 'FloatMisc', 'FloatMult',
'FloatMultAcc', 'FloatDiv', 'FloatSqrt',
'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt',
'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc',
'SimdDiv', 'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp',
'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult',
'SimdFloatMultAcc', 'SimdFloatSqrt', 'SimdReduceAdd', 'SimdReduceAlu',
'SimdReduceCmp', 'SimdFloatReduceAdd', 'SimdFloatReduceCmp',
'SimdAes', 'SimdAesMix',
'SimdSha1Hash', 'SimdSha1Hash2', 'SimdSha256Hash',
'SimdSha256Hash2', 'SimdShaSigma2', 'SimdShaSigma3'])
timings = [MinorFUTiming(description='FloatSimd',
srcRegsRelativeLats=[2])]
opLat = 6
class MinorDefaultPredFU(MinorFU):
opClasses = minorMakeOpClassSet(['SimdPredAlu'])
timings = [MinorFUTiming(description="Pred",
srcRegsRelativeLats=[2])]
opLat = 3
class MinorDefaultMemFU(MinorFU):
opClasses = minorMakeOpClassSet(['MemRead', 'MemWrite', 'FloatMemRead',
'FloatMemWrite'])
timings = [MinorFUTiming(description='Mem',
srcRegsRelativeLats=[1], extraAssumedLat=2)]
opLat = 1
class MinorDefaultMiscFU(MinorFU):
opClasses = minorMakeOpClassSet(['IprAccess', 'InstPrefetch'])
opLat = 1
class MinorDefaultFUPool(MinorFUPool):
funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(),
MinorDefaultIntMulFU(), MinorDefaultIntDivFU(),
MinorDefaultFloatSimdFU(), MinorDefaultPredFU(),
MinorDefaultMemFU(), MinorDefaultMiscFU()]
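# Illustrative sketch (not part of gem5 upstream): a customised pool assembled
# from the same default units, here with a second integer multiplier. The class
# name is an assumption made only for this example.
class ExampleDualMulFUPool(MinorFUPool):
    funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(),
                 MinorDefaultIntMulFU(), MinorDefaultIntMulFU(),
                 MinorDefaultIntDivFU(), MinorDefaultFloatSimdFU(),
                 MinorDefaultPredFU(), MinorDefaultMemFU(),
                 MinorDefaultMiscFU()]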
class ThreadPolicy(Enum): vals = ['SingleThreaded', 'RoundRobin', 'Random']
class BaseMinorCPU(BaseCPU):
type = 'BaseMinorCPU'
cxx_header = "cpu/minor/cpu.hh"
cxx_class = 'gem5::MinorCPU'
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
threadPolicy = Param.ThreadPolicy('RoundRobin',
"Thread scheduling policy")
fetch1FetchLimit = Param.Unsigned(1,
"Number of line fetches allowable in flight at once")
fetch1LineSnapWidth = Param.Unsigned(0,
"Fetch1 'line' fetch snap size in bytes"
" (0 means use system cache line size)")
fetch1LineWidth = Param.Unsigned(0,
"Fetch1 maximum fetch size in bytes (0 means use system cache"
" line size)")
fetch1ToFetch2ForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch1 to Fetch2 (1 means next cycle)")
fetch1ToFetch2BackwardDelay = Param.Cycles(1,
"Backward cycle delay from Fetch2 to Fetch1 for branch prediction"
" signalling (0 means in the same cycle, 1 mean the next cycle)")
fetch2InputBufferSize = Param.Unsigned(2,
"Size of input buffer to Fetch2 in cycles-worth of insts.")
fetch2ToDecodeForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch2 to Decode (1 means next cycle)")
fetch2CycleInput = Param.Bool(True,
"Allow Fetch2 to cross input lines to generate full output each"
" cycle")
decodeInputBufferSize = Param.Unsigned(3,
"Size of input buffer to Decode in cycles-worth of insts.")
decodeToExecuteForwardDelay = Param.Cycles(1,
"Forward cycle delay from Decode to Execute (1 means next cycle)")
decodeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Decode (and implicitly"
" Decode's own width)")
decodeCycleInput = Param.Bool(True,
"Allow Decode to pack instructions from more than one input cycle"
" to fill its output each cycle")
executeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Execute")
executeCycleInput = Param.Bool(True,
"Allow Execute to use instructions from more than one input cycle"
" each cycle")
executeIssueLimit = Param.Unsigned(2,
"Number of issuable instructions in Execute each cycle")
executeMemoryIssueLimit = Param.Unsigned(1,
"Number of issuable memory instructions in Execute each cycle")
executeCommitLimit = Param.Unsigned(2,
"Number of committable instructions in Execute each cycle")
executeMemoryCommitLimit = Param.Unsigned(1,
"Number of committable memory references in Execute each cycle")
executeInputBufferSize = Param.Unsigned(7,
"Size of input buffer to Execute in cycles-worth of insts.")
executeMemoryWidth = Param.Unsigned(0,
"Width (and snap) in bytes of the data memory interface. (0 mean use"
" the system cacheLineSize)")
executeMaxAccessesInMemory = Param.Unsigned(2,
"Maximum number of concurrent accesses allowed to the memory system"
" from the dcache port")
executeLSQMaxStoreBufferStoresPerCycle = Param.Unsigned(2,
"Maximum number of stores that the store buffer can issue per cycle")
executeLSQRequestsQueueSize = Param.Unsigned(1,
"Size of LSQ requests queue (address translation queue)")
executeLSQTransfersQueueSize = Param.Unsigned(2,
"Size of LSQ transfers queue (memory transaction queue)")
executeLSQStoreBufferSize = Param.Unsigned(5,
"Size of LSQ store buffer")
executeBranchDelay = Param.Cycles(1,
"Delay from Execute deciding to branch and Fetch1 reacting"
" (1 means next cycle)")
executeFuncUnits = Param.MinorFUPool(MinorDefaultFUPool(),
"FUlines for this processor")
executeSetTraceTimeOnCommit = Param.Bool(True,
"Set inst. trace times to be commit times")
executeSetTraceTimeOnIssue = Param.Bool(False,
"Set inst. trace times to be issue times")
executeAllowEarlyMemoryIssue = Param.Bool(True,
"Allow mem refs to be issued to the LSQ before reaching the head of"
" the in flight insts queue")
enableIdling = Param.Bool(True,
"Enable cycle skipping when the processor is idle\n");
branchPred = Param.BranchPredictor(TournamentBP(
numThreads = Parent.numThreads), "Branch Predictor")
def addCheckerCpu(self):
print("Checker not yet supported by MinorCPU")
exit(1)
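# Illustrative sketch (not part of gem5 upstream): a derived configuration that
# widens the decode/execute stages controlled by the parameters above. The class
# name and widths are assumptions; a real config would normally start from an
# ISA-specific MinorCPU subclass.
class ExampleWideMinorCPU(BaseMinorCPU):
    decodeInputWidth = 3
    executeInputWidth = 3
    executeIssueLimit = 3
    executeCommitLimit = 3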
| 42.249147 | 78 | 0.708054 |
39cdfbf4809b23f1a6607aabf0ae26b3297946f6 | 1,237 | py | Python | pxr/usd/plugin/usdMtlx/__init__.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | [
"BSD-2-Clause"
] | 5 | 2017-11-06T10:21:18.000Z | 2020-04-02T13:20:55.000Z | pxr/usd/plugin/usdMtlx/__init__.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | [
"BSD-2-Clause"
] | 1 | 2020-07-07T22:39:42.000Z | 2020-07-07T22:39:42.000Z | pxr/usd/plugin/usdMtlx/__init__.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | [
"BSD-2-Clause"
] | null | null | null | #
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import _usdMtlx
from pxr import Tf
Tf.PrepareModule(_usdMtlx, locals())
del Tf
try:
from . import __DOC
__DOC.Execute(locals())
del __DOC
except Exception:
pass
| 35.342857 | 74 | 0.756669 |
637b4340836e7a085a60e221f9deb4822eb7967b | 991 | py | Python | train.py | DanielLund/music-genre-classifier | a761b44bed9226cf40735d585285eb6916bfaad2 | [
"MIT"
] | null | null | null | train.py | DanielLund/music-genre-classifier | a761b44bed9226cf40735d585285eb6916bfaad2 | [
"MIT"
] | null | null | null | train.py | DanielLund/music-genre-classifier | a761b44bed9226cf40735d585285eb6916bfaad2 | [
"MIT"
] | null | null | null | from preprocessing import analyse_playlist, PCA
from KNN_model import knn_func as knn
url_breaks = ""
url_house = ""
breaks = analyse_playlist(url_breaks)
house = analyse_playlist(url_house)
PCA_breaks = PCA("breaks.xlsx")
PCA_house = PCA("house.xlsx")
labelled_breaks = pd.read_excel("breaks.xlsx", index_col=0, usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
labelled_house = pd.read_excel("house.xlsx", index_col=0, usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
labelled_breaks["Class"] = 0
labelled_house["Class"] = 1
full_data = labelled_breaks.append(labelled_house[:24], ignore_cols=True)
full_data["Key"] = (full_data["Key"] / full_data["Key"].max())
full_data["Tempo"] = (full_data["Tempo"] / full_data["Tempo"].max())
full_data["Loudness"] = (full_data["Loudness"] / full_data["Loudness"].max())
full_data_random = full_data.sample(frac=1)
x_train = full_data_random[0:24]
y_train = x_train["Class"].values
x_train = x_train.drop("Class", axis=1)
knn(x_train, y_train) | 30.96875 | 107 | 0.711403 |
28fd2ad35609b2d53ff646e88e9de27ba133be88 | 8,171 | py | Python | tests/unit/search/test_tasks.py | uranusjr/warehouse | d0a88279ad1f9a80e4adc60bbe0c37abbb7f07e7 | [
"Apache-2.0"
] | null | null | null | tests/unit/search/test_tasks.py | uranusjr/warehouse | d0a88279ad1f9a80e4adc60bbe0c37abbb7f07e7 | [
"Apache-2.0"
] | null | null | null | tests/unit/search/test_tasks.py | uranusjr/warehouse | d0a88279ad1f9a80e4adc60bbe0c37abbb7f07e7 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import packaging.version
import pretend
import pytest
from first import first
import warehouse.search.tasks
from warehouse.search.tasks import reindex, _project_docs
from ...common.db.packaging import ProjectFactory, ReleaseFactory
def test_project_docs(db_session):
projects = [ProjectFactory.create() for _ in range(2)]
releases = {
p: sorted(
[ReleaseFactory.create(project=p) for _ in range(3)],
key=lambda r: packaging.version.parse(r.version),
reverse=True,
)
for p in projects
}
assert list(_project_docs(db_session)) == [
{
"_id": p.normalized_name,
"_type": "doc",
"_source": {
"created": p.created,
"name": p.name,
"normalized_name": p.normalized_name,
"version": [r.version for r in prs],
"latest_version": first(prs, key=lambda r: not r.is_prerelease).version,
},
}
for p, prs in sorted(releases.items(), key=lambda x: x[0].name.lower())
]
class FakeESIndices:
def __init__(self):
self.indices = {}
self.aliases = {}
self.put_settings = pretend.call_recorder(lambda *a, **kw: None)
self.delete = pretend.call_recorder(lambda *a, **kw: None)
self.create = pretend.call_recorder(lambda *a, **kw: None)
def exists_alias(self, name):
return name in self.aliases
def get_alias(self, name):
return self.aliases[name]
def put_alias(self, name, index):
self.aliases.setdefault(name, []).append(index)
def remove_alias(self, name, alias):
self.aliases[name] = [n for n in self.aliases[name] if n != alias]
if not self.aliases[name]:
del self.aliases[name]
def update_aliases(self, body):
for items in body["actions"]:
for action, values in items.items():
if action == "add":
self.put_alias(values["alias"], values["index"])
elif action == "remove":
self.remove_alias(values["alias"], values["index"])
else:
raise ValueError("Unknown action: {!r}.".format(action))
class FakeESClient:
def __init__(self):
self.indices = FakeESIndices()
class TestReindex:
def test_fails_when_raising(self, db_request, monkeypatch):
docs = pretend.stub()
def project_docs(db):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
db_request.registry.update({"elasticsearch.index": "warehouse"})
db_request.registry.settings = {"elasticsearch.url": "http://some.url"}
monkeypatch.setattr(
warehouse.search.tasks.elasticsearch,
"Elasticsearch",
lambda *a, **kw: es_client,
)
class TestException(Exception):
pass
def parallel_bulk(client, iterable, index=None):
assert client is es_client
assert iterable is docs
assert index == "warehouse-cbcbcbcbcb"
raise TestException
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(os, "urandom", lambda n: b"\xcb" * n)
with pytest.raises(TestException):
reindex(db_request)
assert es_client.indices.delete.calls == [
pretend.call(index="warehouse-cbcbcbcbcb")
]
assert es_client.indices.put_settings.calls == []
def test_successfully_indexes_and_adds_new(self, db_request, monkeypatch):
docs = pretend.stub()
def project_docs(db):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
db_request.registry.update(
{"elasticsearch.index": "warehouse", "elasticsearch.shards": 42}
)
db_request.registry.settings = {"elasticsearch.url": "http://some.url"}
monkeypatch.setattr(
warehouse.search.tasks.elasticsearch,
"Elasticsearch",
lambda *a, **kw: es_client,
)
parallel_bulk = pretend.call_recorder(lambda client, iterable, index: [None])
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(os, "urandom", lambda n: b"\xcb" * n)
reindex(db_request)
assert parallel_bulk.calls == [
pretend.call(es_client, docs, index="warehouse-cbcbcbcbcb")
]
assert es_client.indices.create.calls == [
pretend.call(
body={
"settings": {
"number_of_shards": 42,
"number_of_replicas": 0,
"refresh_interval": "-1",
}
},
wait_for_active_shards=42,
index="warehouse-cbcbcbcbcb",
)
]
assert es_client.indices.delete.calls == []
assert es_client.indices.aliases == {"warehouse": ["warehouse-cbcbcbcbcb"]}
assert es_client.indices.put_settings.calls == [
pretend.call(
index="warehouse-cbcbcbcbcb",
body={"index": {"number_of_replicas": 0, "refresh_interval": "1s"}},
)
]
def test_successfully_indexes_and_replaces(self, db_request, monkeypatch):
docs = pretend.stub()
def project_docs(db):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
es_client.indices.indices["warehouse-aaaaaaaaaa"] = None
es_client.indices.aliases["warehouse"] = ["warehouse-aaaaaaaaaa"]
db_engine = pretend.stub()
db_request.registry.update(
{
"elasticsearch.index": "warehouse",
"elasticsearch.shards": 42,
"sqlalchemy.engine": db_engine,
}
)
db_request.registry.settings = {"elasticsearch.url": "http://some.url"}
monkeypatch.setattr(
warehouse.search.tasks.elasticsearch,
"Elasticsearch",
lambda *a, **kw: es_client,
)
parallel_bulk = pretend.call_recorder(lambda client, iterable, index: [None])
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(os, "urandom", lambda n: b"\xcb" * n)
reindex(db_request)
assert parallel_bulk.calls == [
pretend.call(es_client, docs, index="warehouse-cbcbcbcbcb")
]
assert es_client.indices.create.calls == [
pretend.call(
body={
"settings": {
"number_of_shards": 42,
"number_of_replicas": 0,
"refresh_interval": "-1",
}
},
wait_for_active_shards=42,
index="warehouse-cbcbcbcbcb",
)
]
assert es_client.indices.delete.calls == [pretend.call("warehouse-aaaaaaaaaa")]
assert es_client.indices.aliases == {"warehouse": ["warehouse-cbcbcbcbcb"]}
assert es_client.indices.put_settings.calls == [
pretend.call(
index="warehouse-cbcbcbcbcb",
body={"index": {"number_of_replicas": 0, "refresh_interval": "1s"}},
)
]
| 33.487705 | 88 | 0.586831 |
eb6f3e25632910146f641da5ef5145fe0bc0c16c | 12,438 | py | Python | intersight/models/workflow_build_task_meta_owner.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/workflow_build_task_meta_owner.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/workflow_build_task_meta_owner.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WorkflowBuildTaskMetaOwner(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'service': 'str',
'workflow_types': 'list[str]'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'service': 'Service',
'workflow_types': 'WorkflowTypes'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, service=None, workflow_types=None):
"""
WorkflowBuildTaskMetaOwner - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._service = None
self._workflow_types = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if service is not None:
self.service = service
if workflow_types is not None:
self.workflow_types = workflow_types
@property
def account_moid(self):
"""
Gets the account_moid of this WorkflowBuildTaskMetaOwner.
The Account ID for this managed object.
:return: The account_moid of this WorkflowBuildTaskMetaOwner.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this WorkflowBuildTaskMetaOwner.
The Account ID for this managed object.
:param account_moid: The account_moid of this WorkflowBuildTaskMetaOwner.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this WorkflowBuildTaskMetaOwner.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this WorkflowBuildTaskMetaOwner.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this WorkflowBuildTaskMetaOwner.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this WorkflowBuildTaskMetaOwner.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this WorkflowBuildTaskMetaOwner.
The time when this managed object was created.
:return: The create_time of this WorkflowBuildTaskMetaOwner.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this WorkflowBuildTaskMetaOwner.
The time when this managed object was created.
:param create_time: The create_time of this WorkflowBuildTaskMetaOwner.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this WorkflowBuildTaskMetaOwner.
The time when this managed object was last modified.
:return: The mod_time of this WorkflowBuildTaskMetaOwner.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this WorkflowBuildTaskMetaOwner.
The time when this managed object was last modified.
:param mod_time: The mod_time of this WorkflowBuildTaskMetaOwner.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this WorkflowBuildTaskMetaOwner.
A unique identifier of this Managed Object instance.
:return: The moid of this WorkflowBuildTaskMetaOwner.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this WorkflowBuildTaskMetaOwner.
A unique identifier of this Managed Object instance.
:param moid: The moid of this WorkflowBuildTaskMetaOwner.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this WorkflowBuildTaskMetaOwner.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this WorkflowBuildTaskMetaOwner.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this WorkflowBuildTaskMetaOwner.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this WorkflowBuildTaskMetaOwner.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this WorkflowBuildTaskMetaOwner.
An array of owners which represent effective ownership of this object.
:return: The owners of this WorkflowBuildTaskMetaOwner.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this WorkflowBuildTaskMetaOwner.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this WorkflowBuildTaskMetaOwner.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this WorkflowBuildTaskMetaOwner.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this WorkflowBuildTaskMetaOwner.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this WorkflowBuildTaskMetaOwner.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this WorkflowBuildTaskMetaOwner.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this WorkflowBuildTaskMetaOwner.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this WorkflowBuildTaskMetaOwner.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this WorkflowBuildTaskMetaOwner.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this WorkflowBuildTaskMetaOwner.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this WorkflowBuildTaskMetaOwner.
The versioning info for this managed object
:return: The version_context of this WorkflowBuildTaskMetaOwner.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this WorkflowBuildTaskMetaOwner.
The versioning info for this managed object
:param version_context: The version_context of this WorkflowBuildTaskMetaOwner.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def service(self):
"""
Gets the service of this WorkflowBuildTaskMetaOwner.
:return: The service of this WorkflowBuildTaskMetaOwner.
:rtype: str
"""
return self._service
@service.setter
def service(self, service):
"""
Sets the service of this WorkflowBuildTaskMetaOwner.
:param service: The service of this WorkflowBuildTaskMetaOwner.
:type: str
"""
self._service = service
@property
def workflow_types(self):
"""
Gets the workflow_types of this WorkflowBuildTaskMetaOwner.
:return: The workflow_types of this WorkflowBuildTaskMetaOwner.
:rtype: list[str]
"""
return self._workflow_types
@workflow_types.setter
def workflow_types(self, workflow_types):
"""
Sets the workflow_types of this WorkflowBuildTaskMetaOwner.
:param workflow_types: The workflow_types of this WorkflowBuildTaskMetaOwner.
:type: list[str]
"""
self._workflow_types = workflow_types
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, WorkflowBuildTaskMetaOwner):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
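# Illustrative sketch (not part of the generated client): constructing the model
# and serialising it. The attribute values are assumptions for this example.
def _example_meta_owner_dict():
    owner = WorkflowBuildTaskMetaOwner(service="workflow",
                                       workflow_types=["example-type"])
    return owner.to_dict()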
| 28.925581 | 214 | 0.608056 |
5bc57a2dd141a3cc35b4fffbb66a49303243a538 | 1,041 | py | Python | openchain/adapters/base.py | manti-by/Openchain | 0e2bd5323b35f888f2939d30acab50bd46645e58 | [
"BSD-2-Clause"
] | 1 | 2019-03-07T20:42:37.000Z | 2019-03-07T20:42:37.000Z | openchain/adapters/base.py | manti-by/Openchain | 0e2bd5323b35f888f2939d30acab50bd46645e58 | [
"BSD-2-Clause"
] | null | null | null | openchain/adapters/base.py | manti-by/Openchain | 0e2bd5323b35f888f2939d30acab50bd46645e58 | [
"BSD-2-Clause"
] | null | null | null | import os
class Singleton(type):
instance_set = {}
def __call__(cls, *args, **kwargs):
if cls not in cls.instance_set:
cls.instance_set[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls.instance_set[cls]
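# Illustrative sketch (not part of the original module): any class built on the
# Singleton metaclass above resolves to one shared instance per class. The
# _DemoSingleton name is an assumption used only for this example.
def _singleton_demo():
    class _DemoSingleton(metaclass=Singleton):
        pass
    # Both constructions return the same cached object.
    return _DemoSingleton() is _DemoSingleton()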
class BaseAdapter(metaclass=Singleton):
db_path = os.getenv('DATABASE_PATH', '/var/lib/openchain/')
connection_set = {}
def connect(self, namespace: str):
raise NotImplementedError
def get(self, namespace: str, key: bytes) -> bytes:
raise NotImplementedError
def put(self, namespace, key: bytes, value: bytes):
raise NotImplementedError
def delete(self, namespace: str, key: bytes) -> bytes:
raise NotImplementedError
def iterator(self, namespace: str) -> callable:
raise NotImplementedError
def batch_put(self, namespace: str, item_list: list) -> callable:
raise NotImplementedError
def batch_delete(self, namespace: str, item_list: list) -> callable:
raise NotImplementedError
| 26.692308 | 83 | 0.667627 |
f13200a2440c3b2d01a0841c13dcc52ac32e8c69 | 1,173 | py | Python | custom_components/idiamant/const.py | clementprevot/home-assistant-idiamant | 2fc2703e4f2ec66a135a71d9a76121e2869a940f | [
"MIT"
] | 1 | 2022-02-27T14:52:56.000Z | 2022-02-27T14:52:56.000Z | custom_components/idiamant/const.py | clementprevot/home-assistant-idiamant | 2fc2703e4f2ec66a135a71d9a76121e2869a940f | [
"MIT"
] | 18 | 2022-02-27T13:38:34.000Z | 2022-03-30T06:32:27.000Z | custom_components/idiamant/const.py | clementprevot/home-assistant-idiamant | 2fc2703e4f2ec66a135a71d9a76121e2869a940f | [
"MIT"
] | null | null | null | """Constants used by the iDiamant component."""
from homeassistant.const import Platform
NAME = "iDiamant"
VERSION = "0.0.1"
DOMAIN = "idiamant"
DOMAIN_DATA = f"{DOMAIN}_data"
MANUFACTURER = "Netatmo"
DEFAULT_ATTRIBUTION = f"Data provided by {MANUFACTURER}"
PLATFORMS = [
Platform.COVER,
Platform.SENSOR,
]
BASE_API_URL = "https://api.netatmo.com"
API_PATH = "/api"
TIMEOUT = 10
ACCEPT_HEADER = "Accept"
ACCEPT_HEADER_JSON = "application/json"
AUTHORIZATION_HEADER = "Authorization"
AUTHORIZATION_HEADER_BEARER = "Bearer"
DEFAULT_HEADERS = {ACCEPT_HEADER: ACCEPT_HEADER_JSON}
OAUTH2_PATH = "/oauth2"
OAUTH2_AUTHORIZE_URL = BASE_API_URL + OAUTH2_PATH + "/authorize"
OAUTH2_TOKEN_URL = BASE_API_URL + OAUTH2_PATH + "/token"
SCOPES = [
"read_bubendorff",
"write_bubendorff",
]
MODEL_NBG = "Gateway"
MODEL_NBR = "Rolling shutter"
MODEL_NBO = "Orientable shutter"
MODEL_NBS = "Swinging shutter"
MODELS = {
"NBG": MODEL_NBG,
"NBR": MODEL_NBR,
"NBO": MODEL_NBO,
"NBS": MODEL_NBS,
}
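# Illustrative helper (an assumption, not part of the integration): map a module
# type reported by the API to its human-readable model name, falling back to the
# raw type string for unknown types.
def model_label(module_type: str) -> str:
    return MODELS.get(module_type, module_type)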
TYPE_SECURITY = "security"
AUTH = "idiamant_auth"
DATA_HOMES = ("idiamant_homes",)
DATA_ROOMS = ("idiamant_rooms",)
DATA_MODULES = ("idiamant_modules",)
| 20.946429 | 64 | 0.726343 |
8243285c7737e7661cce42a8e43d3f3056f82314 | 1,173 | py | Python | smsbomber.py | Nxuser741/SMSBomber-python | 9df768427b0d22451b837d7ea5c68be2069b7142 | [
"Apache-2.0"
] | null | null | null | smsbomber.py | Nxuser741/SMSBomber-python | 9df768427b0d22451b837d7ea5c68be2069b7142 | [
"Apache-2.0"
] | null | null | null | smsbomber.py | Nxuser741/SMSBomber-python | 9df768427b0d22451b837d7ea5c68be2069b7142 | [
"Apache-2.0"
] | null | null | null | import requests
def start():
spanduk = "\n ______ __________ __ \n / __/ |/ / __/ _ )___ __ _ / / ___ ____\n _\ \/ /|_/ /\ \/ _ / _ \/ ' \/ _ \/ -_) __/\n/___/_/ /_/___/____/\___/_/_/_/_.__/\__/_/ \n"
print(spanduk)
def hajar(nope):
BASE = "PC Me"
head = {"LANG":"en", "Content-Type":"application/json; charset=UTF-8", "Content-Length":"44", "Host":"phr.gms.digital", "Connection":"close"}
body = {"memberToken":"", "receivers":str(nope)}
otw = requests.post(url=BASE, json=body, headers=head)
jasjus = otw.json()
if jasjus['status'] == True:
print("[SUCCESS]> Send To "+ str(nope))
else:
print("[FAILED]> Send To "+ str(nope))
otw.close()
start()
nomor = 0
while(nomor == 0):
try:
nomor = input("[INPUT]> Nomor : 0")
except:
print("[ERROR]> Masukan Hanya Nomor")
jumlah = 0
while(jumlah == 0):
try:
jumlah = input("[INPUT]> Jumlah :")
except:
print("[FAILED]> Masukan Hanya Angka")
if jumlah <= 0:
print("[INFO]> Jumlah di Set ke 1")
jumlah = 1
for love in range(jumlah):
hajar(nomor)
print("[INFO]> Bot Stoped")
| 30.076923 | 210 | 0.542199 |
abb43829551371539f0d868301b54097c9bc7da2 | 662 | py | Python | reporter/app/automate/job_listener.py | chef-partners/GCP-CSCC-Automate-Integration | 96517262a5e530cd5ec0cfe95f0fec9c3b7306ab | [
"Apache-2.0"
] | 5 | 2019-06-17T05:05:00.000Z | 2022-02-11T17:25:24.000Z | reporter/app/automate/job_listener.py | chef-partners/GCP-CSCC-Automate-Integration | 96517262a5e530cd5ec0cfe95f0fec9c3b7306ab | [
"Apache-2.0"
] | null | null | null | reporter/app/automate/job_listener.py | chef-partners/GCP-CSCC-Automate-Integration | 96517262a5e530cd5ec0cfe95f0fec9c3b7306ab | [
"Apache-2.0"
] | 5 | 2018-12-06T14:09:02.000Z | 2022-02-11T17:25:28.000Z | from apscheduler.schedulers.background import BackgroundScheduler, BlockingScheduler
from automate.automate_service import AutomateService
from cscc.cscc_service import CsccService
import logging
import sys
class JobListener(BackgroundScheduler):
logger = logging.getLogger(__name__)
def listen(self, automateService, csccService):
reports = automateService.findReports()
failedControls = automateService.getFailedControls(reports)
findings = csccService.buildFindings(failedControls)
for finding in findings:
csccService.createFinding(finding)
if len(findings) > 0:
self.logger.info(f"Creating {len(findings)} findings")
| 36.777778 | 85 | 0.797583 |
5ea65d4873a5243d2a44e0d39d0611c949e980c6 | 3,307 | py | Python | clothing_recommender_project.py | rb2001/Clothing_Recommender | 48399911686e7cbb8c830340338ca59d0475815c | [
"Unlicense"
] | null | null | null | clothing_recommender_project.py | rb2001/Clothing_Recommender | 48399911686e7cbb8c830340338ca59d0475815c | [
"Unlicense"
] | null | null | null | clothing_recommender_project.py | rb2001/Clothing_Recommender | 48399911686e7cbb8c830340338ca59d0475815c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""Clothing_Recommender Project .ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nw0ewNdkx8o3WULAp2ynhHpbq1kVq7YZ
Clean the data and use input
"""
## Import and Organize Data ##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
#read clean file (downloaded from Task 1)
df=pd.read_csv('CleanedData.csv', sep=',')
#Pivot table (ClothingID columns, Age index, Rating values) - missing ratings are left as NaN
train = df.pivot_table(index='Age', columns='ClothingID', values='Rating')
#sort train data
train = train.sort_values('Age', ascending=True)
###Create a greeting
print("Welcome, let us recommend a product for you")
#Take user input
Name =input('Please enter your name: ')
Age = int(input('Please enter your age: '))
CID_user = int(input("Enter Clothing ID: ")) #90
while CID_user not in train.columns:
print('Invalid: No data for ID')
CID_user = int(input("Enter valid Clothing ID: "))
rating_user = float(input("Enter Rating for Clothing ID: ")) #4
##use this later (if user has more than one rating to enter)
#entries = int(input("How many ratings will you enter? "))
#for x in range(entries):
#create array with user data
userArray = pd.DataFrame().reindex_like(train)
userArray.dropna(thresh=1,inplace=True)
userArray.loc[Age,CID_user] = rating_user #enter user data
from sklearn.metrics.pairwise import nan_euclidean_distances
#find euclidean distance between all rows of train and first row of test *ignores nan
distance = np.zeros((0,2)) #create empty array
for index, row in train.iterrows(): #iterate through each row of train
result = float(nan_euclidean_distances([userArray.loc[Age]], [train.loc[index]])) #compute the euclidean distance between two rows, *confirmed it works thru excel
result_array = [index, result] #place age and distance into an array
distance = np.append(distance,[result_array],axis= 0)
#convert array to a dataframe
dfDistance = pd.DataFrame({'Age': distance[:, 0], 'E-Distance': distance[:, 1]})
dfDistance.head()
k= 5
#sort by distance, reset the index
dfDistance = dfDistance.sort_values('E-Distance', ascending=True).head(20)
dfDistance = dfDistance.reset_index(drop=True)
dfDistance.drop(dfDistance[dfDistance.index > k-1].index, inplace=True)
dfDistance.head()
#NOTE: for calculating the predicted rating, could use an IDW Interpolation function shown here https://stackoverflow.com/questions/3104781/inverse-distance-weighted-idw-interpolation-with-python
#just using mean of each to test a solution, will come back and try more complex/accurate functions later
#assume k of 5####
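# Sketch of the IDW idea mentioned in the NOTE above (an assumption, not wired
# into the script): weight each neighbour's rating by inverse distance instead
# of taking a plain mean.
def idw_rating(ratings, distances, power=1):
    weights = [1.0 / (d ** power + 1e-9) for d in distances]
    return sum(r * w for r, w in zip(ratings, weights)) / sum(weights)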
k_array = pd.DataFrame().reindex_like(train)
meanArray = pd.DataFrame()
for x in dfDistance['Age']:
k_array = k_array.append([train.loc[x]]) #make array of the k closest ages
meanArray = meanArray.append(k_array.mean(),ignore_index = True).transpose()
meanArray.dropna(axis=0,inplace=True)
meanArray.columns = ["Mean"]
meanArray = meanArray[meanArray.Mean == 5]
recommend = list(meanArray.index.values)
print("recommended ClothingID's are: ")
print(recommend)
#feedback, clothingID (choose top 5), department
#reverse lookup clothingID for department
# feedback (choose first 3)
| 35.180851 | 195 | 0.752646 |
0c77519a818ee0a71b00b3fda5c4a9604e71d489 | 107,859 | py | Python | ken/lib/python3.8/site-packages/markdown2.py | kennedy-ben/One-Minuite-Spend | 33780ed420326e044c58e1b35b6d01af52b6e574 | [
"Unlicense",
"MIT"
] | 1 | 2019-07-09T04:57:14.000Z | 2019-07-09T04:57:14.000Z | virtual/lib/python3.6/site-packages/markdown2.py | annstella/blog | 1cdb7e7e7df028a84fae9b7d901116aae577589d | [
"MIT"
] | null | null | null | virtual/lib/python3.6/site-packages/markdown2.py | annstella/blog | 1cdb7e7e7df028a84fae9b7d901116aae577589d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports "img",
"table", "pre" and "code" tags. Add an issue if you require this for other
tags.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* numbering: Support of generic counters. Non standard extension to
allow sequential numbering of figures, tables, equations, exhibits etc.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* spoiler: A special kind of blockquote commonly hidden behind a
click on SO. Syntax per <http://meta.stackexchange.com/a/72878>.
* tag-friendly: Requires atx style headers to have a space between the # and
the header text. Useful for applications that require twitter style tags to
pass through the parser.
* tables: Tables using the same format as GFM
<https://help.github.com/articles/github-flavored-markdown#tables> and
PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* use-file-vars: Look for an Emacs-style markdown-extras file variable to turn
on Extras.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
* xml: Passes one-liner processing instructions and namespaced XML tags.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there implications with this. Compare 'pydoc sre'
#   not yet sure if there are implications with this. Compare 'pydoc sre'
__version_info__ = (2, 3, 4)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import sys
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
# ---- Python version compat
if sys.version_info[:2] < (2, 4):
def reversed(sequence):
for i in sequence[::-1]:
yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
py3 = False
try:
bytes
except NameError:
bytes = str
base_string_type = basestring
elif sys.version_info[0] >= 3:
py3 = True
unicode = str
base_string_type = str
# ---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
for ch in '\\`*_{}[]()>#+-.!'])
# ---- exceptions
class MarkdownError(Exception):
pass
# ---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
footnote_title=None, footnote_return_symbol=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
footnote_title=footnote_title,
footnote_return_symbol=footnote_return_symbol,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
footnote_title=None, footnote_return_symbol=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
footnote_title=footnote_title,
footnote_return_symbol=footnote_return_symbol,
use_file_vars=use_file_vars).convert(text)
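# Illustrative sketch (not part of the library API): converting a small document
# with a couple of the extras listed above. The sample text and function name
# are assumptions made only for this example.
def _example_markdown_usage():
    return markdown("# Title\n\nSome *text* and a [link](http://example.com/).",
                    extras=["header-ids", "smarty-pants"])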
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None,
footnote_title=None, footnote_return_symbol=None,
use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
# Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and "header-ids" not in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.footnote_title = footnote_title
self.footnote_return_symbol = footnote_return_symbol
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
self._escape_table = g_escape_table.copy()
if "smarty-pants" in self.extras:
self._escape_table['"'] = _hash_text('"')
self._escape_table["'"] = _hash_text("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
if "metadata" in self.extras:
self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"""
<(a)
(
[^>]*
href= # href is required
['"]? # HTML5 attribute values do not have to be quoted
[^#'"] # We don't want to match href values that start with # (like footnotes)
)
""",
re.IGNORECASE | re.VERBOSE
)
# Opens the linked document in a new window or tab
# should only used in <a> tags with an "href" attribute.
# same with _a_nofollow
_a_blank = _a_nofollow
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
# TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
# strip metadata from head and extract
if "metadata" in self.extras:
text = self._extract_metadata(text)
text = self.preprocess(text)
if "fenced-code-blocks" in self.extras and not self.safe_mode:
text = self._do_fenced_code_blocks(text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
if "fenced-code-blocks" in self.extras and self.safe_mode:
text = self._do_fenced_code_blocks(text)
# Because numbering references aren't links (yet?) then we can do everything associated with counters
# before we get started
if "numbering" in self.extras:
text = self._do_numbering(text)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
if "nofollow" in self.extras:
text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
if "target-blank-links" in self.extras:
text = self._a_blank.sub(r'<\1 target="_blank"\2', text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
if "metadata" in self.extras:
rv.metadata = self.metadata
return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with optional '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
# # header
# or:
# foo: bar
# another-var: blah blah
#
# # header
_meta_data_pattern = re.compile(r'^(?:---[\ \t]*\n)?(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)|([\S\w]+\s*:(?! >)[ \t]*.*\n?)(?:---[\ \t]*\n)?', re.MULTILINE)
_key_val_pat = re.compile("[\S\w]+\s*:(?! >)[ \t]*.*\n?", re.MULTILINE)
# this allows key: >
# value
    #     continues over multiple lines
_key_val_block_pat = re.compile(
"(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)", re.MULTILINE)
_meta_data_fence_pattern = re.compile(r'^---[\ \t]*\n', re.MULTILINE)
_meta_data_newline = re.compile("^\n", re.MULTILINE)
def _extract_metadata(self, text):
if text.startswith("---"):
fence_splits = re.split(self._meta_data_fence_pattern, text, maxsplit=2)
metadata_content = fence_splits[1]
match = re.findall(self._meta_data_pattern, metadata_content)
if not match:
return text
tail = fence_splits[2]
else:
metadata_split = re.split(self._meta_data_newline, text, maxsplit=1)
metadata_content = metadata_split[0]
match = re.findall(self._meta_data_pattern, metadata_content)
if not match:
return text
tail = metadata_split[1]
kv = re.findall(self._key_val_pat, text)
kvm = re.findall(self._key_val_block_pat, text)
kvm = [item.replace(": >\n", ":", 1) for item in kvm]
for item in kv + kvm:
k, v = item.split(":", 1)
self.metadata[k.strip()] = v.strip()
return tail
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
# print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith('"') and val.endswith('"')):
emacs_vars[var] = val[1:-1]
return emacs_vars
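    # For illustration (an assumption, not taken from any real file): the
    # one-liner form parsed by _get_emacs_vars above looks like
    #     -*- markdown-extras: footnotes, wiki-tables -*-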
def _detab_line(self, line):
r"""Recusively convert tabs to spaces in a single line.
Called from _detab()."""
if '\t' not in line:
return line
chunk1, chunk2 = line.split('\t', 1)
chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width))
output = chunk1 + chunk2
return self._detab_line(output)
def _detab(self, text):
r"""Iterate text line by line and convert tabs to spaces.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
output = []
for line in text.splitlines():
output.append(self._detab_line(line))
return '\n'.join(output)
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
                # - Must be followed by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _do_numbering(self, text):
''' We handle the special extension for generic numbering for
tables, figures etc.
'''
# First pass to define all the references
self.regex_defns = re.compile(r'''
\[\#(\w+)\s* # the counter. Open square plus hash plus a word \1
([^@]*)\s* # Some optional characters, that aren't an @. \2
@(\w+) # the id. Should this be normed? \3
([^\]]*)\] # The rest of the text up to the terminating ] \4
''', re.VERBOSE)
self.regex_subs = re.compile(r"\[@(\w+)\s*\]") # [@ref_id]
counters = {}
references = {}
replacements = []
definition_html = '<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>'
reference_html = '<a class="{}" href="#counter-ref-{}">{}</a>'
for match in self.regex_defns.finditer(text):
# We must have four match groups otherwise this isn't a numbering reference
if len(match.groups()) != 4:
continue
counter = match.group(1)
text_before = match.group(2)
ref_id = match.group(3)
text_after = match.group(4)
number = counters.get(counter, 1)
references[ref_id] = (number, counter)
replacements.append((match.start(0),
definition_html.format(counter,
ref_id,
text_before,
number,
text_after),
match.end(0)))
counters[counter] = number + 1
for repl in reversed(replacements):
text = text[:repl[0]] + repl[1] + text[repl[2]:]
# Second pass to replace the references with the right
# value of the counter
# Fwiw, it's vaguely annoying to have to turn the iterator into
# a list and then reverse it but I can't think of a better thing to do.
for match in reversed(list(self.regex_subs.finditer(text))):
number, counter = references.get(match.group(1), (None, None))
if number is not None:
repl = reference_html.format(counter,
match.group(1),
number)
else:
repl = reference_html.format(match.group(1),
'countererror',
'?' + match.group(1) + '?')
if "smarty-pants" in self.extras:
repl = repl.replace('"', self._escape_table['"'])
text = text[:match.start()] + repl + text[match.end():]
return text
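    # Illustrative example for the numbering extra (names are hypothetical):
    # the definition "[#figure Figure @fig1: System overview]" is replaced by
    # a <figcaption> reading "Figure 1: System overview", and a later
    # "[@fig1]" becomes an <a> showing "1" that links to that caption.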
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
if "fenced-code-blocks" in self.extras:
text = self._do_fenced_code_blocks(text)
text = self._do_headers(text)
# Do Horizontal Rules:
# On the number of spaces in horizontal rules: The spec is fuzzy: "If
# you wish, you may use spaces between the hyphens or asterisks."
# Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
# hr chars to one or two. We'll reproduce that limit here.
hr = "\n<hr"+self.empty_element_suffix+"\n"
text = re.sub(self._hr_re, hr, text)
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
if "wiki-tables" in self.extras:
text = self._do_wiki_tables(text)
if "tables" in self.extras:
text = self._do_tables(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
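    # Illustrative example: an unindented interactive session such as
    #
    #     >>> 1 + 1
    #     2
    #
    # (followed by a blank line) is indented by one tab-width here so that the
    # normal code-block processing renders it as <pre><code>.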
def _table_sub(self, match):
        trim_space_re = '^[ \t\n]+|[ \t\n]+$'
        trim_bar_re = r'^\||\|$'
        split_bar_re = r'^\||(?<!\\)\|'
        escape_bar_re = r'\\\|'
head, underline, body = match.groups()
# Determine aligns for columns.
cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)))]
align_from_col_idx = {}
for col_idx, col in enumerate(cols):
if col[0] == ':' and col[-1] == ':':
align_from_col_idx[col_idx] = ' align="center"'
elif col[0] == ':':
align_from_col_idx[col_idx] = ' align="left"'
elif col[-1] == ':':
align_from_col_idx[col_idx] = ' align="right"'
# thead
hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<thead>', '<tr>']
cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)))]
for col_idx, col in enumerate(cols):
hlines.append(' <th%s>%s</th>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</thead>')
# tbody
hlines.append('<tbody>')
for line in body.strip('\n').split('\n'):
hlines.append('<tr>')
cols = [re.sub(escape_bar_re, '|', cell.strip()) for cell in re.split(split_bar_re, re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)))]
for col_idx, col in enumerate(cols):
hlines.append(' <td%s>%s</td>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</tbody>')
hlines.append('</table>')
return '\n'.join(hlines) + '\n'
def _do_tables(self, text):
"""Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
"""
less_than_tab = self.tab_width - 1
table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^[ ]{0,%d} # allowed whitespace
(.*[|].*) \n # $1: header row (at least one pipe)
^[ ]{0,%d} # allowed whitespace
( # $2: underline row
# underline row with leading bar
(?: \|\ *:?-+:?\ * )+ \|? \n
|
# or, underline row without leading bar
(?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \n
)
( # $3: data rows
(?:
^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces
.*\|.* \n
)+
)
''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
return table_re.sub(self._table_sub, text)
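    # Illustrative input for the "tables" extra (contents are hypothetical):
    #
    #     | Name | Value |
    #     | :--- | ----: |
    #     | foo  |     1 |
    #
    # which renders as a <table> with a left-aligned "Name" column and a
    # right-aligned "Value" column.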
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
# print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
# pprint(rows)
hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
def _do_wiki_tables(self, text):
# Optimization.
if "||" not in text:
return text
less_than_tab = self.tab_width - 1
wiki_table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
(^\1\|\|.+?\|\|\n)* # any number of subsequent lines
''' % less_than_tab, re.M | re.X)
return wiki_table_re.sub(self._wiki_table_sub, text)
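    # Illustrative input for the "wiki-tables" extra (contents hypothetical):
    #
    #     || Name || Value ||
    #     || foo  || 1     ||
    #
    # Each ||-delimited cell becomes a <td> in the generated <table>.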
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
if "strike" in self.extras:
text = self._do_strike(text)
text = self._do_italics_and_bold(text)
if "smarty-pants" in self.extras:
text = self._do_smart_punctuation(text)
# Do hard breaks:
if "break-on-newline" in self.extras:
text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
else:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
            replacements = [
                ('&', '&amp;'),
                ('<', '&lt;'),
                ('>', '&gt;'),
            ]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_inline_link_title = re.compile(r'''
( # \1
[ \t]+
(['"]) # quote char = \2
(?P<title>.*?)
\2
)? # title is optional
\)$
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
_whitespace = re.compile(r'\s*')
_strip_anglebrackets = re.compile(r'<(.*)>.*')
def _find_non_whitespace(self, text, start):
"""Returns the index of the first non-whitespace character in text
after (and including) start
"""
match = self._whitespace.match(text, start)
return match.end()
def _find_balanced(self, text, start, open_c, close_c):
"""Returns the index where the open_c and close_c characters balance
out - the same number of open_c and close_c are encountered - or the
end of string if it's reached before the balance point is found.
"""
i = start
l = len(text)
count = 1
while count > 0 and i < l:
if text[i] == open_c:
count += 1
elif text[i] == close_c:
count -= 1
i += 1
return i
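    # Illustrative example: _find_balanced("(foo (bar)) tail", 1, "(", ")")
    # returns 11, the index just past the ")" that balances the "(" assumed to
    # sit immediately before `start`.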
def _extract_url_and_title(self, text, start):
"""Extracts the url and (optional) title from the tail of a link"""
# text[start] equals the opening parenthesis
idx = self._find_non_whitespace(text, start+1)
if idx == len(text):
return None, None, None
end_idx = idx
has_anglebrackets = text[idx] == "<"
if has_anglebrackets:
end_idx = self._find_balanced(text, end_idx+1, "<", ">")
end_idx = self._find_balanced(text, end_idx, "(", ")")
match = self._inline_link_title.search(text, idx, end_idx)
if not match:
return None, None, None
url, title = text[idx:match.start()], match.group("title")
if has_anglebrackets:
url = self._strip_anglebrackets.sub(r'\1', url)
return url, title, end_idx
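    # Illustrative example (values are hypothetical): for
    #     text = '[text](/url/ "the title") tail'
    # with `start` at the "(", this returns ('/url/', 'the title') plus the
    # index just past the closing parenthesis.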
_safe_protocols = re.compile(r'(https?|ftp):', re.I)
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
url, title, url_end_idx = self._extract_url_and_title(text, p)
if url is not None:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (_html_escape_url(url, safe_mode=self.safe_mode),
_xml_escape_attr(link_text),
title_str,
img_class_str,
self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
elif start_idx >= anchor_allowed_pos:
if self.safe_mode and not self._safe_protocols.match(url):
result_head = '<a href="#"%s>' % (title_str)
else:
result_head = '<a href="%s"%s>' % (_html_escape_url(url, safe_mode=self.safe_mode), title_str)
result = '%s%s</a>' % (result_head, _xml_escape_attr(link_text))
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (_html_escape_url(url, safe_mode=self.safe_mode),
_xml_escape_attr(link_text),
title_str,
img_class_str,
self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
if self.safe_mode and not self._safe_protocols.match(url):
result_head = '<a href="#"%s>' % (title_str)
else:
result_head = '<a href="%s"%s>' % (_html_escape_url(url, safe_mode=self.safe_mode), title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
if 0 == len(header_id):
header_id += '-%s' % self._count_from_header_id[header_id]
return header_id
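    # Illustrative example (prefix is hypothetical): with the "header-ids"
    # extra set to "doc", a header "My Header!" gets id "doc-my-header"; a
    # second header with the same text gets "doc-my-header-2".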
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, self._unescape_special_chars(name)))
_h_re_base = r'''
(^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
|
(^(\#{1,6}) # \1 = string of #'s
[ \t]%s
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
)
'''
_h_re = re.compile(_h_re_base % '*', re.X | re.M)
_h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
def _h_sub(self, match):
if match.group(1) is not None:
# Setext header
n = {"=": 1, "-": 2}[match.group(3)[0]]
header_group = match.group(2)
else:
# atx header
n = len(match.group(5))
header_group = match.group(6)
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(header_group,
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(header_group)
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
if 'tag-friendly' in self.extras:
return self._h_re_tag_friendly.sub(self._h_sub, text)
return self._h_re.sub(self._h_sub, text)
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
# Iterate over each *non-overlapping* list match.
pos = 0
while True:
# Find the *first* hit for either list style (ul or ol). We
# match ul and ol separately to avoid adjacent lists of different
# types running into each other (see issue #16).
hits = []
for marker_pat in (self._marker_ul, self._marker_ol):
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
(?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
if self.list_level: # sub-list
list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
match = list_re.search(text, pos)
if match:
hits.append((match.start(), match))
if not hits:
break
hits.sort()
match = hits[0][1]
start, end = match.span()
middle = self._list_sub(match)
text = text[:start] + middle + text[end:]
pos = start + len(middle) # start pos for next attempted match
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_task_list_item_re = re.compile(r'''
(\[[\ x]\])[ \t]+ # tasklist marker = \1
(.*) # list item text = \2
''', re.M | re.X | re.S)
_task_list_warpper_str = r'<input type="checkbox" class="task-list-item-checkbox" %sdisabled> %s'
def _task_list_item_sub(self, match):
marker = match.group(1)
item_text = match.group(2)
if marker == '[x]':
return self._task_list_warpper_str % ('checked ', item_text)
elif marker == '[ ]':
return self._task_list_warpper_str % ('', item_text)
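    # Illustrative example for the "task_list" extra: an item written as
    # "- [x] write docs" renders with a checked, disabled checkbox before the
    # item text, while "- [ ] write tests" renders the same checkbox unchecked.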
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
if "task_list" in self.extras:
item = self._task_list_item_re.sub(self._task_list_item_sub, item)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter_opts.setdefault("cssclass", "codehilite")
formatter = HtmlCodeFormatter(**formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
def unhash_code(codeblock):
for key, sanitized in list(self.html_spans.items()):
codeblock = codeblock.replace(key, sanitized)
replacements = [
("&", "&"),
("<", "<"),
(">", ">")
]
for old, new in replacements:
codeblock = codeblock.replace(old, new)
return codeblock
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
codeblock = unhash_code( codeblock )
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
# Lookahead to make sure this block isn't already in a code block.
# Needed when syntax highlighting is being used.
(?![^<]*\</code\>)
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n+|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
return self._code_block_sub(match, is_fenced_code_block=True)
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
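    # Illustrative example: a block fenced as
    #
    #     ```python
    #     print("hi")
    #     ```
    #
    # is handled here without requiring four-space indentation; if a language
    # name is given and a matching Pygments lexer is found, the block is
    # syntax-highlighted, otherwise it is emitted as plain <pre><code>.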
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
    # - to include one backtick or a run of backticks, the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
        #   can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
        replacements = [
            # Encode all ampersands; HTML entities are not
            # entities within a Markdown code span.
            ('&', '&amp;'),
            # Do the angle bracket song and dance:
            ('<', '&lt;'),
            ('>', '&gt;'),
        ]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S)
def _do_strike(self, text):
text = self._strike_re.sub(r"<strike>\1</strike>", text)
return text
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
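    # Illustrative example (input is hypothetical): with "smarty-pants"
    # enabled, a paragraph like
    #     "It's a 'test'" -- mostly...
    # comes out with curly double quotes, a curly apostrophe and single
    # quotes, an en dash for "--" (an em dash for "---") and an ellipsis.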
_block_quote_base = r'''
( # Wrap whole match in \1
(
^[ \t]*>%s[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
'''
_block_quote_re = re.compile(_block_quote_base % '', re.M | re.X)
_block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M)
_bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
is_spoiler = 'spoiler' in self.extras and self._bq_all_lines_spoilers.match(bq)
# trim one level of quoting
if is_spoiler:
bq = self._bq_one_level_re_spoiler.sub('', bq)
else:
bq = self._bq_one_level_re.sub('', bq)
# trim whitespace-only lines
bq = self._ws_only_line_re.sub('', bq)
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
if is_spoiler:
return '<blockquote class="spoiler">\n%s\n</blockquote>\n\n' % bq
else:
return '<blockquote>\n%s\n</blockquote>\n\n' % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
if 'spoiler' in self.extras:
return self._block_quote_re_spoiler.sub(self._block_quote_sub, text)
else:
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
if not self.footnote_title:
self.footnote_title = "Jump back to footnote %d in the text."
if not self.footnote_return_symbol:
self.footnote_return_symbol = "↩"
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
try:
backlink = ('<a href="#fnref-%s" ' +
'class="footnoteBackLink" ' +
'title="' + self.footnote_title + '">' +
self.footnote_return_symbol +
'</a>') % (id, i+1)
except TypeError:
log.debug("Footnote error. `footnote_title` "
"must include parameter. Using defaults.")
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'↩</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
        text = self._ampersand_re.sub('&amp;', text)
        # Encode naked <'s
        text = self._naked_lt_re.sub('&lt;', text)
        # Encode naked >'s
        # Note: Other markdown implementations (e.g. Markdown.pl, PHP
        # Markdown) don't do this.
        text = self._naked_gt_re.sub('&gt;', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
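    # Illustrative example (pattern and URL are hypothetical): with
    #     link_patterns=[(re.compile(r'\bissue\s+#?(\d+)', re.I),
    #                     r'https://example.com/issues/\1')]
    # the text "see issue 42" becomes
    #     see <a href="https://example.com/issues/42">issue 42</a>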
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
# ---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
metadata = None
_toc = None
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
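# Illustrative example: _slugify("Hello, World!") returns "hello-world".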
## end of http://code.activestate.com/recipes/577257/ }}}
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
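# Illustrative example: given f(a, b, c=0), _curry(f, 1, c=2) returns a
# callable g such that g(5) calls f(1, 5, c=2).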
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
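# Illustrative example: _dedent("    foo\n      bar\n") returns "foo\n  bar\n"
# (the common four-space margin is removed).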
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
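# Illustrative usage of _memoized (the function below is hypothetical):
#
#     @_memoized
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# Repeat calls with the same argument return the cached value instead of
# recomputing it.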
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
    the tag attribute is surrounded by double quotes.
    """
    escaped = (attr
        .replace('&', '&amp;')
        .replace('"', '&quot;')
        .replace('<', '&lt;')
        .replace('>', '&gt;'))
    if not skip_single_quote:
        escaped = escaped.replace("'", "&#39;")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
def _html_escape_url(attr, safe_mode=False):
"""Replace special characters that are potentially malicious in url string."""
    escaped = (attr
        .replace('"', '&quot;')
        .replace('<', '&lt;')
        .replace('>', '&gt;'))
    if safe_mode:
        escaped = escaped.replace('+', ' ')
        escaped = escaped.replace("'", "&#39;")
return escaped
# ---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<https://github.com/trentm/python-markdown2/wiki/Extras>")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
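    # For reference (illustrative, not from the original file): the parsing above expects
    # each non-blank, non-'#' line of the link-patterns file to hold an encoded regex and
    # an href separated by whitespace, with the href as the last token, e.g. a
    # hypothetical line:
    #     /\bissue\s+#?(\d+)\b/i    https://example.com/issues/\1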
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
if not paths:
paths = ['-']
for path in paths:
if path == '-':
text = sys.stdin.read()
else:
fp = codecs.open(path, 'r', opts.encoding)
text = fp.read()
fp.close()
if opts.compare:
from subprocess import Popen, PIPE
print("==== Markdown.pl ====")
p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
p.stdin.write(text.encode('utf-8'))
p.stdin.close()
perl_html = p.stdout.read().decode('utf-8')
if py3:
sys.stdout.write(perl_html)
else:
sys.stdout.write(perl_html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
print("==== markdown2.py ====")
html = markdown(text,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
if py3:
sys.stdout.write(html)
else:
sys.stdout.write(html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 40.579007 | 166 | 0.521913 |
15991e24854e18c98e58f4a47f6b0365c634099d | 3,413 | py | Python | server/app/settings.py | jleg13/Django-REST-API | 7e2c397ca3d49a320a79356c96b35beb86cc97ff | [
"MIT"
] | null | null | null | server/app/settings.py | jleg13/Django-REST-API | 7e2c397ca3d49a320a79356c96b35beb86cc97ff | [
"MIT"
] | null | null | null | server/app/settings.py | jleg13/Django-REST-API | 7e2c397ca3d49a320a79356c96b35beb86cc97ff | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v*4eer+_52u!rq_e1_5dcc8fe(j*d!+z2@pjzw2i%7@y+eh8jd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'gallery',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = '/vol/web/static/'
MEDIA_ROOT = '/vol/web/media/'
AUTH_USER_MODEL = 'core.User'
| 25.281481 | 91 | 0.685907 |
909ac29bf9044b1bd83f882c275bb30dee241531 | 42,546 | py | Python | device_base_class.py | labscript-suite-temp-2/blacs | 9959cfe0ebf44e4512bf8916407d581034356aad | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | device_base_class.py | labscript-suite-temp-2/blacs | 9959cfe0ebf44e4512bf8916407d581034356aad | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | device_base_class.py | labscript-suite-temp-2/blacs | 9959cfe0ebf44e4512bf8916407d581034356aad | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #####################################################################
# #
# /device_base_class.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
import logging
import sys
import os
import time
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import labscript_utils.excepthook
from qtutils import UiLoader
from blacs import BLACS_DIR
from blacs.tab_base_classes import Tab, Worker, define_state
from blacs.tab_base_classes import MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL, MODE_BUFFERED
from blacs.output_classes import AO, DO, DDS, Image
from labscript_utils.qtwidgets.toolpalette import ToolPaletteGroup
from labscript_utils.shared_drive import path_to_agnostic
class DeviceTab(Tab):
def __init__(self,notebook,settings,restart=False):
Tab.__init__(self,notebook,settings,restart)
self.connection_table = settings['connection_table']
# Create the variables we need
self._AO = {}
self._DO = {}
self._DDS = {}
self._image = {}
self._final_values = {}
self._last_programmed_values = {}
self._last_remote_values = {}
self._primary_worker = None
self._secondary_workers = []
self._can_check_remote_values = False
self._changed_radio_buttons = {}
# Call the initialise GUI function
self.initialise_GUI()
self.restore_save_data(self.settings['saved_data'] if 'saved_data' in self.settings else {})
self.initialise_workers()
self._last_programmed_values = self.get_front_panel_values()
if self._can_check_remote_values:
self.statemachine_timeout_add(30000,self.check_remote_values)
else:
            # If we can check remote values, there is no need to call program_manual here,
            # as the remote device will either already be programmed correctly, or any
            # inconsistency between local and remote values will need to be resolved first
self.program_device()
def initialise_GUI(self):
# Override this function
pass
def initialise_workers(self):
# Override this function
# set the primary worker at this time
pass
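    # A minimal sketch of an override (worker name, worker class and argument dict are
    # hypothetical; compare the MyTab test case at the bottom of this file):
    #
    #     def initialise_workers(self):
    #         self.create_worker("my_worker", MyDeviceWorker, {"port": "COM3"})
    #         self.primary_worker = "my_worker"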
@property
def primary_worker(self):
return self._primary_worker
@primary_worker.setter
def primary_worker(self,worker):
self._primary_worker = worker
def add_secondary_worker(self,worker):
if worker not in self._secondary_workers:
self._secondary_workers.append(worker)
def supports_remote_value_check(self,support):
self._can_check_remote_values = bool(support)
############################################################
# What do the properties dictionaries need to look like? #
############################################################
#
# digital_properties = {'hardware_channel_reference':{}, 'do0':{}}
#
#
# analog_properties = {'hardware_channel_reference':{'base_unit':'V',
# 'min':-10.0,
# 'max':10.0,
# 'step':0.01,
# 'decimals':3
# },
# 'ao1':{'base_unit':'V',
# 'min':-10.0,
# 'max':10.0,
# 'step':0.01,
# 'decimals':3
# },
# }
#
#
# dds_properties = {'hardware_channel_reference':{'freq':{'base_unit':'Hz',
# 'min':-10.0,
# 'max':10.0,
# 'step':0.01,
# 'decimals':3
# },
# 'amp':{'base_unit':'Vpp',
# 'min':0.0,
# 'max':1.0,
# 'step':0.1,
# 'decimals':3
# },
# 'phase':{'base_unit':'Degrees',
# 'min':0.0,
# 'max':360.0,
# 'step':1,
# 'decimals':2
# },
# 'gate':{},
# },
# 'dds1':{'freq':{'base_unit':'Hz',
# 'min':-10.0,
# 'max':10.0,
# 'step':0.01,
# 'decimals':3
# },
# 'amp':{'base_unit':'Vpp',
# 'min':0.0,
# 'max':1.0,
# 'step':0.1,
# 'decimals':3
# },
# 'phase':{'base_unit':'Degrees',
# 'min':0.0,
# 'max':360.0,
# 'step':1,
# 'decimals':2
# },
# 'gate':{},
# },
# }
#
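    # A minimal sketch of how a subclass's initialise_GUI() might use dictionaries of the
    # shape documented above (channel names and ranges are hypothetical; see also the
    # MyTab test case at the bottom of this file):
    #
    #     do_prop = {'port0/line0': {}, 'port0/line1': {}}
    #     ao_prop = {'ao0': {'base_unit': 'V', 'min': -10.0, 'max': 10.0,
    #                        'step': 0.01, 'decimals': 3}}
    #     self.create_digital_outputs(do_prop)
    #     self.create_analog_outputs(ao_prop)
    #     dds_widgets, ao_widgets, do_widgets = self.auto_create_widgets()
    #     self.auto_place_widgets(("Analog Outputs", ao_widgets),
    #                             ("Digital Outputs", do_widgets))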
def create_digital_outputs(self,digital_properties):
for hardware_name,properties in digital_properties.items():
# Save the DO object
self._DO[hardware_name] = self._create_DO_object(self.device_name,hardware_name,hardware_name,properties)
def _create_DO_object(self,parent_device,BLACS_hardware_name,labscript_hardware_name,properties):
# Find the connection name
device = self.get_child_from_connection_table(parent_device,labscript_hardware_name)
connection_name = device.name if device else '-'
# Instantiate the DO object
return DO(BLACS_hardware_name, connection_name, self.device_name, self.program_device, self.settings)
def create_image_outputs(self,image_properties):
for hardware_name,properties in image_properties.items():
# Save the DO object
self._image[hardware_name] = self._create_image_object(self.device_name,hardware_name,hardware_name,properties)
def _create_image_object(self,parent_device,BLACS_hardware_name,labscript_hardware_name,properties):
# Find the connection name
device = self.get_child_from_connection_table(parent_device,labscript_hardware_name)
connection_name = device.name if device else '-'
# sanitise properties dictionary
prop = {}
accepted_kwargs = ['width', 'height', 'x', 'y']
for kwarg in accepted_kwargs:
if kwarg in properties:
prop[kwarg] = properties[kwarg]
# Instantiate the DO object
return Image(BLACS_hardware_name, connection_name, self.device_name, self.program_device, self.settings, **prop)
def create_analog_outputs(self,analog_properties):
for hardware_name,properties in analog_properties.items():
# Create and save the AO object
self._AO[hardware_name] = self._create_AO_object(self.device_name,hardware_name,hardware_name,properties)
def _create_AO_object(self,parent_device,BLACS_hardware_name,labscript_hardware_name,properties):
# Find the connection name
device = self.get_child_from_connection_table(parent_device,labscript_hardware_name)
connection_name = device.name if device else '-'
# Get the calibration details
calib_class = None
calib_params = {}
if device:
# get the AO from the connection table, find its calibration details
calib_class = device.unit_conversion_class if device.unit_conversion_class != "None" else None
calib_params = device.unit_conversion_params
# Instantiate the AO object
return AO(BLACS_hardware_name, connection_name, self.device_name, self.program_device, self.settings, calib_class, calib_params,
properties['base_unit'], properties['min'], properties['max'], properties['step'], properties['decimals'])
def create_dds_outputs(self,dds_properties):
for hardware_name,properties in dds_properties.items():
device = self.get_child_from_connection_table(self.device_name,hardware_name)
connection_name = device.name if device else '-'
subchnl_name_list = ['freq','amp','phase']
sub_chnls = {}
for subchnl in subchnl_name_list:
if subchnl in properties:
# Create the AO object
sub_chnls[subchnl] = self._create_AO_object(connection_name,hardware_name+'_'+subchnl,subchnl,properties[subchnl])
if 'gate' in properties:
sub_chnls['gate'] = self._create_DO_object(connection_name,hardware_name+'_gate','gate',properties)
self._DDS[hardware_name] = DDS(hardware_name,connection_name,sub_chnls)
def get_child_from_connection_table(self, parent_device_name, port):
return self.connection_table.find_child(parent_device_name, port)
def create_digital_widgets(self,channel_properties):
widgets = {}
for hardware_name,properties in channel_properties.items():
properties.setdefault('args',[])
properties.setdefault('kwargs',{})
device = self.get_child_from_connection_table(self.device_name,hardware_name)
properties['kwargs']['inverted'] = bool(device.properties.get('inverted', False) if device else properties['kwargs'].get('inverted', False))
if hardware_name in self._DO:
widgets[hardware_name] = self._DO[hardware_name].create_widget(*properties['args'],**properties['kwargs'])
return widgets
def create_image_widgets(self,channel_properties):
widgets = {}
for hardware_name,properties in channel_properties.items():
properties.setdefault('args',[])
properties.setdefault('kwargs',{})
if hardware_name in self._image:
widgets[hardware_name] = self._image[hardware_name].create_widget(*properties['args'],**properties['kwargs'])
return widgets
def create_analog_widgets(self,channel_properties):
widgets = {}
for hardware_name,properties in channel_properties.items():
properties.setdefault('display_name',None)
properties.setdefault('horizontal_alignment',False)
properties.setdefault('parent',None)
if hardware_name in self._AO:
widgets[hardware_name] = self._AO[hardware_name].create_widget(properties['display_name'],properties['horizontal_alignment'],properties['parent'])
return widgets
def create_dds_widgets(self,channel_properties):
widgets = {}
for hardware_name,properties in channel_properties.items():
properties.setdefault('args',[])
properties.setdefault('kwargs',{})
if hardware_name in self._DDS:
widgets[hardware_name] = self._DDS[hardware_name].create_widget(*properties['args'],**properties['kwargs'])
return widgets
def auto_create_widgets(self):
dds_properties = {}
for channel,output in self._DDS.items():
dds_properties[channel] = {}
dds_widgets = self.create_dds_widgets(dds_properties)
ao_properties = {}
for channel,output in self._AO.items():
ao_properties[channel] = {}
ao_widgets = self.create_analog_widgets(ao_properties)
do_properties = {}
for channel,output in self._DO.items():
do_properties[channel] = {}
do_widgets = self.create_digital_widgets(do_properties)
image_properties = {}
for channel,output in self._image.items():
image_properties[channel] = {}
image_widgets = self.create_image_widgets(image_properties)
# Hack to maintain backwards compatibility with devices implemented
# prior to the introduction of the IMAGE output class
if self._image:
return dds_widgets, ao_widgets, do_widgets, image_widgets
else:
return dds_widgets, ao_widgets, do_widgets
def auto_place_widgets(self,*args):
widget = QWidget()
toolpalettegroup = ToolPaletteGroup(widget)
for arg in args:
            # A default sort algorithm that just returns the object (this is equivalent to not specifying the sort algorithm)
sort_algorithm = lambda x: x
if type(arg) == type(()) and len(arg) > 1 and type(arg[1]) == type({}) and len(arg[1].keys()) > 0:
# we have a name, use it!
name = arg[0]
widget_dict = arg[1]
if len(arg) > 2:
sort_algorithm = arg[2]
else:
# ignore things that are not dictionaries or empty dictionaries
if type(arg) != type({}) or len(arg.keys()) < 1:
continue
if isinstance(self.get_channel(list(arg.keys())[0]),AO):
name = 'Analog Outputs'
elif isinstance(self.get_channel(list(arg.keys())[0]),DO):
name = 'Digital Outputs'
elif isinstance(self.get_channel(list(arg.keys())[0]),Image):
name = 'Image Outputs'
elif isinstance(self.get_channel(list(arg.keys())[0]),DDS):
name = 'DDS Outputs'
else:
# If it isn't DO, DDS or AO, we should forget about them and move on to the next argument
continue
widget_dict = arg
# Create tool palette
if toolpalettegroup.has_palette(name):
toolpalette = toolpalettegroup.get_palette(name)
else:
toolpalette = toolpalettegroup.append_new_palette(name)
for channel in sorted(widget_dict.keys(),key=sort_algorithm):
toolpalette.addWidget(widget_dict[channel],True)
# Add the widget containing the toolpalettegroup to the tab layout
self.get_tab_layout().addWidget(widget)
self.get_tab_layout().addItem(QSpacerItem(0,0,QSizePolicy.Minimum,QSizePolicy.MinimumExpanding))
# This method should be overridden in your device class if you want to save any data not
# stored in an AO, DO, Image or DDS object
# This method should return a dictionary, and this dictionary will be passed to the restore_save_data()
# method when the tab is initialised
def get_save_data(self):
return {}
    # This method should be overridden in your device class if you want to restore data
    # (saved by get_save_data()) when the tab is initialised.
# You will be passed a dictionary of the form specified by your get_save_data() method
#
# Note: You must handle the case where the data dictionary is empty (or one or more keys are missing)
# This case will occur the first time BLACS is started on a PC, or if the BLACS datastore is destroyed
def restore_save_data(self,data):
return
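    # A minimal sketch of a matching pair of overrides (the 'gain' key is hypothetical;
    # note that restore_save_data() must tolerate a missing key, as described above):
    #
    #     def get_save_data(self):
    #         return {'gain': self.gain}
    #
    #     def restore_save_data(self, data):
    #         self.gain = data.get('gain', 1.0)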
def update_from_settings(self,settings):
Tab.update_from_settings(self, settings)
self.restore_save_data(settings['saved_data'])
self.settings = settings
for output in [self._AO, self._DO, self._image]:
for name,channel in output.items():
if not channel._locked:
channel._update_from_settings(settings)
for name,channel in self._DDS.items():
for subchnl_name in channel._sub_channel_list:
if hasattr(channel,subchnl_name):
subchnl = getattr(channel,subchnl_name)
if not subchnl._locked:
subchnl._update_from_settings(settings)
def get_front_panel_values(self):
return {channel:item.value for output in [self._AO,self._DO,self._image,self._DDS] for channel,item in output.items()}
def get_channel(self,channel):
if channel in self._AO:
return self._AO[channel]
elif channel in self._DO:
return self._DO[channel]
elif channel in self._image:
return self._image[channel]
elif channel in self._DDS:
return self._DDS[channel]
else:
return None
# Only allow this to be called when we are in MODE_MANUAL and keep it queued up if we are not
    # When pulling out the state from the state queue, we check to see if there is an adjacent state that is more recent, and use that one
    # (or whichever is the latest without encountering a different state).
    # This prevents 'a million' calls to program_device from executing, potentially slowing down the system
@define_state(MODE_MANUAL,True,delete_stale_states=True)
def program_device(self):
self._last_programmed_values = self.get_front_panel_values()
# get rid of any "remote values changed" dialog
self._changed_widget.hide()
results = yield(self.queue_work(self._primary_worker,'program_manual',self._last_programmed_values))
for worker in self._secondary_workers:
if results:
returned_results = yield(self.queue_work(worker,'program_manual',self._last_programmed_values))
results.update(returned_results)
# If the worker process returns something, we assume it wants us to coerce the front panel values
if results:
for channel,remote_value in results.items():
if channel not in self._last_programmed_values:
raise RuntimeError('The worker function program_manual for device %s is returning data for channel %s but the BLACS tab is not programmed to handle this channel'%(self.device_name,channel))
output = self.get_channel(channel)
if output is None:
raise RuntimeError('The channel %s on device %s is in the last programmed values, but is not in the AO, DO or DDS output store. Something has gone badly wrong!'%(channel,self.device_name))
else:
# TODO: Only do this if the front panel values match what we asked to program (eg, the user hasn't changed the value since)
if output.value == self._last_programmed_values[channel]:
output.set_value(remote_value,program=False)
# Update the last_programmed_values
self._last_programmed_values[channel] = remote_value
@define_state(MODE_MANUAL,True)
def check_remote_values(self):
self._last_remote_values = yield(self.queue_work(self._primary_worker,'check_remote_values'))
for worker in self._secondary_workers:
if self._last_remote_values:
returned_results = yield(self.queue_work(worker,'check_remote_values'))
self._last_remote_values.update(returned_results)
# compare to current front panel values and prompt the user if they don't match
# We compare to the last_programmed values so that it doesn't get confused if the user has changed the value on the front panel
# and the program_manual command is still queued up
# If no results were returned, raise an exception so that we don't keep calling this function over and over again,
# filling up the text box with the same error, eventually consuming all CPU/memory of the PC
if not self._last_remote_values or type(self._last_remote_values) != type({}):
raise Exception('Failed to get remote values from device. Is it still connected?')
# A variable to indicate if any of the channels have a changed value
overall_changed = False
# A place to store radio buttons in
self._changed_radio_buttons = {}
# Clean up the previously used layout
while not self._ui.changed_layout.isEmpty():
item = self._ui.changed_layout.itemAt(0)
# This is the only way I could make the widget actually be removed.
# using layout.removeItem/removeWidget causes the layout to still draw the old item in its original space, and
# then draw new items over the top of the old. Very odd behaviour, could be a windows 8 bug I suppose!
item.widget().setParent(None)
#TODO: somehow maintain the state of the radio buttons for specific channels between refreshes of this changed dialog.
# TODO: Use the proper sort algorithm as defined for placing widgets to order this prompt
# We expect a dictionary of channel:value pairs
for channel in sorted(self._last_remote_values):
remote_value = self._last_remote_values[channel]
if channel not in self._last_programmed_values:
raise RuntimeError('The worker function check_remote_values for device %s is returning data for channel %s but the BLACS tab is not programmed to handle this channel'%(self.device_name,channel))
# A variable to indicate if this channel has changed
changed = False
if channel in self._DDS:
front_value = self._last_programmed_values[channel]
# format the entries for the DDS object correctly, then compare
front_values_formatted = {}
remote_values_formatted = {}
for sub_chnl in front_value:
if sub_chnl not in remote_value:
raise RuntimeError('The worker function check_remote_values has not returned data for the sub-channel %s in channel %s'%(sub_chnl,channel))
if sub_chnl == 'gate':
front_values_formatted[sub_chnl] = str(bool(int(front_value[sub_chnl])))
remote_values_formatted[sub_chnl] = str(bool(int(remote_value[sub_chnl])))
else:
decimals = self._DDS[channel].__getattribute__(sub_chnl)._decimals
front_values_formatted[sub_chnl] = ("%."+str(decimals)+"f")%front_value[sub_chnl]
remote_values_formatted[sub_chnl] = ("%."+str(decimals)+"f")%remote_value[sub_chnl]
if front_values_formatted[sub_chnl] != remote_values_formatted[sub_chnl]:
changed = True
if changed:
ui = UiLoader().load(os.path.join(BLACS_DIR, 'tab_value_changed_dds.ui'))
ui.channel_label.setText(self._DDS[channel].name)
for sub_chnl in front_value:
ui.__getattribute__('front_%s_value'%sub_chnl).setText(front_values_formatted[sub_chnl])
ui.__getattribute__('remote_%s_value'%sub_chnl).setText(remote_values_formatted[sub_chnl])
# Hide unused sub_channels of this DDS
for sub_chnl in self._DDS[channel].get_unused_subchnl_list():
ui.__getattribute__('front_%s_value'%sub_chnl).setVisible(False)
ui.__getattribute__('front_%s_label'%sub_chnl).setVisible(False)
ui.__getattribute__('remote_%s_value'%sub_chnl).setVisible(False)
ui.__getattribute__('remote_%s_label'%sub_chnl).setVisible(False)
elif channel in self._DO:
# This is an easy case!
front_value = str(bool(int(self._last_programmed_values[channel])))
remote_value = str(bool(int(remote_value)))
if front_value != remote_value:
changed = True
ui = UiLoader().load(os.path.join(BLACS_DIR, 'tab_value_changed.ui'))
ui.channel_label.setText(self._DO[channel].name)
ui.front_value.setText(front_value)
ui.remote_value.setText(remote_value)
elif channel in self._AO:
                # An intermediately complicated case!
front_value = ("%."+str(self._AO[channel]._decimals)+"f")%self._last_programmed_values[channel]
remote_value = ("%."+str(self._AO[channel]._decimals)+"f")%remote_value
if front_value != remote_value:
changed = True
ui = UiLoader().load(os.path.join(BLACS_DIR, 'tab_value_changed.ui'))
ui.channel_label.setText(self._AO[channel].name)
ui.front_value.setText(front_value)
ui.remote_value.setText(remote_value)
else:
raise RuntimeError('device_base_class.py is not programmed to handle channel types other than DDS, AO and DO in check_remote_values')
if changed:
overall_changed = True
# Add the changed widget for this channel to a layout!
self._ui.changed_layout.addWidget(ui)
# save the radio buttons so that we can access their state later!
self._changed_radio_buttons[channel] = ui.use_remote_values
if overall_changed:
# TODO: Disable all widgets for this device, including virtual device widgets...how do I do that?????
# Probably need to add a disable/enable method to analog/digital/DDS widgets that disables the widget and is orthogonal to the lock/unlock system
# Should probably set a tooltip on the widgets too explaining why they are disabled!
# self._device_widget.setSensitive(False)
# show the remote_values_change dialog
self._changed_widget.show()
# Add an "apply" button and link to on_resolve_value_inconsistency
buttonWidget = QWidget()
buttonlayout = QHBoxLayout(buttonWidget)
button = QPushButton(QIcon(':/qtutils/fugue/arrow-turn-000-left'), "Apply")
button.clicked.connect(self.on_resolve_value_inconsistency)
buttonlayout.addWidget(button)
buttonlayout.addStretch()
self._ui.changed_layout.addWidget(buttonWidget)
def on_resolve_value_inconsistency(self):
# get the values and update the device/front panel
needs_programming = False
for channel,radio in self._changed_radio_buttons.items():
if radio.isChecked():
output = self.get_channel(channel)
if output is None:
raise RuntimeError('on_resolve_value_inconsistency is being asked to handle a channel that is not a DDS, AO or DO (channel: %s, device: %s)'%(channel,self.device_name))
# The device already has this value, so no need to program it!
output.set_value(self._last_remote_values[channel],program=False)
else:
# we only need to program the device if one or more channels is using the front panel value
needs_programming = True
if needs_programming:
self.program_device()
else:
# Now that the inconsistency is resolved, Let's update the "last programmed values"
# to match the remote values
self._last_programmed_values = self.get_front_panel_values()
self._changed_widget.hide()
@define_state(MODE_BUFFERED,True)
def start_run(self,notify_queue):
raise NotImplementedError('The device %s has not implemented a start method and so cannot be used to trigger the experiment to begin. Please implement the start method or use a different pseudoclock as the master pseudoclock'%self.device_name)
@define_state(MODE_MANUAL,True)
def transition_to_buffered(self,h5_file,notify_queue):
# Get rid of any "remote values changed" dialog
self._changed_widget.hide()
self.mode = MODE_TRANSITION_TO_BUFFERED
h5_file = path_to_agnostic(h5_file)
# transition_to_buffered returns the final values of the run, to update the GUI with at the end of the run:
transitioned_called = [self._primary_worker]
front_panel_values = self.get_front_panel_values()
self._final_values = yield(self.queue_work(self._primary_worker,'_transition_to_buffered',self.device_name,h5_file,front_panel_values,self._force_full_buffered_reprogram))
if self._final_values is not None:
for worker in self._secondary_workers:
transitioned_called.append(worker)
extra_final_values = yield(self.queue_work(worker,'_transition_to_buffered',self.device_name,h5_file,front_panel_values,self.force_full_buffered_reprogram))
if extra_final_values is not None:
self._final_values.update(extra_final_values)
else:
self._final_values = None
break
# If we get None back, then the worker process did not finish properly
if self._final_values is None:
notify_queue.put([self.device_name,'fail'])
self.abort_transition_to_buffered(transitioned_called)
else:
if self._supports_smart_programming:
self.force_full_buffered_reprogram = False
self._ui.button_clear_smart_programming.setEnabled(True)
# Tell the queue manager that we're done:
self.mode = MODE_BUFFERED
notify_queue.put([self.device_name,'success'])
@define_state(MODE_TRANSITION_TO_BUFFERED,False)
def abort_transition_to_buffered(self,workers=None):
if workers is None:
workers = [self._primary_worker]
workers.extend(self._secondary_workers)
success = True
for worker in workers:
abort_success = yield(self.queue_work(worker,'abort_transition_to_buffered'))
if not abort_success:
success = False
# don't break here, so that as much of the device is returned to normal
if success:
self.mode = MODE_MANUAL
self.program_device()
else:
raise Exception('Could not abort transition_to_buffered. You must restart this device to continue')
@define_state(MODE_BUFFERED,False)
def abort_buffered(self,notify_queue):
success = yield(self.queue_work(self._primary_worker,'abort_buffered'))
for worker in self._secondary_workers:
abort_success = yield(self.queue_work(worker,'abort_buffered'))
if not abort_success:
success = False
# don't break here, so that as much of the device is returned to normal
if success:
notify_queue.put([self.device_name,'success'])
self.mode = MODE_MANUAL
self.program_device()
else:
notify_queue.put([self.device_name,'fail'])
raise Exception('Could not abort the buffered sequence. You must restart this device to continue')
@define_state(MODE_BUFFERED,False)
def transition_to_manual(self,notify_queue,program=False):
self.mode = MODE_TRANSITION_TO_MANUAL
success = yield(self.queue_work(self._primary_worker,'transition_to_manual'))
for worker in self._secondary_workers:
transition_success = yield(self.queue_work(worker,'transition_to_manual'))
if not transition_success:
success = False
# don't break here, so that as much of the device is returned to normal
# Update the GUI with the final values of the run:
for channel, value in self._final_values.items():
if channel in self._AO:
self._AO[channel].set_value(value,program=False)
elif channel in self._DO:
self._DO[channel].set_value(value,program=False)
elif channel in self._image:
self._image[channel].set_value(value,program=False)
elif channel in self._DDS:
self._DDS[channel].set_value(value,program=False)
if success:
notify_queue.put([self.device_name,'success'])
self.mode = MODE_MANUAL
else:
notify_queue.put([self.device_name,'fail'])
raise Exception('Could not transition to manual. You must restart this device to continue')
if program:
self.program_device()
else:
self._last_programmed_values = self.get_front_panel_values()
class DeviceWorker(Worker):
def init(self):
# You read correctly, this isn't __init__, it's init. It's the
# first thing that will be called in the new process. You should
# do imports here, define instance variables, that sort of thing. You
# shouldn't import the hardware modules at the top of your file,
# because then they will be imported in both the parent and
        # the child processes and won't be cleanly restarted when the subprocess
# is restarted. Since we're inside a method call though, you'll
# have to use global statements for the module imports, as shown
# below. Either that or you can make them instance variables, ie:
# import module; self.module = module. Up to you, I prefer
# the former.
global serial; import serial
global time; import time
self.fpv = {}
def initialise(self):
pass
def shutdown(self):
pass
def program_manual(self,front_panel_values):
for channel,value in front_panel_values.items():
if type(value) != type(True):
front_panel_values[channel] += 0.001
self.fpv = front_panel_values
return front_panel_values
def check_remote_values(self):
front_panel_values = {}
for channel,value in self.fpv.items():
if type(value) != type(True):
front_panel_values[channel] = value + 1.1
else:
front_panel_values[channel] = not value
if not front_panel_values:
front_panel_values['ao0'] = 0
return front_panel_values
def transition_to_buffered(self,device_name,h5file,front_panel_values,refresh):
time.sleep(3)
for channel,value in front_panel_values.items():
if type(value) != type(True):
front_panel_values[channel] += 0.003
return front_panel_values
def abort_transition_to_buffered(self):
pass
def abort_buffered(self):
pass
def transition_to_manual(self):
return True
if __name__ == '__main__':
import sys
import logging.handlers
# Setup logging:
logger = logging.getLogger('BLACS')
handler = logging.handlers.RotatingFileHandler(os.path.join(BLACS_DIR, 'BLACS.log'), maxBytes=1024**2, backupCount=0)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
if sys.stdout is not None and sys.stdout.isatty():
terminalhandler = logging.StreamHandler(sys.stdout)
terminalhandler.setFormatter(formatter)
terminalhandler.setLevel(logging.DEBUG)
logger.addHandler(terminalhandler)
else:
        sys.stdout = sys.stderr = open(os.devnull, 'w')
logger.setLevel(logging.DEBUG)
#labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
if __name__ == '__main__':
# Test case!
from connections import ConnectionTable
from labscript_utils.qtwidgets.dragdroptab import DragDropTabWidget
class MyTab(DeviceTab):
def initialise_GUI(self):
# Create Digital Output Objects
do_prop = {}
for i in range(32):
do_prop['port0/line%d'%i] = {}
self.create_digital_outputs(do_prop)
# Create Analog Output objects
ao_prop = {}
for i in range(4):
ao_prop['ao%d'%i] = {'base_unit':'V',
'min':-10.0,
'max':10.0,
'step':0.01,
'decimals':3
}
self.create_analog_outputs(ao_prop)
# Create widgets for output objects
dds_widgets,ao_widgets,do_widgets = self.auto_create_widgets()
            # This function allows you to sort the order of widgets by hardware name.
            # It is passed to the Python 'sorted' function as key=sort when passed in as
            # the 3rd item of a tuple (the tuple being an argument of self.auto_place_widgets()).
#
            # This function takes the channel name (hardware name) and returns a string (or whatever)
            # that, when sorted alphabetically, gives the correct order
def sort(channel):
port,line = channel.replace('port','').replace('line','').split('/')
port,line = int(port),int(line)
return '%02d/%02d'%(port,line)
# and auto place them in the UI
self.auto_place_widgets(("DDS Outputs",dds_widgets),("Analog Outputs",ao_widgets),("Digital Outputs - Port 0",do_widgets,sort))
# Set the primary worker
self.create_worker("my_worker_name",DeviceWorker,{})
self.primary_worker = "my_worker_name"
self.create_worker("my_secondary_worker_name",DeviceWorker,{})
self.add_secondary_worker("my_secondary_worker_name")
self.supports_remote_value_check(True)
# Create buttons to test things!
button1 = QPushButton("Transition to Buffered")
if PY2:
from Queue import Queue
else:
from queue import Queue
button1.clicked.connect(lambda: self.transition_to_buffered('',Queue()))
self.get_tab_layout().addWidget(button1)
button2 = QPushButton("Transition to Manual")
button2.clicked.connect(lambda: self.transition_to_manual(Queue()))
self.get_tab_layout().addWidget(button2)
connection_table = ConnectionTable(r'example_connection_table.h5')
class MyWindow(QWidget):
def __init__(self,*args,**kwargs):
QWidget.__init__(self,*args,**kwargs)
self.are_we_closed = False
def closeEvent(self,event):
if not self.are_we_closed:
event.ignore()
self.my_tab.shutdown()
self.are_we_closed = True
QTimer.singleShot(1000,self.close)
else:
if not self.my_tab.shutdown_complete:
QTimer.singleShot(1000,self.close)
else:
event.accept()
def add_my_tab(self,tab):
self.my_tab = tab
app = QApplication(sys.argv)
window = MyWindow()
layout = QVBoxLayout(window)
notebook = DragDropTabWidget()
layout.addWidget(notebook)
tab1 = MyTab(notebook,settings = {'device_name': 'ni_pcie_6363_0', 'connection_table':connection_table})
window.add_my_tab(tab1)
window.show()
def run():
app.exec_()
sys.exit(run())
| 48.959724 | 251 | 0.577164 |
8c5a397a6c6056e5736dd015d3044b09e322cbe1 | 30,502 | py | Python | ml-dft-sandia/networks/models/utils/trained_model_inference.py | DanielKotik/mala | 1b89a78f5ddecb1df21d2753715001ffe4250fc1 | [
"BSD-3-Clause"
] | 11 | 2021-09-06T18:59:24.000Z | 2022-03-17T13:36:52.000Z | ml-dft-sandia/networks/models/utils/trained_model_inference.py | DanielKotik/mala | 1b89a78f5ddecb1df21d2753715001ffe4250fc1 | [
"BSD-3-Clause"
] | 92 | 2021-09-04T06:38:14.000Z | 2022-03-31T11:01:49.000Z | ml-dft-sandia/networks/models/utils/trained_model_inference.py | DanielKotik/mala | 1b89a78f5ddecb1df21d2753715001ffe4250fc1 | [
"BSD-3-Clause"
] | 10 | 2021-09-07T13:03:30.000Z | 2022-03-23T07:49:14.000Z | import numpy as np
import os, sys
import re
from glob import glob
import matplotlib
import matplotlib.pyplot as plt
#import ldos_calc
import DFT_calculators
from functools import partial
import argparse
import horovod.torch as hvd
from scipy.stats import gaussian_kde
import torch
import pickle
sys.path.append("../fp_ldos_feedforward/src")
import fp_ldos_networks
import train_networks
import data_loaders
sys.path.append("../fp_ldos_feedforward/src/charm/clustering")
import cluster_fingerprints
sys.path.append("../fp_ldos_feedforward/src/charm")
import big_data
# Training settings
parser = argparse.ArgumentParser(description='FP-LDOS Feedforward Network')
# Training
parser.add_argument('--temp', type=str, default="298K", metavar='T',
help='temperature of snapshot to train on (default: "298K")')
parser.add_argument('--gcc', type=str, default="2.699", metavar='GCC',
help='density of snapshot to train on (default: "2.699")')
parser.add_argument('--elvls', type=int, default=250, metavar='ELVLS',
help='number of ldos energy levels (default: 250)')
parser.add_argument('--nxyz', type=int, default=200, metavar='GCC',
help='number of x/y/z elements (default: 200)')
parser.add_argument('--dos-snapshot', type=int, default=2, metavar='N',
help='snapshot of temp/gcc with DOS file for egrid (default: 2)')
parser.add_argument('--max-snapshots', type=int, default=10, metavar='N',
help='max number of inference snapshots due to memory restrictions (default: 10)')
parser.add_argument('--snapshot-offset', type=int, default=0, metavar='N',
help='which snapshot to begin inference (default: 0)')
#parser.add_argument('--num-atoms', type=int, default=256, metavar='N',
# help='number of atoms in the snapshot (default: 256)')
parser.add_argument('--num-slices', type=int, default=1, metavar='N',
help='number of density z slices to plot (default: 1)')
#parser.add_argument('--no_convert', action='store_true', default=False,
# help='Do not Convert units from Rydbergs')
#parser.add_argument('--log', action='store_true', default=False,
# help='Apply log function to densities')
parser.add_argument('--cpu', action='store_true', default=False,
help='Run inference of a GPU trained model on a CPU machine.')
parser.add_argument('--integration', type=str, default="analytic", metavar='T',
choices=["analytic", "trapz", "simps", "quad"],
help='type of integration from {"trapz", "simps", "quad", "analytic"} (default: "analytic")')
# Directory Locations
parser.add_argument('--pred-dir', type=str, \
default="../fp_ldos_feedforward/output", \
metavar="str", help='path to ldos prediction directory (default: ../fp_ldos_feedforward/output)')
parser.add_argument('--ldos-dir', type=str, \
default="../../training_data/ldos_data", \
metavar="str", help='path to ldos data directory (default: ../../training_data/ldos_data)')
parser.add_argument('--fp-dir', type=str, \
default="../../training_data/fp_data", \
metavar="str", help='path to fp data (QE .out files) directory (default: ../../training_data/fp_data)')
parser.add_argument('--output-dir', type=str, \
default="./inference_figs", \
metavar="str", help='path to output directory (default: ./inference_figs)')
args = parser.parse_args()
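# Example invocation (hypothetical paths; every flag used here is defined above, and the
# script may also be launched through horovodrun/mpirun since it calls hvd.init()):
#   python trained_model_inference.py --temp 298K --gcc 2.699 --elvls 250 --nxyz 200 \
#       --pred-dir ../fp_ldos_feedforward/output --ldos-dir ../../training_data/ldos_data \
#       --fp-dir ../../training_data/fp_data --output-dir ./inference_figs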
hvd.init()
if (hvd.rank() == 0):
print("\n\nBegin LDOS inference\n\n")
# Liquid
#fig_snapshots = [9]
# Solid
#fig_snapshots = [19]
# Hybrid
#fig_snapshots = [0, 19]
# All
fig_snapshots = [0,9,10,19]
tempv = int(args.temp[:-1])
DFT_calculators.temp = tempv
DFT_calculators.gcc = float(args.gcc)
#print("Tempv: %d" % tempv)
#DFT_calculators.set_temp(tempv)
#DFT_calculators.set_gcc(args.gcc)
# Conversion factor from Rydberg to eV
#Ry2eV = ldos_calc.get_Ry2eV()
#ar_en = ldos_calc.get_energies(args.temp, args.gcc, args.elvls)
args.output_dirs = glob(args.pred_dir + "/*/")
if (hvd.rank() == 0):
print("\nProcessing these model directories: ")
for idx, current_dir in enumerate(args.output_dirs):
print("Directory %d: %s" % (idx, current_dir))
# DFT
#args.dft_files = sorted(glob(args.fp_dir + "/%s/%sgcc/QE*.out" % (args.temp, args.gcc)))
args.dft_files = glob(args.fp_dir + "/%s/%sgcc/QE*.out" % (args.temp, args.gcc))
args.dft_files.sort(key=lambda f: int(re.sub('\D', '', f)))
if (hvd.rank() == 0):
print("\nAvailable DFT files: ")
for idx, dft_file in enumerate(args.dft_files):
print("DFT %d: %s" % (idx, dft_file))
# FP
#args.fp_files = sorted(glob(args.fp_dir + "/%s/%sgcc/*fp*.npy" % (args.temp, args.gcc)))
args.fp_files = glob(args.fp_dir + "/%s/%sgcc/*fp*.npy" % (args.temp, args.gcc))
args.fp_files.sort(key=lambda f: int(re.sub('\D', '', f)))
if (hvd.rank() == 0):
print("\nAvailable FP files: ")
for idx, fp_file in enumerate(args.fp_files):
print("FP %d: %s" % (idx, fp_file))
# LDOS
#args.ldos_files = sorted(glob(args.ldos_dir + "/%s/%sgcc/*ldos*.npy" % (args.temp, args.gcc)))
args.ldos_files = glob(args.ldos_dir + "/%s/%sgcc/*ldos*.npy" % (args.temp, args.gcc))
args.ldos_files.sort(key=lambda f: int(re.sub('\D', '', f)))
if (hvd.rank() == 0):
print("\nAvailable LDOS files: ")
for idx, ldos_file in enumerate(args.ldos_files):
print("LDOS %d: %s" % (idx, ldos_file))
args.snapshots = np.min([len(args.fp_files), len(args.ldos_files)])
#if (args.snapshots > args.max_snapshots):
# args.snapshots = args.max_snapshots
# Testing
#args.snapshot_offset = 0;
#args.snapshot_offset = 10;
#args.snapshots = 10;
#exit(0);
for i in range(args.snapshot_offset + args.max_snapshots, args.snapshots):
del args.dft_files[args.snapshot_offset + args.max_snapshots]
del args.fp_files[args.snapshot_offset + args.max_snapshots]
del args.ldos_files[args.snapshot_offset + args.max_snapshots]
for i in range(args.snapshot_offset):
del args.dft_files[0]
del args.fp_files[0]
del args.ldos_files[0]
if (args.snapshots > args.max_snapshots):
args.snapshots = args.max_snapshots
if (hvd.rank() == 0):
print("\nProcessing these DFT files: ")
for idx, dft_file in enumerate(args.dft_files):
print("DFT %d: %s" % (idx, dft_file))
print("\nProcessing these FP files: ")
for idx, fp_file in enumerate(args.fp_files):
print("FP %d: %s" % (idx, fp_file))
print("\nProcessing these LDOS files: ")
for idx, ldos_file in enumerate(args.ldos_files):
print("LDOS %d: %s" % (idx, ldos_file))
#exit(0);
dft_results = []
true_dos = []
true_eband = []
true_enum = []
target_ldos = []
for i in range(args.snapshots):
if (hvd.rank() == 0):
print("\nCalculating True and Target DFT results for snapshot%d" % (i + args.snapshot_offset))
# dft_fname = args.fp_dir + "/%s/%sgcc/QE_Al.scf.pw.snapshot%d.out" % \
# (args.temp, args.gcc, i)
# target_ldos_fname = args.ldos_dir + "/%s/%sgcc/Al_ldos_%dx%dx%dgrid_%delvls_snapshot%d.npy" % \
# (args.temp, args.gcc, args.nxyz, args.nxyz, args.nxyz, args.elvls, i)
qe_dos_fname = args.ldos_dir + "/%s/%sgcc/Al_dos_%delvls_snapshot%d.txt" % \
(args.temp, args.gcc, args.elvls, args.dos_snapshot)
if (hvd.rank() == 0):
print("Calculating DFT Eigen Results.")
### DFT Eigen Results ###
dft_results.append(DFT_calculators.DFT_results(args.dft_files[i]))
if (os.path.exists(qe_dos_fname)):
# QE Dos Results to get dos_e_grid
qe_dos = DFT_calculators.DOS.from_dos_file(dft_results[i], qe_dos_fname)
dos_e_grid = qe_dos.e_grid
sigma = dos_e_grid[1] - dos_e_grid[0]
# Smearing of 2sigma
wide_gaussian = partial(DFT_calculators.gaussian, sigma = 2.0 * sigma)
true_dos.append(DFT_calculators.DOS.from_calculation(dft_results[i], dos_e_grid, wide_gaussian))
true_efermi = DFT_calculators.dos_2_efermi(true_dos[i], tempv, integration=args.integration)
# print("ef1: ", true_efermi)
true_eband.append(DFT_calculators.dft_2_eband(dft_results[i], e_fermi='sc', temperature=tempv))
true_enum.append(DFT_calculators.dft_2_enum(dft_results[i], e_fermi='sc', temperature=tempv))
#true_enum = DFT_calculators.dft_2_enum(dft_results, e_fermi=true_efermi, temperature=tempv)
#exit(0)
# print("Calculating QE DOS Results.")
### QE DOS Results ###
# qe_dos_efermi = DFT_calculators.dos_2_efermi(qe_dos, tempv, integration=args.integration)
# qe_dos_eband = DFT_calculators.dos_2_eband(qe_dos, e_fermi=qe_dos_efermi, \
# temperature=tempv, integration=args.integration)
# qe_dos_enum = DFT_calculators.dos_2_enum(qe_dos, e_fermi=qe_dos_efermi, \
# temperature=tempv, integration=args.integration)
if (hvd.rank() == 0):
print("Calculating Target LDOS Results.")
### Target LDOS Results ###
ldos_e_grid = dos_e_grid[:-1]
target_ldos.append(DFT_calculators.LDOS(dft_results[i], ldos_e_grid, args.ldos_files[i]))
target_ldos[i].do_calcs()
print("TRUE BE: %4.4f, TARGET LDOS BE: %4.4f, DIFF %4.4f" % (true_eband[i], target_ldos[i].eband, (target_ldos[i].eband - true_eband[i])))
#target_dos = qe_ldos.dos
#target_dos_efermi = DFT_calculators.dos_2_efermi(target_dos, tempv, integration=args.integration)
#target_eband = DFT_calculators.dos_2_eband(target_dos, e_fermi=target_dos_efermi, temperature=tempv, integration=args.integration)
#target_enum = DFT_calculators.dos_2_enum(target_dos, e_fermi=target_dos_efermi, temperature=tempv, integration=args.integration)
if (hvd.rank() == 0):
print("\n\nCalculating ML Predicted LDOS Results.")
#hvd.shutdown();
#exit(0);
#models_idx = 0
#models_limit = 1
for idx, current_dir in enumerate(args.output_dirs):
if (hvd.rank() == 0):
print("\n\nPerforming inference for Model %d from output_dir: %s" % (idx, current_dir))
# pred_ldos_fname = current_dir + "fp_ldos_predictions.npy"
pred_ldos_fname = current_dir + "fp_ldos_model.pth"
if ("fp_ldos_dir" in current_dir):
print("Found dir: %s. Processing." % current_dir)
elif (not os.path.exists(pred_ldos_fname)):
if (hvd.rank() == 0):
print("Skipped %s! No predictions." % current_dir)
continue;
# if (models_idx >= models_limit):
# continue
# models_idx += 1
# ML Predictions
# pred_ldos = np.load(current_dir + "fp_ldos_predictions.npy")
# pred_ldos = DFT_calculators.LDOS(dft_results, ldos_e_grid, pred_ldos_fname)
# if (pred_ldos.ldos.shape[3] != args.elvls):
# if (hvd.rank() == 0):
# print("Skipped %s! Bad elvls." % current_dir)
# continue;
fp_scaler_fname = glob(current_dir + "charm_input*")
ldos_scaler_fname = glob(current_dir + "charm_output*")
# fp_factor_fname = glob(current_dir + "fp_row*.npy")
# ldos_factor_fname = glob(current_dir + "ldos_*.npy")
# shift_fname = glob(current_dir + "log_shift.npy")
if (hvd.rank() == 0):
print("Found fp scaler file: %s" % fp_scaler_fname[0])
print("Found ldos scaler file: %s" % ldos_scaler_fname[0])
# Normalization factors
fp_scaler = torch.load(fp_scaler_fname[0])
ldos_scaler = torch.load(ldos_scaler_fname[0])
# fp_row_norms = "row" in fp_factor_fname[0]
# fp_minmax_norms = "max" in fp_factor_fname[0]
# ldos_row_norms = "row" in ldos_factor_fname[0]
# ldos_minmax_norms = "max" in ldos_factor_fname[0]
model_fpath = current_dir + "fp_ldos_model.pth"
args_fpath = current_dir + "commandline_args.pth"
# args_fpath = current_dir + "commandline_args.pkl"
if (os.path.exists(model_fpath) and os.path.exists(args_fpath)):
if (hvd.rank() == 0):
print("Found model: %s, args: %s" % (model_fpath, args_fpath))
else:
raise ValueError('No model/args path found.')
model_args = torch.load(args_fpath)
if (args.cpu):
model = torch.load(model_fpath, map_location='cpu')
model_args.cuda = False
else:
model = torch.load(model_fpath)
model = model.eval()
# margs_file = open(args_fpath, "rb")
# model_args = pickle.load(open(args_fpath, "rb"))
# margs_file.close()
# print(model_args.fp_length)
# exit(0);
old_fp = None
old_ldos = None
predictions = None
inference_fp = None
for i in range(args.snapshots):
if (hvd.rank() == 0):
print("\nWorking on Snapshot%d" % (i + args.snapshot_offset))
old_fp = inference_fp
# Loading FP and transforming
inference_fp = np.load(args.fp_files[i])
# Remove Coords
inference_fp = inference_fp[:,:,:, 3:]
inference_fp = np.reshape(inference_fp, [model_args.grid_pts, model_args.fp_length])
# inference_fp = np.reshape(inference_fp, [args.nxyz ** 3, 91])
# print(fp_factors.shape)
if (hvd.rank() == 0):
print("\nTransforming model input FPs")
# for row in range(model_args.fp_length):
# inference_fp[:, row] = (inference_fp[:, row] - fp_factors[0, row]) / fp_factors[1, row]
inference_fp = fp_scaler.do_scaling_sample(inference_fp)
if (False and i > 0):
print("Old/New FP Diffs: ")
for row in range(model_args.fp_length):
mind = np.min(abs(inference_fp[:, row] - old_fp[:, row]))
mend = np.mean(abs(inference_fp[:, row] - old_fp[:, row]))
maxd = np.max(abs(inference_fp[:, row] - old_fp[:, row]))
print("MIN/MEAN/MAX DIFFS: %4.4f %4.4f %4.4f" % (mind, mend, maxd))
inference_fp_dataset = torch.utils.data.TensorDataset(torch.tensor(inference_fp, dtype=torch.float32),
torch.ones([model_args.grid_pts, 1], dtype=torch.float32))
if (hvd.rank() == 0):
print("\nBuilding Sampler/Loader")
inference_sampler = torch.utils.data.sampler.SequentialSampler(inference_fp_dataset)
inference_loader = torch.utils.data.DataLoader(inference_fp_dataset, \
batch_size=model_args.test_batch_size, \
sampler=inference_sampler)
hvd.allreduce(torch.tensor(0), name='barrier')
# Running Model
old_ldos = predictions
predictions = np.empty([model_args.grid_pts, model_args.ldos_length])
hidden_n = model.test_hidden
data_idx = 0
for batch_idx, (data, target) in enumerate(inference_loader):
if model_args.cuda:
data, target = data.cuda(), target.cuda()
output, hidden_n = model(data, hidden_n)
hidden_n = hidden_n[0].detach(), hidden_n[1].detach()
num_samples = output.shape[0]
if (model_args.cuda):
predictions[data_idx:data_idx + num_samples, :] = output.cpu().detach().numpy()
else:
predictions[data_idx:data_idx + num_samples, :] = output.detach().numpy()
data_idx += num_samples
            if (batch_idx % (model_args.log_interval * 10) == 0 and hvd.rank() == 0):
print("Test batch_idx %d of %d" % (batch_idx, len(inference_loader)))
if (hvd.rank() == 0):
print("Inference Done\n")
# exit(0);
if (False and i > 0):
print("Old/New LDOS Diffs: ")
for row in range(model_args.ldos_length):
mind = np.min(abs(predictions[:, row] - old_ldos[:, row]))
mend = np.mean(abs(predictions[:, row] - old_ldos[:, row]))
maxd = np.max(abs(predictions[:, row] - old_ldos[:, row]))
print("MIN/MEAN/MAX DIFFS: %4.4f %4.4f %4.4f" % (mind, mend, maxd))
# print("Done")
# exit(0);
pred_ldos = DFT_calculators.LDOS(dft_results[i], ldos_e_grid, predictions)
# Denormalizing LDOS
# if (hvd.rank() == 0):
# print("Denormalizing LDOS with row: %r, minmax: %r" % (ldos_row_norms, ldos_minmax_norms))
# if (ldos_row_norms):
# if(ldos_minmax_norms):
# for row, (minv, maxv) in enumerate(np.transpose(ldos_factors)):
# pred_ldos.ldos[:, :, :, row] = (pred_ldos.ldos[:, :, :, row] * (maxv - minv)) + minv
# else:
# for row, (meanv, stdv) in enumerate(np.transpose(ldos_factors)):
# pred_ldos.ldos[:, :, :, row] = (pred_ldos.ldos[:, :, :, row] * stdv) + meanv
# else:
# if(ldos_minmax_norms):
# for row, (minv, maxv) in enumerate(np.transpose(ldos_factors)):
# pred_ldos.ldos = (pred_ldos.ldos * (maxv - minv)) + minv
# else:
# for row, (meanv, stdv) in enumerate(np.transpose(ldos_factors)):
# pred_ldos.ldos = (pred_ldos.ldos * stdv) + meanv
#
# if (len(shift_fname) != 0):
#
# print("Reverting ldos log and shift")
# log_shift = np.load(shift_fname[0])
# pred_ldos.ldos = np.exp(pred_ldos.ldos) - log_shift
if (hvd.rank() == 0):
print("Denormalizing LDOS with charm_scaler")
pred_ldos.ldos = ldos_scaler.undo_scaling_sample(pred_ldos.ldos)
pred_ldos.do_calcs()
# pred_ldos = np.reshape(pred_ldos, target_ldos.shape)
print("Pred LDOS shape: ", pred_ldos.ldos.shape)
print("Target LDOS shape: ", target_ldos[i].ldos.shape)
# Density predictions
print("Pred_LDOS->Density")
print("Pred_Density shape: ", pred_ldos.density.shape)
print("Target_Density shape: ", target_ldos[i].density.shape)
# DOS predictions
print("Pred_LDOS->DOS")
print("Pred_DOS shape: ", pred_ldos.dos.dos.shape)
print("Target_DOS shape: ", target_ldos[i].dos.dos.shape)
# Band Energy predictions
# print("Pred_DOS->BandEnergy")
# pred_be = ldos_calc.dos_to_band_energy(pred_dos, args.temp, args.gcc, args.simple)
# print("Target_DOS->BandEnergy")
# target_be = ldos_calc.dos_to_band_energy(target_dos, args.temp, args.gcc, args.simple)
# Convert to Rydbergs
# if (not args.no_convert):
#
# pred_density = pred_density / Ry2eV
# true_density = true_density / Ry2eV
#
# pred_dos = pred_dos / Ry2eV
# true_dos = true_dos / Ry2eV
#
# pred_be = pred_be / (Ry2eV)
# true_be = true_be / (Ry2eV)
# Pred/Target Error Results
print("\nPred/Target Density Min: %f, %f" % (np.min(pred_ldos.density), np.min(target_ldos[i].density)))
print("Pred/Target Density Max: %f, %f" % (np.max(pred_ldos.density), np.max(target_ldos[i].density)))
print("Pred/Target Density Mean: %f, %f" % (np.mean(pred_ldos.density), np.mean(target_ldos[i].density)))
print("\nError Density Min: ", np.min(abs(pred_ldos.density - target_ldos[i].density)))
print("Error Density Max: ", np.max(abs(pred_ldos.density - target_ldos[i].density)))
print("Error Density Mean: ", np.mean(abs(pred_ldos.density - target_ldos[i].density)))
print("\nPred/Target Error DOS L1_norm: ", np.linalg.norm((target_ldos[i].dos.dos - pred_ldos.dos.dos), ord=1))
print("Pred/Target Error DOS L2_norm: ", np.linalg.norm((target_ldos[i].dos.dos - pred_ldos.dos.dos), ord=2))
print("Pred/Target Error DOS Linf_norm: ", np.linalg.norm((target_ldos[i].dos.dos - pred_ldos.dos.dos), ord=np.inf))
#dos_rmse =
#print("Pred/Target Error DOS RMSE (Oxford): ", dos_rmse)
print("\n\nBand Energy Comparisons")
print("Pred BE: ", pred_ldos.eband)
print("Target BE: ", target_ldos[i].eband)
# print("QE DOS BE: ", qe_dos_eband)
print("DFT Eigen BE: ", true_eband[i])
print("\nPred/Target BE diff: ", (target_ldos[i].eband - pred_ldos.eband))
print("Pred/Target BE relative diff: ", (target_ldos[i].eband - pred_ldos.eband) / target_ldos[i].eband * 100)
print("Pred/Target BE meV/Atom diff: ", (target_ldos[i].eband - pred_ldos.eband) / dft_results[i].num_atoms * 1000)
print("\nPred/True Eigen BE diff: ", (true_eband[i] - pred_ldos.eband))
print("Pred/True Eigen BE relative diff: ", (true_eband[i] - pred_ldos.eband) / true_eband[i] * 100)
print("Pred/True Eigen BE meV/Atom diff: ", (true_eband[i] - pred_ldos.eband) / dft_results[i].num_atoms * 1000)
print("\n\nElectron Num Comparisons")
print("Pred ENUM: ", pred_ldos.enum)
print("Target ENUM: ", target_ldos[i].enum)
# print("QE DOS ENUM: ", qe_dos_enum)
print("DFT Eigen ENUM: ", true_enum[i])
print("\nPred/Target ENUM diff: ", (target_ldos[i].enum - pred_ldos.enum))
print("Pred/Target ENUM relative diff: ", (target_ldos[i].enum - pred_ldos.enum) / target_ldos[i].enum * 100)
print("Pred/Target ENUM electron/Atom diff: ", (target_ldos[i].enum - pred_ldos.enum) / dft_results[i].num_atoms)
print("\nPred/True Eigen ENUM diff: ", (true_enum[i] - pred_ldos.enum))
print("Pred/True Eigen ENUM relative diff: ", (true_enum[i] - pred_ldos.enum) / true_enum[i] * 100)
print("Pred/True Eigen ENUM electron/Atom diff: ", (true_enum[i] - pred_ldos.enum) / dft_results[i].num_atoms)
file_format = 'jpg'
if (i + args.snapshot_offset) in fig_snapshots:
# Create output dir if it doesn't exist
if not os.path.exists(args.output_dir):
print("\nCreating output folder %s\n" % args.output_dir)
os.makedirs(args.output_dir)
matplotlib.rcdefaults()
font = {'weight': 'normal', 'size': 13}
matplotlib.rc('font', **font)
# DOS and error plots
# Truncate dos
t_ldos_e_grid = ldos_e_grid[50:200]
t_target_dos = target_ldos[i].dos.dos[50:200]
t_pred_dos = pred_ldos.dos.dos[50:200]
print("Fermi Energy: ", target_ldos[i].e_fermi)
fig, (ax0, ax1) = plt.subplots(2,1)
#ax0.plot(dos_e_grid, true_dos[i].dos, "-k")
ax0.plot(t_ldos_e_grid, t_target_dos, "-k")
ax0.plot(t_ldos_e_grid, t_pred_dos, "--r")
ax0.plot([target_ldos[i].e_fermi, target_ldos[i].e_fermi], [80, 120], '-g')
ax0.legend(["DFT LDOS Target", "ML-DFT Prediction", "Fermi Energy"])
ax0.set_ylabel("DOS ($eV^{-1}$)")
ax0.set_ylim([0, 200])
#ax1.plot(ldos_e_grid, target_ldos[i].dos.dos - true_dos[i].dos[:-1], "-b")
ax1.plot(t_ldos_e_grid, t_pred_dos - t_target_dos, "-r")
ax1.legend(["ML-DFT Pred DOS Error"])
ax1.set_xlabel("Energy (eV)")
ax1.set_ylabel("DOS Error ($eV^{-1}$)")
ax1.set_ylim([-2.0, 2.0])
plt.tight_layout()
dos_fname = "/pred_target_true_dos_model%d_snapshot%d.%s" % (idx, (i + args.snapshot_offset), file_format)
plt.savefig(args.output_dir + dos_fname, format=file_format)
print("\nDOS plot created, Model: %d, Snapshot: %d" % (idx, (i + args.snapshot_offset)))
# Density Difference plots
fig, ax = plt.subplots(1, 1)
target_density = np.reshape(target_ldos[i].density, [args.nxyz ** 3])
pred_density = np.reshape(pred_ldos.density, [args.nxyz ** 3])
target_max = np.max(target_density)
target_max = .055  # hard-coded axis limit for the parity plot; overrides the computed maximum above
density_err_max = np.max(np.abs(target_density - pred_density))
density_err_mean = np.mean(np.abs(target_density - pred_density))
density_err_std = np.std(np.abs(target_density - pred_density))
print("Density error max: ", density_err_max)
print("Density error mean: ", density_err_mean)
print("Density error std: ", density_err_std)
# Color heat map
idxs = np.arange(200**3)
idxs = np.random.choice(idxs, 10000, replace=False)
x_mod = target_density[idxs]
y_mod = pred_density[idxs]
xy = np.vstack([x_mod, y_mod])
z = gaussian_kde(xy)(xy)
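# Note added for clarity (not in the original script): gaussian_kde evaluated at the
# sampled points gives a per-point density estimate that is only used to color the
# scatter plot below; the 10k random subsample keeps the KDE evaluation tractable.
# A minimal standalone sketch of the same idea (variable names are illustrative):
#   xy = np.vstack([x, y]); colors = gaussian_kde(xy)(xy)
#   plt.scatter(x, y, c=colors)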
# ax.plot(target_ldos.density, target_ldos.density / target_ldos.density, 'k-')
# ax.scatter(target_density, pred_density, c=z, s=100, edgecolor='')
im = ax.scatter(x_mod, y_mod, c=z, edgecolor='')
ax.plot([0, target_max], [0, target_max], 'k-')
ax.legend(['DFT LDOS Target Density', 'ML-DFT Pred Density Errors'])
ax.set_xlabel('DFT LDOS Target Electron Density ($e^{-}$/$A^{3}$)')
ax.set_ylabel('ML-DFT Pred Electron Density ($e^{-}$/$A^{3}$)')
# cbar = fig.colorbar(im, ax=ax)
ax.set_xlim([0, target_max])
ax.set_ylim([0, target_max])
plt.tight_layout()
density_fname = "/pred_target_density_diffs_model%d_snapshot%d.%s" % (idx, (i + args.snapshot_offset), file_format)
plt.savefig(args.output_dir + density_fname, format=file_format)
print("\nDensity diff plot created, Model: %d, Snapshot: %d" % (idx, (i + args.snapshot_offset)))
# exit(0);
# Density Error Slice Plots
# font = {'weight' : 'bold',
# 'size' : 40}
#
# matplotlib.rc('font', **font)
#
# z_slices = np.round(np.linspace(0, target_ldos.ldos.shape[2] - 1, args.num_slices)).astype(int)
#
# fig, ax = plt.subplots(args.num_slices,1)
#
# fig.set_figheight(20 * args.num_slices)
# fig.set_figwidth(20)
#
# cbar_err_max = np.max(np.abs(pred_ldos.density - target_ldos.density))
#
# for i in range(args.num_slices):
#
# gs = pred_ldos.cell_volume ** (1/3.)
#
# xgrid = np.linspace(0, target_ldos.ldos.shape[0], target_ldos.ldos.shape[0]) * gs
# ygrid = np.linspace(0, target_ldos.ldos.shape[1], target_ldos.ldos.shape[1]) * gs
#
# density_slice = np.abs(pred_ldos.density[:,:,z_slices[i]] - target_ldos.density[:,:,z_slices[i]])
#
# if (args.log):
# density_slice = np.log(density_slice)
#
# im = ax[i].contourf(xgrid, ygrid, density_slice, cmap="seismic")
# ax[i].set_title("Absolute Density Error Z-Slice at %3.1f" % (z_slices[i] * gs))
#
# cbar = fig.colorbar(im, ax=ax[i])
# cbar.set_clim(0.0, cbar_err_max)
#
# #plt.tight_layout()
#
#
# if (args.log):
# density_fname = "/pred_target_error_density_log_model%d_snapshot%d.eps" % (idx, i)
# else:
# density_fname = "/pred_target_error_density_model%d_snapshot%d.eps" % (idx, i)
#
# plt.savefig(args.output_dir + density_fname, format='eps')
#
#
#
# vmin_plot = 0.0
# vmax_plot = np.max([np.max(target_ldos.density), np.max(pred_ldos.density)])
#
# fig, ax = plt.subplots(args.num_slices,1)
#
# fig.set_figheight(20 * args.num_slices)
# fig.set_figwidth(20)
#
# for i in range(args.num_slices):
#
# gs = pred_ldos.cell_volume ** (1/3.)
#
# xgrid = np.linspace(0, pred_ldos.ldos.shape[0], pred_ldos.ldos.shape[0]) * gs
# ygrid = np.linspace(0, pred_ldos.ldos.shape[1], pred_ldos.ldos.shape[1]) * gs
#
# density_slice = pred_ldos.density[:,:,z_slices[i]]
#
# if (args.log):
# density_slice = np.log(density_slice)
#
# im = ax[i].contourf(xgrid, ygrid, density_slice, vmin=vmin_plot, vmax=vmax_plot, cmap="seismic")
# ax[i].set_title("Pred Density Z-Slice at %3.1f" % (z_slices[i] * gs))
#
# cbar = fig.colorbar(im, ax=ax[i])
# cbar.set_clim(0.0, vmax_plot)
#
# #plt.tight_layout()
#
#
# if (args.log):
# density_fname = "/pred_density_log_model%d_snapshot%d.eps" % (idx, i)
# else:
# density_fname = "/pred_density_model%d_snapshot%d.eps" % (idx, i)
#
# plt.savefig(args.output_dir + density_fname, format='eps')
#
#
#
# fig, ax = plt.subplots(args.num_slices,1)
# z_slices = np.array([100])
# fig.set_figheight(20 * args.num_slices)
# fig.set_figwidth(20)
# for i in range(args.num_slices):
# gs = target_ldos.cell_volume ** (1/3.)
# xgrid = np.linspace(0, target_ldos.ldos.shape[0], target_ldos.ldos.shape[0]) * gs
# ygrid = np.linspace(0, target_ldos.ldos.shape[1], target_ldos.ldos.shape[1]) * gs
# density_slice = target_ldos.density[:,:,z_slices[i]]
# if (args.log):
# density_slice = np.log(density_slice)
#im = ax[i].contourf(xgrid, ygrid, density_slice, vmin=vmin_plot, vmax=vmax_plot, cmap="seismic")
# im = ax[i].contourf(xgrid, ygrid, density_slice, cmap="seismic")
#ax[i].set_title("Target Density Z-Slice at %3.1f" % (z_slices[i] * gs))
# cbar = fig.colorbar(im, ax=ax[i])
# cbar.set_clim(0.0, vmax_plot)
# plt.tight_layout()
#if (args.log):
# density_fname = "/target_density_log_model%d_snapshot%d.eps" % (idx, i)
# else:
# density_fname = "/target_density_model%d_snapshot%d.%s" % (idx, i, file_format)
# plt.savefig(args.output_dir + density_fname, format=file_format)
# print("Density plots created")
plt.close('all')
# break
print("\n\nSuccess!\n\n")
hvd.shutdown()
| 35.884706 | 142 | 0.602354 |
c8f7851106cddfdd588a79cb02d5ea197b0d65d7 | 1,997 | py | Python | ros/src/twist_controller/twist_controller.py | chmod600/test-capstone | 4777f687ab78e9173d9993cb3b55087dde84e984 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | chmod600/test-capstone | 4777f687ab78e9173d9993cb3b55087dde84e984 | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | chmod600/test-capstone | 4777f687ab78e9173d9993cb3b55087dde84e984 | [
"MIT"
] | null | null | null | import rospy
from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, decel_limit,
wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
self.yaw_controller = YawController(
wheel_base,
steer_ratio,
0.1,
max_lat_accel,
max_steer_angle
)
kp = 0.3  # proportional gain
ki = 0.1  # integral gain
kd = 0.  # derivative gain
mn = 0.  # minimum throttle value
mx = 0.2  # maximum throttle value
self.throttle_controller = PID(kp, ki, kd, mn, mx)
tau = 0.5  # low-pass filter time constant
ts = 0.02  # sample time in seconds (~50 Hz control loop)
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.decel_limit = decel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
steering = self.yaw_controller.get_steering(
linear_vel,
angular_vel,
current_vel
)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
if linear_vel == 0. and current_vel < 0.1:
throttle = 0
brake = 400  # N*m of brake torque, enough to hold the car in place when stopped
elif throttle < .1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel) * self.vehicle_mass * self.wheel_radius  # torque = |deceleration| * vehicle mass * wheel radius
return throttle, brake, steering
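# --- Hedged usage sketch (added for illustration, not part of the original node) ---
# In the full project this controller is driven by a dbw_node at roughly 50 Hz; the
# parameter and velocity values below are assumptions for illustration only.
#
# controller = Controller(vehicle_mass=1736.35, decel_limit=-5.0, wheel_radius=0.2413,
#                         wheel_base=2.8498, steer_ratio=14.8, max_lat_accel=3.0,
#                         max_steer_angle=8.0)
# throttle, brake, steer = controller.control(current_vel=4.0, dbw_enabled=True,
#                                             linear_vel=5.0, angular_vel=0.1)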
| 26.626667 | 87 | 0.596395 |
9f193260532bddfa28670e0a6c21c9ac791d5db4 | 11,341 | py | Python | intersight/model/niatelemetry_fabric_pod_ss_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/niatelemetry_fabric_pod_ss_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/niatelemetry_fabric_pod_ss_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.asset_device_registration_relationship import AssetDeviceRegistrationRelationship
globals()['AssetDeviceRegistrationRelationship'] = AssetDeviceRegistrationRelationship
class NiatelemetryFabricPodSsAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'NIATELEMETRY.FABRICPODSS': "niatelemetry.FabricPodSs",
},
('object_type',): {
'NIATELEMETRY.FABRICPODSS': "niatelemetry.FabricPodSs",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'dn': (str,), # noqa: E501
'fabric_pod_prof': (str,), # noqa: E501
'pod_blk': (str,), # noqa: E501
'pod_pol_grp': (str,), # noqa: E501
'pol_list': (str,), # noqa: E501
'record_type': (str,), # noqa: E501
'record_version': (str,), # noqa: E501
'site_name': (str,), # noqa: E501
'registered_device': (AssetDeviceRegistrationRelationship,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'dn': 'Dn', # noqa: E501
'fabric_pod_prof': 'FabricPodProf', # noqa: E501
'pod_blk': 'PodBlk', # noqa: E501
'pod_pol_grp': 'PodPolGrp', # noqa: E501
'pol_list': 'PolList', # noqa: E501
'record_type': 'RecordType', # noqa: E501
'record_version': 'RecordVersion', # noqa: E501
'site_name': 'SiteName', # noqa: E501
'registered_device': 'RegisteredDevice', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NiatelemetryFabricPodSsAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "niatelemetry.FabricPodSs", must be one of ["niatelemetry.FabricPodSs", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "niatelemetry.FabricPodSs", must be one of ["niatelemetry.FabricPodSs", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
dn (str): Dn of the Fabric podS for APIC.. [optional] # noqa: E501
fabric_pod_prof (str): Parent PodP of the Fabric podS for APIC.. [optional] # noqa: E501
pod_blk (str): Pod Block for the above Fabric PodS.. [optional] # noqa: E501
pod_pol_grp (str): Policy Group for the above Fabric PodS.. [optional] # noqa: E501
pol_list (str): List of Dn of CommPols, SnmpPols and TimePols.. [optional] # noqa: E501
record_type (str): Type of record DCNM / APIC / SE. This determines the type of platform where inventory was collected.. [optional] # noqa: E501
record_version (str): Version of record being pushed. This determines what was the API version for data available from the device.. [optional] # noqa: E501
site_name (str): Name of the APIC site from which this data is being collected.. [optional] # noqa: E501
registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "niatelemetry.FabricPodSs")
object_type = kwargs.get('object_type', "niatelemetry.FabricPodSs")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_id = class_id
self.object_type = object_type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
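# Hedged usage sketch (added for illustration, not part of the generated code): apart
# from the two discriminator fields, all keyword arguments are optional, e.g.
#   NiatelemetryFabricPodSsAllOf(dn='uni/fabric/podprof-default', site_name='site-a')
# The attribute values shown here are assumptions, not taken from a real APIC.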
| 52.748837 | 1678 | 0.635129 |
e012651d24a784d1e0744c54bf37a121714336bf | 6,648 | py | Python | tests/python/unittest/test_numpy_default_dtype.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | 1 | 2021-11-09T01:40:17.000Z | 2021-11-09T01:40:17.000Z | tests/python/unittest/test_numpy_default_dtype.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_numpy_default_dtype.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | 1 | 2018-07-19T00:43:30.000Z | 2018-07-19T00:43:30.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import numpy as _np
import mxnet
from mxnet import npx
from mxnet import numpy as np
from mxnet.test_utils import use_np, use_np_default_dtype
class DtypeOpArgMngr(object):
"""Operator argument manager for storing operator workloads."""
_args = {}
@staticmethod
def add_workload(name, *args, **kwargs):
if name not in DtypeOpArgMngr._args:
DtypeOpArgMngr._args[name] = []
DtypeOpArgMngr._args[name].append({'args': args, 'kwargs': kwargs})
@staticmethod
def get_workloads(name):
return DtypeOpArgMngr._args.get(name, None)
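# Hedged usage sketch (added for illustration): workloads are keyed by operator name and
# replayed later by check_default_dtype, e.g.
#   DtypeOpArgMngr.add_workload('ones', (2, 3))
#   DtypeOpArgMngr.get_workloads('ones')   # contains {'args': ((2, 3),), 'kwargs': {}}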
_NUMPY_DTYPE_DEFAULT_FUNC_LIST = [
'array',
'ones',
'zeros',
'eye',
# 'full', dtype of mx.np.full now infers from fill_value
'identity',
'linspace',
'logspace',
'mean',
'hanning',
'hamming',
'blackman',
'random.gamma',
'random.uniform',
'random.normal',
'random.chisquare',
'true_divide'
]
def _add_dtype_workload_array():
DtypeOpArgMngr.add_workload('array', [1, 2, 3])
def _add_dtype_workload_ones():
DtypeOpArgMngr.add_workload('ones', 5)
DtypeOpArgMngr.add_workload('ones', (5,))
def _add_dtype_workload_zeros():
DtypeOpArgMngr.add_workload('zeros', 5)
DtypeOpArgMngr.add_workload('zeros', (5,))
def _add_dtype_workload_eye():
DtypeOpArgMngr.add_workload('eye', 3)
DtypeOpArgMngr.add_workload('eye', 3, k=1)
def _add_dtype_workload_full():
DtypeOpArgMngr.add_workload('full', (2, 2), 10)
def _add_dtype_workload_identity():
DtypeOpArgMngr.add_workload('identity', 3)
def _add_dtype_workload_linspace():
DtypeOpArgMngr.add_workload('linspace', 2.0, 3.0, num=5)
DtypeOpArgMngr.add_workload('linspace', 2.0, 3.0, num=5, endpoint=False)
def _add_dtype_workload_logspace():
DtypeOpArgMngr.add_workload('logspace', 2.0, 3.0, num=4)
DtypeOpArgMngr.add_workload('logspace', 2.0, 3.0, num=4, endpoint=False)
DtypeOpArgMngr.add_workload('logspace', 2.0, 3.0, num=4, base=2.0)
def _add_dtype_workload_mean():
DtypeOpArgMngr.add_workload('mean', np.random.randint(0, 3, size=2))
def _add_dtype_workload_hanning():
DtypeOpArgMngr.add_workload('hanning', 3)
def _add_dtype_workload_hamming():
DtypeOpArgMngr.add_workload('hamming', 3)
def _add_dtype_workload_blackman():
DtypeOpArgMngr.add_workload('blackman', 3)
def _add_dtype_workload_random_uniform():
DtypeOpArgMngr.add_workload('random.uniform', -1, 1, size=3)
def _add_dtype_workload_random_normal():
DtypeOpArgMngr.add_workload('random.normal', 0, 0.1, 3)
def _add_dtype_workload_random_gamma():
DtypeOpArgMngr.add_workload('random.gamma', 3)
def _add_dtype_workload_random_chisquare():
DtypeOpArgMngr.add_workload('random.chisquare', 2, 4)
def _add_dtype_workload_true_divide():
DtypeOpArgMngr.add_workload('true_divide', np.array([1,2], dtype=int), 4)
DtypeOpArgMngr.add_workload('true_divide', np.array([1,2], dtype=int), 2.0)
DtypeOpArgMngr.add_workload('true_divide', 4.0, np.array([1,2], dtype=int))
def _prepare_workloads():
_add_dtype_workload_array()
_add_dtype_workload_ones()
_add_dtype_workload_zeros()
_add_dtype_workload_eye()
_add_dtype_workload_full()
_add_dtype_workload_identity()
_add_dtype_workload_linspace()
_add_dtype_workload_logspace()
_add_dtype_workload_mean()
_add_dtype_workload_hanning()
_add_dtype_workload_hamming()
_add_dtype_workload_blackman()
_add_dtype_workload_random_gamma()
_add_dtype_workload_random_uniform()
_add_dtype_workload_random_normal()
_add_dtype_workload_true_divide()
_add_dtype_workload_random_chisquare()
_prepare_workloads()
@use_np
@use_np_default_dtype
def check_np_default_dtype(op, *args, **kwargs):
assert op(*args, **kwargs).dtype == 'float64'
@use_np
def check_deepnp_default_dtype(op, *args, **kwargs):
assert op(*args, **kwargs).dtype == 'float32'
def check_default_dtype(op_list):
for op_name in op_list:
print('Default dtype test:', op_name)
workloads = DtypeOpArgMngr.get_workloads(op_name)
strs = op_name.split('.')
if len(strs) == 1:
op = getattr(np, op_name)
elif len(strs) == 2:
op = getattr(getattr(np, strs[0]), strs[1])
else:
assert False
assert workloads is not None, 'Workloads for operator `{}` has not been ' \
'added for checking default dtype with the ' \
'official NumPy and the deep NumPy.'.format(op_name)
for workload in workloads:
check_np_default_dtype(op, *workload['args'], **workload['kwargs'])
check_deepnp_default_dtype(op, *workload['args'], **workload['kwargs'])
def test_default_float_dtype():
import platform
if 'Windows' not in platform.system():
check_default_dtype(_NUMPY_DTYPE_DEFAULT_FUNC_LIST)
@use_np
def test_np_indices_default_dtype():
import platform
if 'Windows' not in platform.system():
@use_np_default_dtype
def check_np_indices_default_dtype():
assert np.indices((3,)).dtype == 'int64'
def check_deepnp_indices_default_dtype():
assert np.indices((3,)).dtype == 'int64'
check_deepnp_indices_default_dtype()
check_np_indices_default_dtype()
@use_np
def test_np_arange_default_dtype():
import platform
if 'Windows' not in platform.system():
@use_np_default_dtype
def check_np_indices_default_dtype():
assert np.arange(3, 7, 2).dtype == 'int64'
def check_deepnp_indices_default_dtype():
assert np.arange(3, 7, 2).dtype == 'float32'
check_deepnp_indices_default_dtype()
check_np_indices_default_dtype()
| 29.678571 | 90 | 0.700812 |
dac5670c50150fa25ea8ccba465b17fa32a9aad4 | 2,816 | py | Python | library/scons-2.1.0/build/lib/SCons/Tool/ifl.py | zhaomangang/ElectromagneticDog | 471346f1c174861de690647c59d4f2a94be23e89 | [
"Apache-2.0"
] | 99 | 2017-08-08T01:37:45.000Z | 2021-05-15T14:39:43.000Z | library/scons-2.1.0/build/lib/SCons/Tool/ifl.py | zhaomangang/ElectromagneticDog | 471346f1c174861de690647c59d4f2a94be23e89 | [
"Apache-2.0"
] | 3 | 2017-08-23T06:39:15.000Z | 2019-09-06T18:18:17.000Z | library/scons-2.1.0/build/lib/SCons/Tool/ifl.py | zhaomangang/ElectromagneticDog | 471346f1c174861de690647c59d4f2a94be23e89 | [
"Apache-2.0"
] | 41 | 2017-08-08T01:38:41.000Z | 2022-02-15T12:12:21.000Z | """SCons.Tool.ifl
Tool-specific initialization for the Intel Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifl.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifl to an Environment."""
fscan = FortranScan("FORTRANPATH")
SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
if 'FORTRANFILESUFFIXES' not in env:
env['FORTRANFILESUFFIXES'] = ['.i']
else:
env['FORTRANFILESUFFIXES'].append('.i')
if 'F90FILESUFFIXES' not in env:
env['F90FILESUFFIXES'] = ['.i90']
else:
env['F90FILESUFFIXES'].append('.i90')
add_all_to_env(env)
env['FORTRAN'] = 'ifl'
env['SHFORTRAN'] = '$FORTRAN'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANINCFLAGS /c $SOURCES /Fo$TARGET'
def exists(env):
return env.Detect('ifl')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
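# Hedged usage sketch (added for illustration, not part of the original tool file): in an
# SConstruct one would typically request the tool explicitly, e.g.
#   env = Environment(tools=['default', 'ifl'])
#   env.Program('main', ['main.i90'])
# The target and source names are assumptions for illustration only.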
| 38.575342 | 121 | 0.730469 |
5096b99527ce5dd8a902dcab28d480b7c967c819 | 3,042 | py | Python | utils/events.py | sensianhermes/SENSIUSERBOT | 51473a0a98861192901fb5cc3dc297447a821daa | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4 | 2022-03-03T01:31:48.000Z | 2022-03-26T00:15:41.000Z | utils/events.py | sensianhermes/SENSIUSERBOT | 51473a0a98861192901fb5cc3dc297447a821daa | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2022-03-16T02:54:27.000Z | 2022-03-17T09:17:12.000Z | utils/events.py | sensianhermes/SENSIUSERBOT | 51473a0a98861192901fb5cc3dc297447a821daa | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2022-03-12T00:13:54.000Z | 2022-03-14T07:05:04.000Z | import pybase64
from telethon.tl.functions.channels import JoinChannelRequest as Get
from telethon.tl.types import MessageEntityMentionName
from .logger import logging
from .tools import edit_delete
LOGS = logging.getLogger(__name__)
async def get_user_from_event(
event, manevent=None, secondgroup=None, nogroup=False, noedits=False
):
if manevent is None:
manevent = event
if nogroup is False:
if secondgroup:
args = event.pattern_match.group(2).split(" ", 1)
else:
args = event.pattern_match.group(1).split(" ", 1)
extra = None
try:
if args:
user = args[0]
if len(args) > 1:
extra = "".join(args[1:])
if user.isnumeric() or (user.startswith("-") and user[1:].isnumeric()):
user = int(user)
if event.message.entities:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity, MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj, extra
if isinstance(user, int) or user.startswith("@"):
user_obj = await event.client.get_entity(user)
return user_obj, extra
except Exception as e:
LOGS.error(str(e))
try:
if nogroup is False:
if secondgroup:
extra = event.pattern_match.group(2)
else:
extra = event.pattern_match.group(1)
if event.is_private:
user_obj = await event.get_chat()
return user_obj, extra
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.sender_id is None:
if not noedits:
await edit_delete(
manevent, "**ERROR: That user is an anonymous admin!**", 60
)
return None, None
user_obj = await event.client.get_entity(previous_message.sender_id)
return user_obj, extra
if not args:
if not noedits:
await edit_delete(
manevent,
"**Mohon Reply Pesan atau Berikan User ID/Username pengguna!**",
60,
)
return None, None
except Exception as e:
LOGS.error(str(e))
if not noedits:
await edit_delete(
manevent,
"**Mohon Reply Pesan atau Berikan User ID/Username pengguna!**",
60,
)
return None, None
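# Hedged usage sketch (added for illustration, not part of the original module): a
# command handler would typically resolve its target user like this; the handler name
# and reply text are assumptions for illustration only.
#
#   async def handler(event):
#       user_obj, extra = await get_user_from_event(event)
#       if user_obj is None:
#           return
#       await event.reply("Resolved user id: {}, extra: {}".format(user_obj.id, extra))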
async def checking(client):
gocheck = str(pybase64.b64decode("QEx1bmF0aWMwZGU="))[2:13]  # decodes to the channel username "@Lunatic0de"
checker = str(pybase64.b64decode("QFNoYXJpbmdVc2VyYm90"))[2:17]  # decodes to the channel username "@SharingUserbot"
if client:
try:
await client(Get(gocheck))
await client(Get(checker))
except BaseException:
pass
| 34.965517 | 86 | 0.568047 |
148f64076cc0d592d5b7370e477d0bfd9a8ac24b | 601 | py | Python | inscrawler/logger.py | aonurdemir/instagram-crawler | ffb11e3f10ab6ca276fecd4ca527b7661db2704d | [
"MIT"
] | null | null | null | inscrawler/logger.py | aonurdemir/instagram-crawler | ffb11e3f10ab6ca276fecd4ca527b7661db2704d | [
"MIT"
] | null | null | null | inscrawler/logger.py | aonurdemir/instagram-crawler | ffb11e3f10ab6ca276fecd4ca527b7661db2704d | [
"MIT"
] | null | null | null | import logging
# Create a custom logger
logger = logging.getLogger(__name__)
# Create handlers
c_handler = logging.StreamHandler()
f_handler = logging.FileHandler('file.log')
c_handler.setLevel(logging.WARNING)
f_handler.setLevel(logging.ERROR)
# Create formatters and add them to the handlers
c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)
# Add handlers to the logger
logger.addHandler(c_handler)
logger.addHandler(f_handler)
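# Hedged usage sketch (added for illustration): with the levels configured above,
# warnings reach only the console handler while errors are also appended to file.log:
#   logger.warning("profile page took unusually long to load")
#   logger.error("failed to fetch post metadata")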
| 28.619048 | 84 | 0.77371 |
3456ba6afcb20561bf90ac5db95ed5f3ee4439fb | 569 | py | Python | 2018/day02/part1.py | bsoyka/aoc | e5f1da6c693a1841394248863e591cfc3bcae0e7 | [
"MIT"
] | 3 | 2020-12-10T06:14:40.000Z | 2021-12-05T06:50:30.000Z | 2018/day02/part1.py | bsoyka/advent-of-code | e5f1da6c693a1841394248863e591cfc3bcae0e7 | [
"MIT"
] | 4 | 2020-03-21T18:42:51.000Z | 2021-12-04T06:57:13.000Z | 2018/day02/part1.py | bsoyka/solutions | e5f1da6c693a1841394248863e591cfc3bcae0e7 | [
"MIT"
] | null | null | null | from collections import Counter
from pathlib import Path
with (Path(__file__).parent / "input.txt").open() as f:
boxes = [line.strip() for line in f]
two_letters = 0
three_letters = 0
for box in boxes:
two = False
three = False
items = Counter(box).items()
for letter, count in items:
if count == 2 and not two:
two_letters += 1
two = True
elif count == 3 and not three:
three_letters += 1
three = True
if two and three:
break
print(two_letters * three_letters)
| 23.708333 | 55 | 0.588752 |
b896ed7eb6fe4c5426841b4fa45fa5d87d4e027d | 708 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/models.py | Saymmic/cookiecutter-django | 4f32f1c096a842ff872728f5035a8092a16426d5 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/models.py | Saymmic/cookiecutter-django | 4f32f1c096a842ff872728f5035a8092a16426d5 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/models.py | Saymmic/cookiecutter-django | 4f32f1c096a842ff872728f5035a8092a16426d5 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from {{ cookiecutter.project_slug }}.base.models import BaseModel
class User(BaseModel, AbstractUser):
"""
Default custom user model for {{cookiecutter.project_name}}.
If adding fields that need to be filled at user signup,
check forms.SignupForm and forms.SocialSignupForms accordingly.
"""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
first_name = None # type: ignore
last_name = None # type: ignore
| 35.4 | 70 | 0.747175 |
35da63f90e5b062a6c70a401c7ff7049298e76f8 | 10,010 | py | Python | openfermioncirq/trotter/algorithms/linear_swap_network.py | mpharrigan/OpenFermion-Cirq | 623a2431227be3c2ff57a9d0d3a8aead6658ee26 | [
"Apache-2.0"
] | null | null | null | openfermioncirq/trotter/algorithms/linear_swap_network.py | mpharrigan/OpenFermion-Cirq | 623a2431227be3c2ff57a9d0d3a8aead6658ee26 | [
"Apache-2.0"
] | null | null | null | openfermioncirq/trotter/algorithms/linear_swap_network.py | mpharrigan/OpenFermion-Cirq | 623a2431227be3c2ff57a9d0d3a8aead6658ee26 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Trotter algorithm using the "fermionic simulation gate"."""
from typing import Optional, Sequence, Tuple
import cirq
from openfermion import DiagonalCoulombHamiltonian
from openfermioncirq import (
CRxxyy,
CRyxxy,
Rxxyy,
Ryxxy,
rot11,
rot111,
swap_network)
from openfermioncirq.trotter.trotter_algorithm import (
Hamiltonian,
TrotterStep,
TrotterAlgorithm)
class LinearSwapNetworkTrotterAlgorithm(TrotterAlgorithm):
"""A Trotter algorithm using the "fermionic simulation gate".
This algorithm simulates a DiagonalCoulombHamiltonian. It uses layers of
fermionic swap networks to simultaneously simulate the one- and two-body
interactions.
This algorithm is described in arXiv:1711.04789.
"""
supported_types = {DiagonalCoulombHamiltonian}
def symmetric(self, hamiltonian: Hamiltonian) -> Optional[TrotterStep]:
return SymmetricLinearSwapNetworkTrotterStep(hamiltonian)
def asymmetric(self, hamiltonian: Hamiltonian) -> Optional[TrotterStep]:
return AsymmetricLinearSwapNetworkTrotterStep(hamiltonian)
def controlled_symmetric(self, hamiltonian: Hamiltonian
) -> Optional[TrotterStep]:
return ControlledSymmetricLinearSwapNetworkTrotterStep(hamiltonian)
def controlled_asymmetric(self, hamiltonian: Hamiltonian
) -> Optional[TrotterStep]:
return ControlledAsymmetricLinearSwapNetworkTrotterStep(hamiltonian)
LINEAR_SWAP_NETWORK = LinearSwapNetworkTrotterAlgorithm()
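# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# A TrotterStep obtained from this algorithm yields an OP_TREE that can be turned into a
# circuit; the qubit count and time value are assumptions for illustration, and
# `hamiltonian` stands for any openfermion.DiagonalCoulombHamiltonian.
#
#   qubits = cirq.LineQubit.range(4)
#   step = LINEAR_SWAP_NETWORK.symmetric(hamiltonian)
#   ops = step.trotter_step(qubits, time=0.1)
#   circuit = cirq.Circuit(ops)   # or cirq.Circuit.from_ops(ops) on older cirq versions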
class SymmetricLinearSwapNetworkTrotterStep(TrotterStep):
def trotter_step(
self,
qubits: Sequence[cirq.Qid],
time: float,
control_qubit: Optional[cirq.Qid]=None
) -> cirq.OP_TREE:
n_qubits = len(qubits)
# Apply one- and two-body interactions for half of the full time
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
yield Rxxyy(
0.5 * self.hamiltonian.one_body[p, q].real * time).on(a, b)
yield Ryxxy(
0.5 * self.hamiltonian.one_body[p, q].imag * time).on(a, b)
yield rot11(rads=
-self.hamiltonian.two_body[p, q] * time).on(a, b)
yield swap_network(qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one-body potential for the full time
yield (cirq.Rz(rads=
-self.hamiltonian.one_body[i, i].real * time).on(qubits[i])
for i in range(n_qubits))
# Apply one- and two-body interactions for half of the full time
# This time, reorder the operations so that the entire Trotter step is
# symmetric
def one_and_two_body_interaction_reverse_order(p, q, a, b
) -> cirq.OP_TREE:
yield rot11(rads=
-self.hamiltonian.two_body[p, q] * time).on(a, b)
yield Ryxxy(
0.5 * self.hamiltonian.one_body[p, q].imag * time).on(a, b)
yield Rxxyy(
0.5 * self.hamiltonian.one_body[p, q].real * time).on(a, b)
yield swap_network(qubits, one_and_two_body_interaction_reverse_order,
fermionic=True, offset=True)
class ControlledSymmetricLinearSwapNetworkTrotterStep(TrotterStep):
def trotter_step(
self,
qubits: Sequence[cirq.Qid],
time: float,
control_qubit: Optional[cirq.Qid]=None
) -> cirq.OP_TREE:
n_qubits = len(qubits)
# Apply one- and two-body interactions for half of the full time
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
yield CRxxyy(
0.5 * self.hamiltonian.one_body[p, q].real * time).on(
control_qubit, a, b)
yield CRyxxy(
0.5 * self.hamiltonian.one_body[p, q].imag * time).on(
control_qubit, a, b)
yield rot111(-self.hamiltonian.two_body[p, q] * time).on(
control_qubit, a, b)
yield swap_network(
qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one-body potential for the full time
yield (rot11(rads=
-self.hamiltonian.one_body[i, i].real * time).on(
control_qubit, qubits[i])
for i in range(n_qubits))
# Apply one- and two-body interactions for half of the full time
# This time, reorder the operations so that the entire Trotter step is
# symmetric
def one_and_two_body_interaction_reverse_order(p, q, a, b
) -> cirq.OP_TREE:
yield rot111(-self.hamiltonian.two_body[p, q] * time).on(
control_qubit, a, b)
yield CRyxxy(
0.5 * self.hamiltonian.one_body[p, q].imag * time).on(
control_qubit, a, b)
yield CRxxyy(
0.5 * self.hamiltonian.one_body[p, q].real * time).on(
control_qubit, a, b)
yield swap_network(qubits, one_and_two_body_interaction_reverse_order,
fermionic=True, offset=True)
# Apply phase from constant term
yield cirq.Rz(rads=
-self.hamiltonian.constant * time).on(control_qubit)
class AsymmetricLinearSwapNetworkTrotterStep(TrotterStep):
def trotter_step(
self,
qubits: Sequence[cirq.Qid],
time: float,
control_qubit: Optional[cirq.Qid]=None
) -> cirq.OP_TREE:
n_qubits = len(qubits)
# Apply one- and two-body interactions for the full time
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
yield Rxxyy(
self.hamiltonian.one_body[p, q].real * time).on(a, b)
yield Ryxxy(
self.hamiltonian.one_body[p, q].imag * time).on(a, b)
yield rot11(rads=
-2 * self.hamiltonian.two_body[p, q] * time).on(a, b)
yield swap_network(qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one-body potential for the full time
yield (cirq.Rz(rads=
-self.hamiltonian.one_body[i, i].real * time).on(qubits[i])
for i in range(n_qubits))
def step_qubit_permutation(self,
qubits: Sequence[cirq.Qid],
control_qubit: Optional[cirq.Qid]=None
) -> Tuple[Sequence[cirq.Qid],
Optional[cirq.Qid]]:
# A Trotter step reverses the qubit ordering
return qubits[::-1], None
def finish(self,
qubits: Sequence[cirq.Qid],
n_steps: int,
control_qubit: Optional[cirq.Qid]=None,
omit_final_swaps: bool=False
) -> cirq.OP_TREE:
# If the number of Trotter steps is odd, possibly swap qubits back
if n_steps & 1 and not omit_final_swaps:
yield swap_network(qubits, fermionic=True)
class ControlledAsymmetricLinearSwapNetworkTrotterStep(TrotterStep):
def trotter_step(
self,
qubits: Sequence[cirq.Qid],
time: float,
control_qubit: Optional[cirq.Qid]=None
) -> cirq.OP_TREE:
n_qubits = len(qubits)
# Apply one- and two-body interactions for the full time
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
yield CRxxyy(
self.hamiltonian.one_body[p, q].real * time).on(
control_qubit, a, b)
yield CRyxxy(
self.hamiltonian.one_body[p, q].imag * time).on(
control_qubit, a, b)
yield rot111(-2 * self.hamiltonian.two_body[p, q] * time).on(
control_qubit, a, b)
yield swap_network(qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one-body potential for the full time
yield (rot11(rads=
-self.hamiltonian.one_body[i, i].real * time).on(
control_qubit, qubits[i])
for i in range(n_qubits))
# Apply phase from constant term
yield cirq.Rz(rads=
-self.hamiltonian.constant * time).on(control_qubit)
def step_qubit_permutation(self,
qubits: Sequence[cirq.Qid],
control_qubit: Optional[cirq.Qid]=None
) -> Tuple[Sequence[cirq.Qid],
Optional[cirq.Qid]]:
# A Trotter step reverses the qubit ordering
return qubits[::-1], control_qubit
def finish(self,
qubits: Sequence[cirq.Qid],
n_steps: int,
control_qubit: Optional[cirq.Qid]=None,
omit_final_swaps: bool=False
) -> cirq.OP_TREE:
# If the number of Trotter steps is odd, possibly swap qubits back
if n_steps & 1 and not omit_final_swaps:
yield swap_network(qubits, fermionic=True)
| 39.254902 | 80 | 0.587113 |
8716c22b2742052fb18d2b89ad7f46b3fb4be27b | 3,352 | py | Python | tigerpath/majors_and_certificates/scripts/verifier_tester.py | richardhuangz/tigerpath | 6f57dc3dffbc773dba7d27cf5fe89b1c92d1b214 | [
"MIT"
] | 10 | 2020-07-23T15:41:17.000Z | 2021-11-29T02:25:54.000Z | tigerpath/majors_and_certificates/scripts/verifier_tester.py | richardhuangz/tigerpath | 6f57dc3dffbc773dba7d27cf5fe89b1c92d1b214 | [
"MIT"
] | 133 | 2018-07-13T05:05:10.000Z | 2022-03-08T22:05:59.000Z | tigerpath/majors_and_certificates/scripts/verifier_tester.py | churichard/tigerpath | 9eda53e171cbd4c97ae2178da2a9b4ca433a91d8 | [
"MIT"
] | 12 | 2018-08-19T07:02:34.000Z | 2021-12-23T02:14:41.000Z | #!/usr/bin/env python3
import json
import yaml
from pprint import pprint
import jsonschema # must be installed via pip
import os
import collections
import filecmp
import shutil
from . import verifier
DIR_PATH = os.path.dirname(os.path.realpath(__file__)) # the directory containing this file
SCHEMA_LOCATION = os.path.join(DIR_PATH, "schema.json") # path to the requirements JSON schema
TESTS_LOCATION = os.path.join(DIR_PATH, "verifier_tests") # folder where the test files are stored
def _json_format(obj):
return json.dumps(obj, sort_keys=False, indent=2, separators=(',', ': ')) + "\n"
def main():
test_failed = None
for filename in sorted(os.listdir(TESTS_LOCATION)):
if filename.endswith(".test"):
print("Testing: " + filename)
file_path = os.path.join(TESTS_LOCATION, filename)
with open (file_path, "r", encoding="utf8") as f:
req_name = f.readline()[:-1]
year = int(f.readline())
courses = yaml.safe_load(f)
if req_name in ["AB", "BSE"]: # checking degree. No validation for degree jsons
satisfied, courses, req_tree = verifier.check_degree(req_name, courses, year)
elif "Certificate:" in req_name:
req_name = ''.join(req_name.split('Certificate:')[1:])
major_filename = req_name + "_" + str(year) + ".json"
major_filepath = os.path.join(DIR_PATH, verifier.CERTIFICATES_LOCATION, major_filename)
with open(major_filepath, 'r', encoding="utf8") as f:
requirements = yaml.safe_load(f)
with open(SCHEMA_LOCATION, 'r', encoding="utf8") as s:
schema = yaml.safe_load(s)
jsonschema.validate(requirements,schema)
satisfied, courses, req_tree = verifier.check_certificate(req_name, courses, year)
else: # checking major
major_filename = req_name + "_" + str(year) + ".json"
major_filepath = os.path.join(DIR_PATH, verifier.MAJORS_LOCATION, major_filename)
with open(major_filepath, 'r', encoding="utf8") as f:
requirements = yaml.safe_load(f)
with open(SCHEMA_LOCATION, 'r', encoding="utf8") as s:
schema = yaml.safe_load(s)
jsonschema.validate(requirements,schema)
satisfied, courses, req_tree = verifier.check_major(req_name, courses, year)
with open (file_path+".out", "w", encoding="utf8") as f:
f.write(_json_format(courses))
f.write("\n")
f.write(_json_format(req_tree))
# use the most informative diff output
if shutil.which("colordiff"):
diff = "colordiff"
elif shutil.which("diff"):
diff = "diff"
else:
diff = "cmp"
# check if output is correct
if not filecmp.cmp(file_path+".expected", file_path+".out"):
print("--- Failed")
if test_failed == None:
test_failed = "echo 'Failed test:' %s; %s %s %s | head -10" % (file_path, diff, file_path+".expected", file_path+".out")
if test_failed:
os.system(test_failed)
if __name__ == "__main__":
main() | 47.211268 | 140 | 0.588604 |
106de0a74e7d2abaa269404accbf057da9564568 | 70,655 | py | Python | utils/model_utils.py | MIC-DKFZ/DetectionAndRegression | 40f3cb92ec6447767bd85b62a015b0d50e32ad26 | [
"Apache-2.0"
] | 40 | 2019-09-24T08:11:35.000Z | 2022-02-23T13:49:01.000Z | utils/model_utils.py | MIC-DKFZ/MedicalDetectionRegression | 40f3cb92ec6447767bd85b62a015b0d50e32ad26 | [
"Apache-2.0"
] | 13 | 2019-11-04T10:52:40.000Z | 2022-03-11T23:57:14.000Z | utils/model_utils.py | MIC-DKFZ/MedicalDetectionRegression | 40f3cb92ec6447767bd85b62a015b0d50e32ad26 | [
"Apache-2.0"
] | 22 | 2019-08-28T15:32:25.000Z | 2022-02-18T11:27:30.000Z | #!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Parts are based on https://github.com/multimodallearning/pytorch-mask-rcnn
published under MIT license.
"""
import warnings
warnings.filterwarnings('ignore', '.*From scipy 0.13.0, the output shape of zoom()*')
import numpy as np
import scipy.misc
import scipy.ndimage
import scipy.interpolate
from scipy.ndimage.measurements import label as lb
import torch
import tqdm
from custom_extensions.nms import nms
from custom_extensions.roi_align import roi_align
############################################################
# Segmentation Processing
############################################################
def sum_tensor(input, axes, keepdim=False):
axes = np.unique(axes)
if keepdim:
for ax in axes:
input = input.sum(ax, keepdim=True)
else:
for ax in sorted(axes, reverse=True):
input = input.sum(int(ax))
return input
def get_one_hot_encoding(y, n_classes):
"""
transform a numpy label array to a one-hot array with the same spatial shape (the channel axis is expanded to n_classes).
:param y: array of shape (b, 1, y, x, (z)).
:param n_classes: int, number of classes to unfold in one-hot encoding.
:return y_ohe: array of shape (b, n_classes, y, x, (z))
"""
dim = len(y.shape) - 2
if dim == 2:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3])).astype('int32')
elif dim == 3:
y_ohe = np.zeros((y.shape[0], n_classes, y.shape[2], y.shape[3], y.shape[4])).astype('int32')
else:
raise Exception("invalid dimensions {} encountered".format(y.shape))
for cl in np.arange(n_classes):
y_ohe[:, cl][y[:, 0] == cl] = 1
return y_ohe
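# Hedged usage sketch (added for illustration, not part of the original module): shows the
# expected shapes of get_one_hot_encoding; the label values below are made up.
def _example_get_one_hot_encoding():
    """Tiny shape/one-hot sanity check (illustrative only)."""
    y = np.zeros((2, 1, 4, 4), dtype='int32')
    y[0, 0, 1, 1] = 1
    y[1, 0, 2, 2] = 2
    y_ohe = get_one_hot_encoding(y, n_classes=3)
    assert y_ohe.shape == (2, 3, 4, 4)
    assert y_ohe[0, 1, 1, 1] == 1 and y_ohe[1, 2, 2, 2] == 1
    return y_ohe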
def dice_per_batch_inst_and_class(pred, y, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch instance and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
:param y: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes])
:param n_classes: int
:return: dice scores of shape (b, c)
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
y = get_one_hot_encoding(y, n_classes)
axes = tuple(range(2, len(pred.shape)))
intersect = np.sum(pred*y, axis=axes)
denominator = np.sum(pred, axis=axes)+np.sum(y, axis=axes)
dice = (2.0*intersect + smooth) / (denominator + smooth)
return dice
def dice_per_batch_and_class(pred, targ, n_classes, convert_to_ohe=True, smooth=1e-8):
'''
computes dice scores per batch and class.
:param pred: prediction array of shape (b, 1, y, x, (z)) (e.g. softmax prediction with argmax over dim 1)
:param targ: ground truth array of shape (b, 1, y, x, (z)) (contains int [0, ..., n_classes])
:param n_classes: int
:param smooth: Laplacian smooth, https://en.wikipedia.org/wiki/Additive_smoothing
:return: dice scores of shape (c,), aggregated over the whole batch.
'''
if convert_to_ohe:
pred = get_one_hot_encoding(pred, n_classes)
targ = get_one_hot_encoding(targ, n_classes)
axes = (0, *list(range(2, len(pred.shape)))) #(0,2,3(,4))
intersect = np.sum(pred * targ, axis=axes)
denominator = np.sum(pred, axis=axes) + np.sum(targ, axis=axes)
dice = (2.0 * intersect + smooth) / (denominator + smooth)
assert dice.shape==(n_classes,), "dice shp {}".format(dice.shape)
return dice
def batch_dice(pred, y, false_positive_weight=1.0, smooth=1e-6):
'''
compute soft dice over batch. this is a differentiable score and can be used as a loss function.
only dice scores of foreground classes are returned, since training typically
does not benefit from explicit background optimization. Pixels of the entire batch are considered a pseudo-volume to compute dice scores of.
This way, single patches with missing foreground classes can not produce faulty gradients.
:param pred: (b, c, y, x, (z)), softmax probabilities (network output).
:param y: (b, c, y, x, (z)), one-hot encoded segmentation mask.
:param false_positive_weight: float [0,1]. For weighting of imbalanced classes,
reduces the penalty for false-positive pixels. Can be beneficial sometimes in data with heavy fg/bg imbalances.
:return: soft dice score (float). This function discards the background score and returns the mean of the foreground scores.
'''
if len(pred.size()) == 4:
axes = (0, 2, 3)
intersect = sum_tensor(pred * y, axes, keepdim=False)
denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) #only fg dice here.
elif len(pred.size()) == 5:
axes = (0, 2, 3, 4)
intersect = sum_tensor(pred * y, axes, keepdim=False)
denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) #only fg dice here.
else:
raise ValueError('wrong input dimension in dice loss')
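# Hedged usage sketch (added for illustration, not part of the original module): with a
# one-hot target identical to the softmax prediction, the foreground soft dice is ~1.
def _example_batch_dice():
    """Illustrative call of batch_dice on a toy 2D batch of shape (b, c, y, x)."""
    probs = torch.zeros(2, 3, 8, 8)
    probs[:, 0] = 1.                    # background everywhere ...
    probs[:, 0, 2:4, 2:4] = 0.
    probs[:, 1, 2:4, 2:4] = 1.          # ... except a small class-1 patch
    targets = probs.clone()             # perfect one-hot target
    return batch_dice(probs, targets)   # tensor close to 1.0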
############################################################
# Bounding Boxes
############################################################
def compute_iou_2D(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2] THIS IS THE GT BOX
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
def compute_iou_3D(box, boxes, box_volume, boxes_volume):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2, z1, z2] (typically gt box)
boxes: [boxes_count, (y1, x1, y2, x2, z1, z2)]
box_volume: float. the volume of 'box'
boxes_volume: array of length boxes_count.
Note: the volumes are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
z1 = np.maximum(box[4], boxes[:, 4])
z2 = np.minimum(box[5], boxes[:, 5])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) * np.maximum(z2 - z1, 0)
union = box_volume + boxes_volume[:] - intersection[:]
iou = intersection / union
return iou
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)]. / 3D: (z1, z2))
For better performance, pass the largest set first and the smaller second.
:return: (#boxes1, #boxes2), IoUs of each box in set 1 matched with each box in set 2
"""
# Areas of anchors and GT boxes
if boxes1.shape[1] == 4:
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i] #this is the gt box
overlaps[:, i] = compute_iou_2D(box2, boxes1, area2[i], area1)
return overlaps
else:
# Areas of anchors and GT boxes
volume1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) * (boxes1[:, 5] - boxes1[:, 4])
volume2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) * (boxes2[:, 5] - boxes2[:, 4])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(boxes2.shape[0]):
box2 = boxes2[i] # this is the gt box
overlaps[:, i] = compute_iou_3D(box2, boxes1, volume2[i], volume1)
return overlaps
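# Hedged usage sketch (added for illustration, not part of the original module): an anchor
# identical to the ground-truth box gets IoU 1.0, a disjoint one gets 0.0.
def _example_compute_overlaps():
    """Illustrative IoU matrix for two 2D anchors against one gt box."""
    anchors = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=np.float32)
    gt = np.array([[0, 0, 10, 10]], dtype=np.float32)
    ious = compute_overlaps(anchors, gt)          # shape (2, 1)
    assert np.isclose(ious[0, 0], 1.) and np.isclose(ious[1, 0], 0.)
    return ious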
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)] / 3D: (z1, z2))
"""
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = torch.log(gt_height / height)
dw = torch.log(gt_width / width)
result = torch.stack([dy, dx, dh, dw], dim=1)
if box.shape[1] > 4:
depth = box[:, 5] - box[:, 4]
center_z = box[:, 4] + 0.5 * depth
gt_depth = gt_box[:, 5] - gt_box[:, 4]
gt_center_z = gt_box[:, 4] + 0.5 * gt_depth
dz = (gt_center_z - center_z) / depth
dd = torch.log(gt_depth / depth)
result = torch.stack([dy, dx, dz, dh, dw, dd], dim=1)
return result
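# --- illustrative usage sketch (added, not part of the original toolkit) ---
# Demonstrates the delta encoding computed by box_refinement: a pure center
# shift yields dy/dx != 0 while dh = dw = 0. Assumes the module-level torch
# import; the coordinates are toy values.
def _example_box_refinement():
    box = torch.tensor([[0., 0., 10., 10.]])
    gt_box = torch.tensor([[1., 1., 11., 11.]])
    # center moves by 1 px over a side length of 10 -> dy = dx = 0.1,
    # sizes are unchanged -> dh = dw = log(1) = 0.
    return box_refinement(box, gt_box)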
def unmold_mask_2D(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
to it's original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2 = bbox
out_zoom = [y2 - y1, x2 - x1]
zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]
mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2]) #only y,x
full_mask[y1:y2, x1:x2] = mask
return full_mask
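# --- illustrative usage sketch (added, not part of the original toolkit) ---
# Pastes a small 2x2 mask into a 6x6 image at box (1, 1, 5, 5). Assumes the
# module-level numpy/scipy imports; shapes and values are toy examples.
def _example_unmold_mask_2D():
    small_mask = np.ones((2, 2), dtype=np.float32)
    full = unmold_mask_2D(small_mask, bbox=[1, 1, 5, 5], image_shape=(6, 6))
    # full is 6x6, ones inside rows/cols 1..4, zeros elsewhere.
    return full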
def unmold_mask_2D_torch(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
to it's original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2 = bbox
out_zoom = [(y2 - y1).float(), (x2 - x1).float()]
zoom_factor = [i / j for i, j in zip(out_zoom, mask.shape)]
mask = mask.unsqueeze(0).unsqueeze(0)
mask = torch.nn.functional.interpolate(mask, scale_factor=zoom_factor)
mask = mask[0][0]
#mask = scipy.ndimage.zoom(mask.cpu().numpy(), zoom_factor, order=1).astype(np.float32)
#mask = torch.from_numpy(mask).cuda()
# Put the mask in the right location.
full_mask = torch.zeros(image_shape[:2]) # only y,x
full_mask[y1:y2, x1:x2] = mask
return full_mask
def unmold_mask_3D(mask, bbox, image_shape):
"""Converts a mask generated by the neural network into a format similar
to it's original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2, z1, z2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
y1, x1, y2, x2, z1, z2 = bbox
out_zoom = [y2 - y1, x2 - x1, z2 - z1]
zoom_factor = [i/j for i,j in zip(out_zoom, mask.shape)]
mask = scipy.ndimage.zoom(mask, zoom_factor, order=1).astype(np.float32)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:3])
full_mask[y1:y2, x1:x2, z1:z2] = mask
return full_mask
def nms_numpy(box_coords, scores, thresh):
""" non-maximum suppression on 2D or 3D boxes in numpy.
:param box_coords: [y1,x1,y2,x2 (,z1,z2)] with y1<=y2, x1<=x2, z1<=z2.
:param scores: ranking scores (higher score == higher rank) of boxes.
:param thresh: IoU threshold for clustering.
:return:
"""
y1 = box_coords[:, 0]
x1 = box_coords[:, 1]
y2 = box_coords[:, 2]
x2 = box_coords[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """the definition of the coordinates is crucially important here:
        coordinates of which maxima are taken need to be the lower coordinates"""
areas = (x2 - x1) * (y2 - y1)
is_3d = box_coords.shape[1] == 6
if is_3d: # 3-dim case
z1 = box_coords[:, 4]
z2 = box_coords[:, 5]
        assert np.all(z1 <= z2), """the definition of the coordinates is crucially important here:
            coordinates of which maxima are taken need to be the lower coordinates"""
areas *= (z2 - z1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order]) # highest scoring element still in >order<, is compared to itself, that is okay.
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
h = np.maximum(0.0, yy2 - yy1)
w = np.maximum(0.0, xx2 - xx1)
inter = h * w
if is_3d:
zz1 = np.maximum(z1[i], z1[order])
zz2 = np.minimum(z2[i], z2[order])
d = np.maximum(0.0, zz2 - zz1)
inter *= d
iou = inter / (areas[i] + areas[order] - inter)
non_matches = np.nonzero(iou <= thresh)[0] # get all elements that were not matched and discard all others.
order = order[non_matches]
keep.append(i)
return keep
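# --- illustrative usage sketch (added, not part of the original toolkit) ---
# Three toy 2D boxes: the first two overlap with IoU ~0.68, the third is
# disjoint. With thresh=0.5, only the higher-scoring box of the overlapping
# pair and the disjoint box survive. Assumes the module-level numpy import.
def _example_nms_numpy():
    boxes = np.array([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [20., 20., 30., 30.]])
    scores = np.array([0.9, 0.8, 0.7])
    return nms_numpy(boxes, scores, thresh=0.5)   # -> [0, 2]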
############################################################
# M-RCNN
############################################################
def refine_proposals(rpn_pred_probs, rpn_pred_deltas, proposal_count, batch_anchors, cf):
"""
Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
:param rpn_pred_probs: (b, n_anchors, 2)
:param rpn_pred_deltas: (b, n_anchors, (y, x, (z), log(h), log(w), (log(d))))
:return: batch_normalized_props: Proposals in normalized coordinates (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
:return: batch_out_proposals: Box coords + RPN foreground scores
for monitoring/plotting (b, proposal_count, (y1, x1, y2, x2, (z1), (z2), score))
"""
std_dev = torch.from_numpy(cf.rpn_bbox_std_dev[None]).float().cuda()
norm = torch.from_numpy(cf.scale).float().cuda()
anchors = batch_anchors.clone()
batch_scores = rpn_pred_probs[:, :, 1]
# norm deltas
batch_deltas = rpn_pred_deltas * std_dev
batch_normalized_props = []
batch_out_proposals = []
# loop over batch dimension.
for ix in range(batch_scores.shape[0]):
scores = batch_scores[ix]
deltas = batch_deltas[ix]
non_nans = deltas == deltas
assert torch.all(non_nans), "deltas have nans: {}".format(deltas[~non_nans])
non_nans = anchors == anchors
assert torch.all(non_nans), "anchors have nans: {}".format(anchors[~non_nans])
# improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(cf.pre_nms_limit, anchors.size()[0])
scores, order = scores.sort(descending=True)
order = order[:pre_nms_limit]
scores = scores[:pre_nms_limit]
deltas = deltas[order, :]
# apply deltas to anchors to get refined anchors and filter with non-maximum suppression.
if batch_deltas.shape[-1] == 4:
boxes = apply_box_deltas_2D(anchors[order, :], deltas)
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes before clip/after delta apply have nans: {}".format(boxes[~non_nans])
boxes = clip_boxes_2D(boxes, cf.window)
else:
boxes = apply_box_deltas_3D(anchors[order, :], deltas)
boxes = clip_boxes_3D(boxes, cf.window)
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes before nms/after clip have nans: {}".format(boxes[~non_nans])
# boxes are y1,x1,y2,x2, torchvision-nms requires x1,y1,x2,y2, but consistent swap x<->y is irrelevant.
keep = nms.nms(boxes, scores, cf.rpn_nms_threshold)
keep = keep[:proposal_count]
boxes = boxes[keep, :]
rpn_scores = scores[keep][:, None]
# pad missing boxes with 0.
if boxes.shape[0] < proposal_count:
n_pad_boxes = proposal_count - boxes.shape[0]
zeros = torch.zeros([n_pad_boxes, boxes.shape[1]]).cuda()
boxes = torch.cat([boxes, zeros], dim=0)
zeros = torch.zeros([n_pad_boxes, rpn_scores.shape[1]]).cuda()
rpn_scores = torch.cat([rpn_scores, zeros], dim=0)
# concat box and score info for monitoring/plotting.
batch_out_proposals.append(torch.cat((boxes, rpn_scores), 1).cpu().data.numpy())
# normalize dimensions to range of 0 to 1.
non_nans = boxes == boxes
assert torch.all(non_nans), "unnormalized boxes after nms have nans: {}".format(boxes[~non_nans])
normalized_boxes = boxes / norm
where = normalized_boxes <=1
assert torch.all(where), "normalized box coords >1 found:\n {}\n".format(normalized_boxes[~where])
# add again batch dimension
batch_normalized_props.append(torch.cat((normalized_boxes, rpn_scores), 1).unsqueeze(0))
batch_normalized_props = torch.cat(batch_normalized_props)
batch_out_proposals = np.array(batch_out_proposals)
return batch_normalized_props, batch_out_proposals
def pyramid_roi_align(feature_maps, rois, pool_size, pyramid_levels, dim):
"""
Implements ROI Pooling on multiple levels of the feature pyramid.
:param feature_maps: list of feature maps, each of shape (b, c, y, x , (z))
:param rois: proposals (normalized coords.) as returned by RPN. contain info about original batch element allocation.
(n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs)
:param pool_size: list of poolsizes in dims: [x, y, (z)]
:param pyramid_levels: list. [0, 1, 2, ...]
:return: pooled: pooled feature map rois (n_proposals, c, poolsize_y, poolsize_x, (poolsize_z))
Output:
Pooled regions in the shape: [num_boxes, height, width, channels].
    The width and height are those specified in pool_size in the layer
constructor.
"""
boxes = rois[:, :dim*2]
batch_ixs = rois[:, dim*2]
# Assign each ROI to a level in the pyramid based on the ROI area.
if dim == 2:
y1, x1, y2, x2 = boxes.chunk(4, dim=1)
else:
y1, x1, y2, x2, z1, z2 = boxes.chunk(6, dim=1)
h = y2 - y1
w = x2 - x1
# Equation 1 in https://arxiv.org/abs/1612.03144. Account for
# the fact that our coordinates are normalized here.
    # divide sqrt(h*w) by 1 instead of the image area.
roi_level = (4 + torch.log2(torch.sqrt(h*w))).round().int().clamp(pyramid_levels[0], pyramid_levels[-1])
# if Pyramid contains additional level P6, adapt the roi_level assignment accordingly.
if len(pyramid_levels) == 5:
roi_level[h*w > 0.65] = 5
# Loop through levels and apply ROI pooling to each.
pooled = []
box_to_level = []
fmap_shapes = [f.shape for f in feature_maps]
for level_ix, level in enumerate(pyramid_levels):
ix = roi_level == level
if not ix.any():
continue
ix = torch.nonzero(ix)[:, 0]
level_boxes = boxes[ix, :]
# re-assign rois to feature map of original batch element.
ind = batch_ixs[ix].int()
# Keep track of which box is mapped to which level
box_to_level.append(ix)
        # Stop gradient propagation to ROI proposals
level_boxes = level_boxes.detach()
if len(pool_size) == 2:
# remap to feature map coordinate system
y_exp, x_exp = fmap_shapes[level_ix][2:] # exp = expansion
level_boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
pooled_features = roi_align.roi_align_2d(feature_maps[level_ix],
torch.cat((ind.unsqueeze(1).float(), level_boxes), dim=1),
pool_size)
else:
y_exp, x_exp, z_exp = fmap_shapes[level_ix][2:]
level_boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp, z_exp, z_exp], dtype=torch.float32).cuda())
pooled_features = roi_align.roi_align_3d(feature_maps[level_ix],
torch.cat((ind.unsqueeze(1).float(), level_boxes), dim=1),
pool_size)
pooled.append(pooled_features)
# Pack pooled features into one tensor
pooled = torch.cat(pooled, dim=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = torch.cat(box_to_level, dim=0)
# Rearrange pooled features to match the order of the original boxes
_, box_to_level = torch.sort(box_to_level)
pooled = pooled[box_to_level, :, :]
return pooled
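# --- illustrative sketch (added, not part of the original toolkit) ---
# Reproduces the level-assignment rule used in pyramid_roi_align above
# (Eq. 1 of the FPN paper, https://arxiv.org/abs/1612.03144, adapted to
# normalized coordinates): small boxes map to fine levels, large boxes to
# coarse levels. Assumes the module-level torch import; box sizes are made up.
def _example_fpn_level_assignment():
    h = torch.tensor([0.05, 0.25, 0.9])   # normalized box heights
    w = torch.tensor([0.05, 0.25, 0.9])   # normalized box widths
    pyramid_levels = [0, 1, 2, 3]
    roi_level = (4 + torch.log2(torch.sqrt(h * w))).round().int()
    return roi_level.clamp(pyramid_levels[0], pyramid_levels[-1])  # -> [0, 2, 3]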
def roi_align_3d_numpy(input: np.ndarray, rois, output_size: tuple,
spatial_scale: float = 1., sampling_ratio: int = -1) -> np.ndarray:
""" This fct mainly serves as a verification method for 3D CUDA implementation of RoIAlign, it's highly
inefficient due to the nested loops.
:param input: (ndarray[N, C, H, W, D]): input feature map
:param rois: list (N,K(n), 6), K(n) = nr of rois in batch-element n, single roi of format (y1,x1,y2,x2,z1,z2)
:param output_size:
:param spatial_scale:
:param sampling_ratio:
:return: (List[N, K(n), C, output_size[0], output_size[1], output_size[2]])
"""
out_height, out_width, out_depth = output_size
coord_grid = tuple([np.linspace(0, input.shape[dim] - 1, num=input.shape[dim]) for dim in range(2, 5)])
    pooled_rois = [[] for _ in range(len(rois))]  # independent list per batch element (avoids list aliasing)
assert len(rois) == input.shape[0], "batch dim mismatch, rois: {}, input: {}".format(len(rois), input.shape[0])
print("Numpy 3D RoIAlign progress:", end="\n")
for b in range(input.shape[0]):
for roi in tqdm.tqdm(rois[b]):
y1, x1, y2, x2, z1, z2 = np.array(roi) * spatial_scale
roi_height = max(float(y2 - y1), 1.)
roi_width = max(float(x2 - x1), 1.)
roi_depth = max(float(z2 - z1), 1.)
if sampling_ratio <= 0:
sampling_ratio_h = int(np.ceil(roi_height / out_height))
sampling_ratio_w = int(np.ceil(roi_width / out_width))
sampling_ratio_d = int(np.ceil(roi_depth / out_depth))
else:
sampling_ratio_h = sampling_ratio_w = sampling_ratio_d = sampling_ratio # == n points per bin
bin_height = roi_height / out_height
bin_width = roi_width / out_width
bin_depth = roi_depth / out_depth
n_points = sampling_ratio_h * sampling_ratio_w * sampling_ratio_d
pooled_roi = np.empty((input.shape[1], out_height, out_width, out_depth), dtype="float32")
for chan in range(input.shape[1]):
lin_interpolator = scipy.interpolate.RegularGridInterpolator(coord_grid, input[b, chan],
method="linear")
for bin_iy in range(out_height):
for bin_ix in range(out_width):
for bin_iz in range(out_depth):
bin_val = 0.
for i in range(sampling_ratio_h):
for j in range(sampling_ratio_w):
for k in range(sampling_ratio_d):
loc_ijk = [
y1 + bin_iy * bin_height + (i + 0.5) * (bin_height / sampling_ratio_h),
x1 + bin_ix * bin_width + (j + 0.5) * (bin_width / sampling_ratio_w),
z1 + bin_iz * bin_depth + (k + 0.5) * (bin_depth / sampling_ratio_d)]
# print("loc_ijk", loc_ijk)
if not (np.any([c < -1.0 for c in loc_ijk]) or loc_ijk[0] > input.shape[2] or
loc_ijk[1] > input.shape[3] or loc_ijk[2] > input.shape[4]):
for catch_case in range(3):
# catch on-border cases
if int(loc_ijk[catch_case]) == input.shape[catch_case + 2] - 1:
loc_ijk[catch_case] = input.shape[catch_case + 2] - 1
bin_val += lin_interpolator(loc_ijk)
pooled_roi[chan, bin_iy, bin_ix, bin_iz] = bin_val / n_points
pooled_rois[b].append(pooled_roi)
return np.array(pooled_rois)
def refine_detections(cf, batch_ixs, rois, deltas, scores, regressions):
"""
Refine classified proposals (apply deltas to rpn rois), filter overlaps (nms) and return final detections.
:param rois: (n_proposals, 2 * dim) normalized boxes as proposed by RPN. n_proposals = batch_size * POST_NMS_ROIS
:param deltas: (n_proposals, n_classes, 2 * dim) box refinement deltas as predicted by mrcnn bbox regressor.
:param batch_ixs: (n_proposals) batch element assignment info for re-allocation.
:param scores: (n_proposals, n_classes) probabilities for all classes per roi as predicted by mrcnn classifier.
:param regressions: (n_proposals, n_classes, regression_features (+1 for uncertainty if predicted) regression vector
:return: result: (n_final_detections, (y1, x1, y2, x2, (z1), (z2), batch_ix, pred_class_id, pred_score, *regression vector features))
"""
# class IDs per ROI. Since scores of all classes are of interest (not just max class), all are kept at this point.
class_ids = []
fg_classes = cf.head_classes - 1
# repeat vectors to fill in predictions for all foreground classes.
for ii in range(1, fg_classes + 1):
class_ids += [ii] * rois.shape[0]
class_ids = torch.from_numpy(np.array(class_ids)).cuda()
batch_ixs = batch_ixs.repeat(fg_classes)
rois = rois.repeat(fg_classes, 1)
deltas = deltas.repeat(fg_classes, 1, 1)
scores = scores.repeat(fg_classes, 1)
regressions = regressions.repeat(fg_classes, 1, 1)
# get class-specific scores and bounding box deltas
idx = torch.arange(class_ids.size()[0]).long().cuda()
# using idx instead of slice [:,] squashes first dimension.
#len(class_ids)>scores.shape[1] --> probs is broadcasted by expansion from fg_classes-->len(class_ids)
batch_ixs = batch_ixs[idx]
deltas_specific = deltas[idx, class_ids]
class_scores = scores[idx, class_ids]
regressions = regressions[idx, class_ids]
# apply bounding box deltas. re-scale to image coordinates.
std_dev = torch.from_numpy(np.reshape(cf.rpn_bbox_std_dev, [1, cf.dim * 2])).float().cuda()
scale = torch.from_numpy(cf.scale).float().cuda()
refined_rois = apply_box_deltas_2D(rois, deltas_specific * std_dev) * scale if cf.dim == 2 else \
apply_box_deltas_3D(rois, deltas_specific * std_dev) * scale
# round and cast to int since we're dealing with pixels now
refined_rois = clip_to_window(cf.window, refined_rois)
refined_rois = torch.round(refined_rois)
# filter out low confidence boxes
keep = idx
keep_bool = (class_scores >= cf.model_min_confidence)
if not 0 in torch.nonzero(keep_bool).size():
score_keep = torch.nonzero(keep_bool)[:, 0]
pre_nms_class_ids = class_ids[score_keep]
pre_nms_rois = refined_rois[score_keep]
pre_nms_scores = class_scores[score_keep]
pre_nms_batch_ixs = batch_ixs[score_keep]
for j, b in enumerate(unique1d(pre_nms_batch_ixs)):
bixs = torch.nonzero(pre_nms_batch_ixs == b)[:, 0]
bix_class_ids = pre_nms_class_ids[bixs]
bix_rois = pre_nms_rois[bixs]
bix_scores = pre_nms_scores[bixs]
for i, class_id in enumerate(unique1d(bix_class_ids)):
ixs = torch.nonzero(bix_class_ids == class_id)[:, 0]
# nms expects boxes sorted by score.
ix_rois = bix_rois[ixs]
ix_scores = bix_scores[ixs]
ix_scores, order = ix_scores.sort(descending=True)
ix_rois = ix_rois[order, :]
class_keep = nms.nms(ix_rois, ix_scores, cf.detection_nms_threshold)
# map indices back.
class_keep = keep[score_keep[bixs[ixs[order[class_keep]]]]]
# merge indices over classes for current batch element
b_keep = class_keep if i == 0 else unique1d(torch.cat((b_keep, class_keep)))
# only keep top-k boxes of current batch-element
top_ids = class_scores[b_keep].sort(descending=True)[1][:cf.model_max_instances_per_batch_element]
b_keep = b_keep[top_ids]
# merge indices over batch elements.
batch_keep = b_keep if j == 0 else unique1d(torch.cat((batch_keep, b_keep)))
keep = batch_keep
else:
keep = torch.tensor([0]).long().cuda()
# arrange output
output = [refined_rois[keep], batch_ixs[keep].unsqueeze(1)]
output += [class_ids[keep].unsqueeze(1).float(), class_scores[keep].unsqueeze(1)]
output += [regressions[keep]]
result = torch.cat(output, dim=1)
# shape: (n_keeps, catted feats), catted feats: [0:dim*2] are box_coords, [dim*2] are batch_ics,
# [dim*2+1] are class_ids, [dim*2+2] are scores, [dim*2+3:] are regression vector features (incl uncertainty)
return result
def loss_example_mining(cf, batch_proposals, batch_gt_boxes, batch_gt_masks, batch_roi_scores,
batch_gt_class_ids, batch_gt_regressions):
"""
    Subsamples proposals for mrcnn losses and generates targets. Sampling is done per batch element, which seems to have positive
    effects on training, as opposed to sampling over the entire batch. Negatives are sampled via stochastic hard-example mining
    (SHEM), where a number of negative proposals is drawn from a larger pool of highest-scoring proposals for stochasticity.
Scoring is obtained here as the max over all foreground probabilities as returned by mrcnn_classifier (worked better than
loss-based class-balancing methods like "online hard-example mining" or "focal loss".)
Classification-regression duality: regressions can be given along with classes (at least fg/bg, only class scores
are used for ranking).
:param batch_proposals: (n_proposals, (y1, x1, y2, x2, (z1), (z2), batch_ixs).
boxes as proposed by RPN. n_proposals here is determined by batch_size * POST_NMS_ROIS.
    :param batch_roi_scores: (n_proposals, n_classes) scores of proposals, used for ranking in negative sampling.
:param batch_gt_boxes: list over batch elements. Each element is a list over the corresponding roi target coordinates.
:param batch_gt_masks: list over batch elements. Each element is binary mask of shape (n_gt_rois, c, y, x, (z))
:param batch_gt_class_ids: list over batch elements. Each element is a list over the corresponding roi target labels.
if no classes predicted (only fg/bg from RPN): expected as pseudo classes [0, 1] for bg, fg.
:param batch_gt_regressions: list over b elements. Each element is a regression target vector. if None--> pseudo
:return: sample_indices: (n_sampled_rois) indices of sampled proposals to be used for loss functions.
    :return: target_class_ids: (n_sampled_rois) containing target class labels of sampled proposals.
:return: target_deltas: (n_sampled_rois, 2 * dim) containing target deltas of sampled proposals for box refinement.
:return: target_masks: (n_sampled_rois, y, x, (z)) containing target masks of sampled proposals.
"""
# normalization of target coordinates
#global sample_regressions
if cf.dim == 2:
h, w = cf.patch_size
scale = torch.from_numpy(np.array([h, w, h, w])).float().cuda()
else:
h, w, z = cf.patch_size
scale = torch.from_numpy(np.array([h, w, h, w, z, z])).float().cuda()
positive_count = 0
negative_count = 0
sample_positive_indices = []
sample_negative_indices = []
sample_deltas = []
sample_masks = []
sample_class_ids = []
if batch_gt_regressions is not None:
sample_regressions = []
else:
target_regressions = torch.FloatTensor().cuda()
std_dev = torch.from_numpy(cf.bbox_std_dev).float().cuda()
# loop over batch and get positive and negative sample rois.
for b in range(len(batch_gt_boxes)):
gt_masks = torch.from_numpy(batch_gt_masks[b]).float().cuda()
gt_class_ids = torch.from_numpy(batch_gt_class_ids[b]).int().cuda()
if batch_gt_regressions is not None:
gt_regressions = torch.from_numpy(batch_gt_regressions[b]).float().cuda()
#if np.any(batch_gt_class_ids[b] > 0): # skip roi selection for no gt images.
if np.any([len(coords)>0 for coords in batch_gt_boxes[b]]):
gt_boxes = torch.from_numpy(batch_gt_boxes[b]).float().cuda() / scale
else:
gt_boxes = torch.FloatTensor().cuda()
# get proposals and indices of current batch element.
proposals = batch_proposals[batch_proposals[:, -1] == b][:, :-1]
batch_element_indices = torch.nonzero(batch_proposals[:, -1] == b).squeeze(1)
# Compute overlaps matrix [proposals, gt_boxes]
if not 0 in gt_boxes.size():
if gt_boxes.shape[1] == 4:
assert cf.dim == 2, "gt_boxes shape {} doesnt match cf.dim{}".format(gt_boxes.shape, cf.dim)
overlaps = bbox_overlaps_2D(proposals, gt_boxes)
else:
assert cf.dim == 3, "gt_boxes shape {} doesnt match cf.dim{}".format(gt_boxes.shape, cf.dim)
overlaps = bbox_overlaps_3D(proposals, gt_boxes)
# Determine positive and negative ROIs
roi_iou_max = torch.max(overlaps, dim=1)[0]
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = roi_iou_max >= (0.5 if cf.dim == 2 else 0.3)
# 2. Negative ROIs are those with < 0.1 with every GT box.
negative_roi_bool = roi_iou_max < (0.1 if cf.dim == 2 else 0.01)
else:
positive_roi_bool = torch.FloatTensor().cuda()
negative_roi_bool = torch.from_numpy(np.array([1]*proposals.shape[0])).cuda()
# Sample Positive ROIs
if not 0 in torch.nonzero(positive_roi_bool).size():
positive_indices = torch.nonzero(positive_roi_bool).squeeze(1)
positive_samples = int(cf.train_rois_per_image * cf.roi_positive_ratio)
rand_idx = torch.randperm(positive_indices.size()[0])
rand_idx = rand_idx[:positive_samples].cuda()
positive_indices = positive_indices[rand_idx]
positive_samples = positive_indices.size()[0]
positive_rois = proposals[positive_indices, :]
# Assign positive ROIs to GT boxes.
positive_overlaps = overlaps[positive_indices, :]
roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]
roi_gt_boxes = gt_boxes[roi_gt_box_assignment, :]
roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment]
if batch_gt_regressions is not None:
roi_gt_regressions = gt_regressions[roi_gt_box_assignment]
# Compute bbox refinement targets for positive ROIs
deltas = box_refinement(positive_rois, roi_gt_boxes)
deltas /= std_dev
roi_masks = gt_masks[roi_gt_box_assignment]
assert roi_masks.shape[1] == 1, "gt masks have more than one channel --> is this desired?"
# Compute mask targets
boxes = positive_rois
box_ids = torch.arange(roi_masks.shape[0]).cuda().unsqueeze(1).float()
if len(cf.mask_shape) == 2:
y_exp, x_exp = roi_masks.shape[2:] # exp = expansion
boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp], dtype=torch.float32).cuda())
masks = roi_align.roi_align_2d(roi_masks,
torch.cat((box_ids, boxes), dim=1),
cf.mask_shape)
else:
y_exp, x_exp, z_exp = roi_masks.shape[2:] # exp = expansion
boxes.mul_(torch.tensor([y_exp, x_exp, y_exp, x_exp, z_exp, z_exp], dtype=torch.float32).cuda())
masks = roi_align.roi_align_3d(roi_masks,
torch.cat((box_ids, boxes), dim=1),
cf.mask_shape)
masks = masks.squeeze(1)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = torch.round(masks)
sample_positive_indices.append(batch_element_indices[positive_indices])
sample_deltas.append(deltas)
sample_masks.append(masks)
sample_class_ids.append(roi_gt_class_ids)
if batch_gt_regressions is not None:
sample_regressions.append(roi_gt_regressions)
positive_count += positive_samples
else:
positive_samples = 0
# Sample negative ROIs. Add enough to maintain positive:negative ratio, but at least 1. Sample via SHEM.
if not 0 in torch.nonzero(negative_roi_bool).size():
negative_indices = torch.nonzero(negative_roi_bool).squeeze(1)
r = 1.0 / cf.roi_positive_ratio
b_neg_count = np.max((int(r * positive_samples - positive_samples), 1))
roi_scores_neg = batch_roi_scores[batch_element_indices[negative_indices]]
raw_sampled_indices = shem(roi_scores_neg, b_neg_count, cf.shem_poolsize)
sample_negative_indices.append(batch_element_indices[negative_indices[raw_sampled_indices]])
negative_count += raw_sampled_indices.size()[0]
if len(sample_positive_indices) > 0:
target_deltas = torch.cat(sample_deltas)
target_masks = torch.cat(sample_masks)
target_class_ids = torch.cat(sample_class_ids)
if batch_gt_regressions is not None:
target_regressions = torch.cat(sample_regressions)
# Pad target information with zeros for negative ROIs.
if positive_count > 0 and negative_count > 0:
sample_indices = torch.cat((torch.cat(sample_positive_indices), torch.cat(sample_negative_indices)), dim=0)
zeros = torch.zeros(negative_count, cf.dim * 2).cuda()
target_deltas = torch.cat([target_deltas, zeros], dim=0)
zeros = torch.zeros(negative_count, *cf.mask_shape).cuda()
target_masks = torch.cat([target_masks, zeros], dim=0)
zeros = torch.zeros(negative_count).int().cuda()
target_class_ids = torch.cat([target_class_ids, zeros], dim=0)
if batch_gt_regressions is not None:
# regression targets need to have 0 as background/negative with below practice
if 'regression_bin' in cf.prediction_tasks:
zeros = torch.zeros(negative_count, dtype=torch.float).cuda()
else:
zeros = torch.zeros(negative_count, cf.regression_n_features, dtype=torch.float).cuda()
target_regressions = torch.cat([target_regressions, zeros], dim=0)
elif positive_count > 0:
sample_indices = torch.cat(sample_positive_indices)
elif negative_count > 0:
sample_indices = torch.cat(sample_negative_indices)
target_deltas = torch.zeros(negative_count, cf.dim * 2).cuda()
target_masks = torch.zeros(negative_count, *cf.mask_shape).cuda()
target_class_ids = torch.zeros(negative_count).int().cuda()
if batch_gt_regressions is not None:
if 'regression_bin' in cf.prediction_tasks:
target_regressions = torch.zeros(negative_count, dtype=torch.float).cuda()
else:
target_regressions = torch.zeros(negative_count, cf.regression_n_features, dtype=torch.float).cuda()
else:
sample_indices = torch.LongTensor().cuda()
target_class_ids = torch.IntTensor().cuda()
target_deltas = torch.FloatTensor().cuda()
target_masks = torch.FloatTensor().cuda()
target_regressions = torch.FloatTensor().cuda()
return sample_indices, target_deltas, target_masks, target_class_ids, target_regressions
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack([box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes, box_centers + 0.5 * box_sizes], axis=1)
return boxes
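# --- illustrative usage sketch (added, not part of the original toolkit) ---
# One scale and three ratios on a 2x2 feature map with stride 4 yield
# 2 * 2 * 1 * 3 = 12 anchors of shape (12, 4). All numbers are toy values.
def _example_generate_anchors():
    anchors = generate_anchors(scales=[8], ratios=[0.5, 1, 2], shape=[2, 2],
                               feature_stride=4, anchor_stride=1)
    assert anchors.shape == (12, 4)
    return anchors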
def generate_anchors_3D(scales_xy, scales_z, ratios, shape, feature_stride_xy, feature_stride_z, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales_xy, ratios_meshed = np.meshgrid(np.array(scales_xy), np.array(ratios))
scales_xy = scales_xy.flatten()
ratios_meshed = ratios_meshed.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales_xy / np.sqrt(ratios_meshed)
widths = scales_xy * np.sqrt(ratios_meshed)
depths = np.tile(np.array(scales_z), len(ratios_meshed)//np.array(scales_z)[..., None].shape[0])
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride_xy #translate from fm positions to input coords.
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride_xy
shifts_z = np.arange(0, shape[2], anchor_stride) * (feature_stride_z)
shifts_x, shifts_y, shifts_z = np.meshgrid(shifts_x, shifts_y, shifts_z)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
box_depths, box_centers_z = np.meshgrid(depths, shifts_z)
# Reshape to get a list of (y, x, z) and a list of (h, w, d)
box_centers = np.stack(
[box_centers_y, box_centers_x, box_centers_z], axis=2).reshape([-1, 3])
box_sizes = np.stack([box_heights, box_widths, box_depths], axis=2).reshape([-1, 3])
# Convert to corner coordinates (y1, x1, y2, x2, z1, z2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
boxes = np.transpose(np.array([boxes[:, 0], boxes[:, 1], boxes[:, 3], boxes[:, 4], boxes[:, 2], boxes[:, 5]]), axes=(1, 0))
return boxes
def generate_pyramid_anchors(logger, cf):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
from configs:
:param scales: cf.RPN_ANCHOR_SCALES , for conformity with retina nets: scale entries need to be list, e.g. [[4], [8], [16], [32]]
:param ratios: cf.RPN_ANCHOR_RATIOS , e.g. [0.5, 1, 2]
:param feature_shapes: cf.BACKBONE_SHAPES , e.g. [array of shapes per feature map] [80, 40, 20, 10, 5]
:param feature_strides: cf.BACKBONE_STRIDES , e.g. [2, 4, 8, 16, 32, 64]
:param anchors_stride: cf.RPN_ANCHOR_STRIDE , e.g. 1
:return anchors: (N, (y1, x1, y2, x2, (z1), (z2)). All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come first, then anchors of scale[1], and so on.
"""
scales = cf.rpn_anchor_scales
ratios = cf.rpn_anchor_ratios
feature_shapes = cf.backbone_shapes
anchor_stride = cf.rpn_anchor_stride
pyramid_levels = cf.pyramid_levels
feature_strides = cf.backbone_strides
logger.info("anchor scales {} and feature map shapes {}".format(scales, feature_shapes))
expected_anchors = [np.prod(feature_shapes[level]) * len(ratios) * len(scales['xy'][level]) for level in pyramid_levels]
anchors = []
for lix, level in enumerate(pyramid_levels):
if len(feature_shapes[level]) == 2:
anchors.append(generate_anchors(scales['xy'][level], ratios, feature_shapes[level],
feature_strides['xy'][level], anchor_stride))
elif len(feature_shapes[level]) == 3:
anchors.append(generate_anchors_3D(scales['xy'][level], scales['z'][level], ratios, feature_shapes[level],
feature_strides['xy'][level], feature_strides['z'][level], anchor_stride))
else:
raise Exception("invalid feature_shapes[{}] size {}".format(level, feature_shapes[level]))
logger.info("level {}: expected anchors {}, built anchors {}.".format(level, expected_anchors[lix], anchors[-1].shape))
out_anchors = np.concatenate(anchors, axis=0)
logger.info("Total: expected anchors {}, built anchors {}.".format(np.sum(expected_anchors), out_anchors.shape))
return out_anchors
def apply_box_deltas_2D(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
non_nans = boxes == boxes
assert torch.all(non_nans), "boxes at beginning of delta apply have nans: {}".format(
boxes[~non_nans])
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
# clip delta preds in order to avoid infs and later nans after exponentiation.
height *= torch.exp(torch.clamp(deltas[:, 2], max=6.))
width *= torch.exp(torch.clamp(deltas[:, 3], max=6.))
non_nans = width == width
assert torch.all(non_nans), "inside delta apply, width has nans: {}".format(
width[~non_nans])
# 0.*inf results in nan. fix nans to zeros?
# height[height!=height] = 0.
# width[width!=width] = 0.
non_nans = height == height
assert torch.all(non_nans), "inside delta apply, height has nans directly after setting to zero: {}".format(
height[~non_nans])
non_nans = width == width
assert torch.all(non_nans), "inside delta apply, width has nans directly after setting to zero: {}".format(
width[~non_nans])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = torch.stack([y1, x1, y2, x2], dim=1)
non_nans = result == result
assert torch.all(non_nans), "inside delta apply, result has nans: {}".format(result[~non_nans])
return result
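# --- illustrative usage sketch (added, not part of the original toolkit) ---
# Zero deltas leave a box unchanged; a log(2) size delta doubles height and
# width around the same center. Assumes the module-level torch/numpy imports;
# box values are toy examples.
def _example_apply_box_deltas_2D():
    boxes = torch.tensor([[0., 0., 10., 10.],
                          [0., 0., 10., 10.]])
    deltas = torch.tensor([[0., 0., 0., 0.],
                           [0., 0., float(np.log(2.)), float(np.log(2.))]])
    # -> [[0, 0, 10, 10], [-5, -5, 15, 15]]
    return apply_box_deltas_2D(boxes, deltas)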
def apply_box_deltas_3D(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 6] where each row is y1, x1, y2, x2, z1, z2
deltas: [N, 6] where each row is [dy, dx, dz, log(dh), log(dw), log(dd)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
depth = boxes[:, 5] - boxes[:, 4]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
center_z = boxes[:, 4] + 0.5 * depth
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
center_z += deltas[:, 2] * depth
height *= torch.exp(deltas[:, 3])
width *= torch.exp(deltas[:, 4])
depth *= torch.exp(deltas[:, 5])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
z1 = center_z - 0.5 * depth
y2 = y1 + height
x2 = x1 + width
z2 = z1 + depth
result = torch.stack([y1, x1, y2, x2, z1, z2], dim=1)
return result
def clip_boxes_2D(boxes, window):
"""
boxes: [N, 4] each col is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
boxes = torch.stack( \
[boxes[:, 0].clamp(float(window[0]), float(window[2])),
boxes[:, 1].clamp(float(window[1]), float(window[3])),
boxes[:, 2].clamp(float(window[0]), float(window[2])),
boxes[:, 3].clamp(float(window[1]), float(window[3]))], 1)
return boxes
def clip_boxes_3D(boxes, window):
"""
boxes: [N, 6] each col is y1, x1, y2, x2, z1, z2
window: [6] in the form y1, x1, y2, x2, z1, z2
"""
boxes = torch.stack( \
[boxes[:, 0].clamp(float(window[0]), float(window[2])),
boxes[:, 1].clamp(float(window[1]), float(window[3])),
boxes[:, 2].clamp(float(window[0]), float(window[2])),
boxes[:, 3].clamp(float(window[1]), float(window[3])),
boxes[:, 4].clamp(float(window[4]), float(window[5])),
boxes[:, 5].clamp(float(window[4]), float(window[5]))], 1)
return boxes
from matplotlib import pyplot as plt
def clip_boxes_numpy(boxes, window):
"""
boxes: [N, 4] each col is y1, x1, y2, x2 / [N, 6] in 3D.
    window: image shape (y, x, (z))
"""
if boxes.shape[1] == 4:
boxes = np.concatenate(
(np.clip(boxes[:, 0], 0, window[0])[:, None],
np.clip(boxes[:, 1], 0, window[0])[:, None],
np.clip(boxes[:, 2], 0, window[1])[:, None],
np.clip(boxes[:, 3], 0, window[1])[:, None]), 1
)
else:
boxes = np.concatenate(
(np.clip(boxes[:, 0], 0, window[0])[:, None],
np.clip(boxes[:, 1], 0, window[0])[:, None],
np.clip(boxes[:, 2], 0, window[1])[:, None],
np.clip(boxes[:, 3], 0, window[1])[:, None],
np.clip(boxes[:, 4], 0, window[2])[:, None],
np.clip(boxes[:, 5], 0, window[2])[:, None]), 1
)
return boxes
def bbox_overlaps_2D(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every box in boxes1 against every box in boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
    # using tf.tile() and tf.reshape.
boxes1_repeat = boxes2.size()[0]
boxes2_repeat = boxes1.size()[0]
boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)
boxes2 = boxes2.repeat(boxes2_repeat,1)
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)
b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)
y1 = torch.max(b1_y1, b2_y1)[:, 0]
x1 = torch.max(b1_x1, b2_x1)[:, 0]
y2 = torch.min(b1_y2, b2_y2)[:, 0]
x2 = torch.min(b1_x2, b2_x2)[:, 0]
#--> expects x1<x2 & y1<y2
zeros = torch.zeros(y1.size()[0], requires_grad=False)
if y1.is_cuda:
zeros = zeros.cuda()
intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area[:,0] + b2_area[:,0] - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
assert torch.all(iou<=1), "iou score>1 produced in bbox_overlaps_2D"
overlaps = iou.view(boxes2_repeat, boxes1_repeat) #--> per gt box: ious of all proposal boxes with that gt box
return overlaps
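# --- illustrative usage sketch (added, not part of the original toolkit) ---
# Two proposals against one ground-truth box; runs on CPU tensors since the
# helper only switches to GPU when its inputs are already on GPU. Assumes the
# module-level torch import; coordinates are toy values.
def _example_bbox_overlaps_2D():
    proposals = torch.tensor([[0., 0., 10., 10.],
                              [5., 5., 15., 15.]])
    gt = torch.tensor([[0., 0., 10., 10.]])
    # shape (n_proposals, n_gt) = (2, 1): [[1.0], [~0.143]]
    return bbox_overlaps_2D(proposals, gt)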
def bbox_overlaps_3D(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2, z1, z2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every box in boxes1 against every box in boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
    # using tf.tile() and tf.reshape.
boxes1_repeat = boxes2.size()[0]
boxes2_repeat = boxes1.size()[0]
boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,6)
boxes2 = boxes2.repeat(boxes2_repeat,1)
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2, b1_z1, b1_z2 = boxes1.chunk(6, dim=1)
b2_y1, b2_x1, b2_y2, b2_x2, b2_z1, b2_z2 = boxes2.chunk(6, dim=1)
y1 = torch.max(b1_y1, b2_y1)[:, 0]
x1 = torch.max(b1_x1, b2_x1)[:, 0]
y2 = torch.min(b1_y2, b2_y2)[:, 0]
x2 = torch.min(b1_x2, b2_x2)[:, 0]
z1 = torch.max(b1_z1, b2_z1)[:, 0]
z2 = torch.min(b1_z2, b2_z2)[:, 0]
zeros = torch.zeros(y1.size()[0], requires_grad=False)
if y1.is_cuda:
zeros = zeros.cuda()
intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros) * torch.max(z2 - z1, zeros)
# 3. Compute unions
b1_volume = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) * (b1_z2 - b1_z1)
b2_volume = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) * (b2_z2 - b2_z1)
union = b1_volume[:,0] + b2_volume[:,0] - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = iou.view(boxes2_repeat, boxes1_repeat)
return overlaps
def gt_anchor_matching(cf, anchors, gt_boxes, gt_class_ids=None):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2, (z1), (z2))]
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2, (z1), (z2))]
gt_class_ids (optional): [num_gt_boxes] Integer class IDs for one stage detectors. in RPN case of Mask R-CNN,
set all positive matches to 1 (foreground)
Returns:
anchor_class_matches: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
anchor_delta_targets: [N, (dy, dx, (dz), log(dh), log(dw), (log(dd)))] Anchor bbox deltas.
"""
anchor_class_matches = np.zeros([anchors.shape[0]], dtype=np.int32)
anchor_delta_targets = np.zeros((cf.rpn_train_anchors_per_image, 2*cf.dim))
anchor_matching_iou = cf.anchor_matching_iou
if gt_boxes is None:
anchor_class_matches = np.full(anchor_class_matches.shape, fill_value=-1)
return anchor_class_matches, anchor_delta_targets
# for mrcnn: anchor matching is done for RPN loss, so positive labels are all 1 (foreground)
if gt_class_ids is None:
gt_class_ids = np.array([1] * len(gt_boxes))
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= anchor_matching_iou then it's positive.
# If an anchor overlaps a GT box with IoU < 0.1 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.1).
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
if anchors.shape[1] == 4:
anchor_class_matches[(anchor_iou_max < 0.1)] = -1
elif anchors.shape[1] == 6:
anchor_class_matches[(anchor_iou_max < 0.01)] = -1
else:
raise ValueError('anchor shape wrong {}'.format(anchors.shape))
# 2. Set an anchor for each GT box (regardless of IoU value).
gt_iou_argmax = np.argmax(overlaps, axis=0)
for ix, ii in enumerate(gt_iou_argmax):
anchor_class_matches[ii] = gt_class_ids[ix]
# 3. Set anchors with high overlap as positive.
above_thresh_ixs = np.argwhere(anchor_iou_max >= anchor_matching_iou)
anchor_class_matches[above_thresh_ixs] = gt_class_ids[anchor_iou_argmax[above_thresh_ixs]]
# Subsample to balance positive anchors.
ids = np.where(anchor_class_matches > 0)[0]
extra = len(ids) - (cf.rpn_train_anchors_per_image // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
anchor_class_matches[ids] = 0
# Leave all negative proposals negative for now and sample from them later in online hard example mining.
# For positive anchors, compute shift and scale needed to transform them to match the corresponding GT boxes.
ids = np.where(anchor_class_matches > 0)[0]
ix = 0 # index into anchor_delta_targets
for i, a in zip(ids, anchors[ids]):
# closest gt box (it might have IoU < anchor_matching_iou)
gt = gt_boxes[anchor_iou_argmax[i]]
# convert coordinates to center plus width/height.
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
if cf.dim == 2:
anchor_delta_targets[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
else:
gt_d = gt[5] - gt[4]
gt_center_z = gt[4] + 0.5 * gt_d
a_d = a[5] - a[4]
a_center_z = a[4] + 0.5 * a_d
anchor_delta_targets[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
(gt_center_z - a_center_z) / a_d,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
np.log(gt_d / a_d)
]
# normalize.
anchor_delta_targets[ix] /= cf.rpn_bbox_std_dev
ix += 1
return anchor_class_matches, anchor_delta_targets
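# --- illustrative usage sketch (added, not part of the original toolkit) ---
# Minimal 2D matching example with a stand-in config object; the attribute
# names mirror the cf fields accessed above, but all values are made up.
def _example_gt_anchor_matching():
    import types
    cf = types.SimpleNamespace(dim=2,
                               rpn_train_anchors_per_image=6,
                               anchor_matching_iou=0.7,
                               rpn_bbox_std_dev=np.array([0.1, 0.1, 0.2, 0.2]))
    anchors = np.array([[0., 0., 10., 10.],
                        [20., 20., 30., 30.]])
    gt_boxes = np.array([[1., 1., 11., 11.]])
    matches, delta_targets = gt_anchor_matching(cf, anchors, gt_boxes)
    # matches -> [1, -1]: anchor 0 is matched as the closest anchor to the gt
    # box (despite IoU < anchor_matching_iou), anchor 1 is negative (IoU < 0.1).
    return matches, delta_targets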
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2) / 3D: (z1, z2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)] / 3D: (z1, z2)
"""
boxes[:, 0] = boxes[:, 0].clamp(float(window[0]), float(window[2]))
boxes[:, 1] = boxes[:, 1].clamp(float(window[1]), float(window[3]))
boxes[:, 2] = boxes[:, 2].clamp(float(window[0]), float(window[2]))
boxes[:, 3] = boxes[:, 3].clamp(float(window[1]), float(window[3]))
if boxes.shape[1] > 5:
boxes[:, 4] = boxes[:, 4].clamp(float(window[4]), float(window[5]))
boxes[:, 5] = boxes[:, 5].clamp(float(window[4]), float(window[5]))
return boxes
############################################################
# Connected Componenent Analysis
############################################################
def get_coords(binary_mask, n_components, dim):
"""
    loops over batch to perform connected component analysis on binary input mask. computes box coordinates around
    the n_components biggest components (rois).
:param binary_mask: (b, y, x, (z)). binary mask for one specific foreground class.
:param n_components: int. number of components to extract per batch element and class.
:return: coords (b, n, (y1, x1, y2, x2 (,z1, z2))
:return: batch_components (b, n, (y1, x1, y2, x2, (z1), (z2))
"""
assert len(binary_mask.shape)==dim+1
binary_mask = binary_mask.astype('uint8')
batch_coords = []
batch_components = []
for ix,b in enumerate(binary_mask):
clusters, n_cands = lb(b) # performs connected component analysis.
uniques, counts = np.unique(clusters, return_counts=True)
keep_uniques = uniques[1:][np.argsort(counts[1:])[::-1]][:n_components] #only keep n_components largest components
p_components = np.array([(clusters == ii) * 1 for ii in keep_uniques]) # separate clusters and concat
p_coords = []
if p_components.shape[0] > 0:
for roi in p_components:
mask_ixs = np.argwhere(roi != 0)
# get coordinates around component.
roi_coords = [np.min(mask_ixs[:, 0]) - 1, np.min(mask_ixs[:, 1]) - 1, np.max(mask_ixs[:, 0]) + 1,
np.max(mask_ixs[:, 1]) + 1]
if dim == 3:
roi_coords += [np.min(mask_ixs[:, 2]), np.max(mask_ixs[:, 2])+1]
p_coords.append(roi_coords)
p_coords = np.array(p_coords)
#clip coords.
p_coords[p_coords < 0] = 0
p_coords[:, :4][p_coords[:, :4] > binary_mask.shape[-2]] = binary_mask.shape[-2]
if dim == 3:
p_coords[:, 4:][p_coords[:, 4:] > binary_mask.shape[-1]] = binary_mask.shape[-1]
batch_coords.append(p_coords)
batch_components.append(p_components)
return batch_coords, batch_components
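# --- illustrative usage sketch (added, not part of the original toolkit) ---
# One 2D binary mask with two blobs; get_coords returns per-batch box
# coordinates around the largest components. Relies on the module-level
# connected-component labelling alias (lb) used inside get_coords; the mask
# layout is a toy example.
def _example_get_coords():
    mask = np.zeros((1, 8, 8), dtype=np.uint8)
    mask[0, 1:3, 1:3] = 1     # 2x2 blob
    mask[0, 5:8, 5:8] = 1     # 3x3 blob (largest, hence listed first)
    coords, components = get_coords(mask, n_components=2, dim=2)
    return coords, components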
# noinspection PyCallingNonCallable
def get_coords_gpu(binary_mask, n_components, dim):
"""
    loops over batch to perform connected component analysis on binary input mask. computes box coordinates around
    the n_components biggest components (rois).
:param binary_mask: (b, y, x, (z)). binary mask for one specific foreground class.
:param n_components: int. number of components to extract per batch element and class.
:return: coords (b, n, (y1, x1, y2, x2 (,z1, z2))
:return: batch_components (b, n, (y1, x1, y2, x2, (z1), (z2))
"""
raise Exception("throws floating point exception")
assert len(binary_mask.shape)==dim+1
binary_mask = binary_mask.type(torch.uint8)
batch_coords = []
batch_components = []
for ix,b in enumerate(binary_mask):
clusters, n_cands = lb(b.cpu().data.numpy()) # peforms connected component analysis.
clusters = torch.from_numpy(clusters).cuda()
uniques = torch.unique(clusters)
counts = torch.stack([(clusters==unique).sum() for unique in uniques])
keep_uniques = uniques[1:][torch.sort(counts[1:])[1].flip(0)][:n_components] #only keep n_components largest components
p_components = torch.cat([(clusters == ii).unsqueeze(0) for ii in keep_uniques]).cuda() # separate clusters and concat
p_coords = []
if p_components.shape[0] > 0:
for roi in p_components:
mask_ixs = torch.nonzero(roi)
# get coordinates around component.
roi_coords = [torch.min(mask_ixs[:, 0]) - 1, torch.min(mask_ixs[:, 1]) - 1,
torch.max(mask_ixs[:, 0]) + 1,
torch.max(mask_ixs[:, 1]) + 1]
if dim == 3:
roi_coords += [torch.min(mask_ixs[:, 2]), torch.max(mask_ixs[:, 2])+1]
p_coords.append(roi_coords)
p_coords = torch.tensor(p_coords)
#clip coords.
p_coords[p_coords < 0] = 0
p_coords[:, :4][p_coords[:, :4] > binary_mask.shape[-2]] = binary_mask.shape[-2]
if dim == 3:
p_coords[:, 4:][p_coords[:, 4:] > binary_mask.shape[-1]] = binary_mask.shape[-1]
batch_coords.append(p_coords)
batch_components.append(p_components)
return batch_coords, batch_components
############################################################
# Pytorch Utility Functions
############################################################
def unique1d(tensor):
"""discard all elements of tensor that occur more than once; make tensor unique.
:param tensor:
:return:
"""
if tensor.size()[0] == 0 or tensor.size()[0] == 1:
return tensor
tensor = tensor.sort()[0]
unique_bool = tensor[1:] != tensor[:-1]
first_element = torch.tensor([True], dtype=torch.bool, requires_grad=False)
if tensor.is_cuda:
first_element = first_element.cuda()
unique_bool = torch.cat((first_element, unique_bool), dim=0)
return tensor[unique_bool.data]
def intersect1d(tensor1, tensor2):
aux = torch.cat((tensor1, tensor2), dim=0)
aux = aux.sort(descending=True)[0]
return aux[:-1][(aux[1:] == aux[:-1]).data]
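# --- illustrative usage sketch (added, not part of the original toolkit) ---
# CPU toy tensors: unique1d sorts and deduplicates, intersect1d returns the
# common elements of two already-unique index tensors (in descending order).
def _example_unique1d_intersect1d():
    a = torch.tensor([3, 1, 3, 2])
    b = torch.tensor([2, 3, 5])
    # unique1d(a) -> tensor([1, 2, 3]); intersection with b -> tensor([3, 2])
    return unique1d(a), intersect1d(unique1d(a), unique1d(b))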
def shem(roi_probs_neg, negative_count, poolsize):
"""
stochastic hard example mining: from a list of indices (referring to non-matched predictions),
determine a pool of highest scoring (worst false positives) of size negative_count*poolsize.
Then, sample n (= negative_count) predictions of this pool as negative examples for loss.
:param roi_probs_neg: tensor of shape (n_predictions, n_classes).
:param negative_count: int.
:param poolsize: int.
    :return: (negative_count,) indices referring to positions in roi_probs_neg. If the pool is smaller than expected due to
    a limited number of negative proposals available, this function will return fewer than negative_count sampled indices
    without throwing an error.
"""
    # sort according to highest foreground score.
probs, order = roi_probs_neg[:, 1:].max(1)[0].sort(descending=True)
select = torch.tensor((poolsize * int(negative_count), order.size()[0])).min().int()
pool_indices = order[:select]
rand_idx = torch.randperm(pool_indices.size()[0])
return pool_indices[rand_idx[:negative_count].cuda()]
############################################################
# Weight Init
############################################################
def initialize_weights(net):
"""Initialize model weights. Current Default in Pytorch (version 0.4.1) is initialization from a uniform distriubtion.
Will expectably be changed to kaiming_uniform in future versions.
"""
init_type = net.cf.weight_init
for m in [module for module in net.modules() if type(module) in [torch.nn.Conv2d, torch.nn.Conv3d,
torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d,
torch.nn.Linear]]:
if init_type == 'xavier_uniform':
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif init_type == 'xavier_normal':
torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif init_type == "kaiming_uniform":
torch.nn.init.kaiming_uniform_(m.weight.data, mode='fan_out', nonlinearity=net.cf.relu, a=0)
if m.bias is not None:
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(m.weight.data)
bound = 1 / np.sqrt(fan_out)
torch.nn.init.uniform_(m.bias, -bound, bound)
elif init_type == "kaiming_normal":
torch.nn.init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity=net.cf.relu, a=0)
if m.bias is not None:
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(m.weight.data)
bound = 1 / np.sqrt(fan_out)
torch.nn.init.normal_(m.bias, -bound, bound)
net.logger.info("applied {} weight init.".format(init_type)) | 45.089343 | 144 | 0.621074 |
4409402fc2d1a6355bab21b28369a59ac9aeb66b | 1,922 | py | Python | challenges/fifo_animal_shelter/test_fifo_animal_shelter.py | scott-currie/data_structures_and_algorithms | 04c1f50c01dbe6ee15c3f0a1155cc2c9528bdd06 | [
"MIT"
] | null | null | null | challenges/fifo_animal_shelter/test_fifo_animal_shelter.py | scott-currie/data_structures_and_algorithms | 04c1f50c01dbe6ee15c3f0a1155cc2c9528bdd06 | [
"MIT"
] | null | null | null | challenges/fifo_animal_shelter/test_fifo_animal_shelter.py | scott-currie/data_structures_and_algorithms | 04c1f50c01dbe6ee15c3f0a1155cc2c9528bdd06 | [
"MIT"
] | null | null | null | from .fifo_animal_shelter import AnimalShelter
import pytest
@pytest.fixture
def dog_cat_dog():
shelter = AnimalShelter()
shelter.enqueue('dog')
shelter.enqueue('cat')
shelter.enqueue('dog')
return shelter
@pytest.fixture
def empty_shelter():
shelter = AnimalShelter()
return shelter
def test_empty_shelter_len_zero(empty_shelter):
"""Test empty shelter has length zero."""
assert len(empty_shelter) == 0
def test_empty_shelter_dequeue_is_none(empty_shelter):
"""Test empty shelter returns None on dequeue."""
assert empty_shelter.dequeue('cat') is None
def test_dcd_has_dequeue(dog_cat_dog):
"""Test a shelter has dequeue."""
assert dog_cat_dog.dequeue
def test_enqueue_dequeue_match():
"""Test a pet enqueued then dequeued returns the same value."""
shelter = AnimalShelter()
shelter.enqueue('dog')
assert shelter.dequeue('dog').val == 'dog'
assert shelter.dequeue('dog') is None
assert shelter.dequeue('cat') is None
def test_enqueue_dequeue_reverse_order():
"""Test that multiple pets of different types are dequeued in
the same order they were queued in."""
shelter = AnimalShelter()
shelter.enqueue('dog')
shelter.enqueue('cat')
# assert shelter.dequeue('cat').val == 'cat'
assert shelter.dequeue('dog').val == 'dog'
assert shelter.dequeue('cat').val == 'cat'
assert shelter.dequeue('dog') is None
def test_dequeue_without_pref():
"""Test dequeue without pref returns the first pet in the queue."""
shelter = AnimalShelter()
shelter.enqueue('cat')
shelter.enqueue('dog')
shelter.enqueue('dog')
shelter.enqueue('dog')
assert shelter.dequeue().val == 'cat'
def test_dequeue_without_pref_returns_none(empty_shelter):
"""Test that dequeue without a preference returns None
if there are no pets in the queue."""
assert empty_shelter.dequeue() is None
| 27.457143 | 71 | 0.704995 |
678aa0b96b739986579cd179d519e55a935e9714 | 55,067 | py | Python | src/radical/pilot/pmgr/launching/default.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | src/radical/pilot/pmgr/launching/default.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | src/radical/pilot/pmgr/launching/default.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | # pylint: disable=protected-access
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import copy
import math
import time
import pprint
import shutil
import tempfile
import radical.saga as rs
import radical.saga.filesystem as rsfs
import radical.utils as ru
from ... import states as rps
from ... import constants as rpc
from .base import PMGRLaunchingComponent
from ...staging_directives import complete_url
# ------------------------------------------------------------------------------
# local constants
DEFAULT_AGENT_SPAWNER = 'POPEN'
DEFAULT_RP_VERSION = 'local'
DEFAULT_VIRTENV_MODE = 'update'
DEFAULT_VIRTENV_DIST = 'default'
DEFAULT_AGENT_CONFIG = 'default'
JOB_CANCEL_DELAY = 120 # seconds between cancel signal and job kill
JOB_CHECK_INTERVAL = 60 # seconds between runs of the job state check loop
JOB_CHECK_MAX_MISSES = 3 # number of times to find a job missing before
# declaring it dead
LOCAL_SCHEME = 'file'
BOOTSTRAPPER_0 = "bootstrap_0.sh"
# ------------------------------------------------------------------------------
#
class Default(PMGRLaunchingComponent):
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
PMGRLaunchingComponent.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def initialize(self):
# we don't really have an output queue, as we pass control over the
# pilot jobs to the resource management system (ResourceManager).
self._pilots = dict() # dict for all known pilots
        self._pilots_lock = ru.RLock() # lock for manipulating the above
self._checking = list() # pilots to check state on
        self._check_lock = ru.RLock() # lock for manipulating the above
self._saga_fs_cache = dict() # cache of saga directories
self._saga_js_cache = dict() # cache of saga job services
self._sandboxes = dict() # cache of resource sandbox URLs
self._cache_lock = ru.RLock() # lock for cache
self._mod_dir = os.path.dirname(os.path.abspath(__file__))
self._root_dir = "%s/../../" % self._mod_dir
self._conf_dir = "%s/configs/" % self._root_dir
self.register_input(rps.PMGR_LAUNCHING_PENDING,
rpc.PMGR_LAUNCHING_QUEUE, self.work)
# FIXME: make interval configurable
self.register_timed_cb(self._pilot_watcher_cb, timer=10.0)
# we listen for pilot cancel and input staging commands
self.register_subscriber(rpc.CONTROL_PUBSUB, self._pmgr_control_cb)
self._log.info(ru.get_version([self._mod_dir, self._root_dir]))
self._rp_version, _, _, _, self._rp_sdist_name, self._rp_sdist_path = \
ru.get_version([self._mod_dir, self._root_dir])
# --------------------------------------------------------------------------
#
def finalize(self):
try:
self.unregister_timed_cb(self._pilot_watcher_cb)
self.unregister_input(rps.PMGR_LAUNCHING_PENDING,
rpc.PMGR_LAUNCHING_QUEUE, self.work)
# FIXME: always kill all saga jobs for non-final pilots at termination,
# and set the pilot states to CANCELED. This will conflict with
# disconnect/reconnect semantics.
with self._pilots_lock:
pids = list(self._pilots.keys())
self._cancel_pilots(pids)
self._kill_pilots(pids)
with self._cache_lock:
for url,js in self._saga_js_cache.items():
self._log.debug('close js %s', url)
js.close()
except:
self._log.exception('finalization error')
# --------------------------------------------------------------------------
#
def _pmgr_control_cb(self, topic, msg):
cmd = msg['cmd']
arg = msg['arg']
self._log.debug('launcher got %s', msg)
if cmd == 'pilot_staging_input_request':
self._handle_pilot_input_staging(arg['pilot'], arg['sds'])
if cmd == 'pilot_staging_output_request':
self._handle_pilot_output_staging(arg['pilot'], arg['sds'])
elif cmd == 'cancel_pilots':
# on cancel_pilot requests, we forward the DB entries via MongoDB,
# by pushing a pilot update. We also mark the pilot for
      # cancellation, so that the pilot watcher can cancel the job after
      # JOB_CANCEL_DELAY seconds, in case the pilot did not react to the
# command in time.
pmgr = arg['pmgr']
pids = arg['uids']
if pmgr != self._pmgr:
# this request is not for us to enact
return True
if not isinstance(pids, list):
pids = [pids]
self._log.info('received pilot_cancel command (%s)', pids)
self._cancel_pilots(pids)
return True
# --------------------------------------------------------------------------
#
def _handle_pilot_input_staging(self, pilot, sds):
pid = pilot['uid']
# NOTE: no unit sandboxes defined!
src_context = {'pwd' : pilot['client_sandbox'],
'pilot' : pilot['pilot_sandbox'],
'resource': pilot['resource_sandbox']}
tgt_context = {'pwd' : pilot['pilot_sandbox'],
'pilot' : pilot['pilot_sandbox'],
'resource': pilot['resource_sandbox']}
# Iterate over all directives
for sd in sds:
# TODO: respect flags in directive
action = sd['action']
flags = sd['flags']
did = sd['uid']
src = sd['source']
tgt = sd['target']
assert(action in [rpc.COPY, rpc.LINK, rpc.MOVE, rpc.TRANSFER])
self._prof.prof('staging_in_start', uid=pid, msg=did)
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
if action in [rpc.COPY, rpc.LINK, rpc.MOVE]:
self._prof.prof('staging_in_fail', uid=pid, msg=did)
raise ValueError("invalid action '%s' on pilot level" % action)
self._log.info('transfer %s to %s', src, tgt)
# FIXME: make sure that tgt URL points to the right resource
# FIXME: honor sd flags if given (recursive...)
flags = rsfs.CREATE_PARENTS
if os.path.isdir(src.path):
flags |= rsfs.RECURSIVE
# Define and open the staging directory for the pilot
# We use the target dir construct here, so that we can create
# the directory if it does not yet exist.
# url used for cache (sandbox url w/o path)
fs_url = rs.Url(pilot['pilot_sandbox'])
fs_url.path = '/'
key = str(fs_url)
self._log.debug("rs.file.Directory ('%s')", key)
with self._cache_lock:
if key in self._saga_fs_cache:
fs = self._saga_fs_cache[key]
else:
fs = rsfs.Directory(fs_url, session=self._session)
self._saga_fs_cache[key] = fs
fs.copy(src, tgt, flags=flags)
sd['state'] = rps.DONE
self._prof.prof('staging_in_stop', uid=pid, msg=did)
self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'pilot_staging_input_result',
'arg': {'pilot': pilot,
'sds' : sds}})
# --------------------------------------------------------------------------
#
def _handle_pilot_output_staging(self, pilot, sds):
pid = pilot['uid']
# NOTE: no unit sandboxes defined!
src_context = {'pwd' : pilot['pilot_sandbox'],
'pilot' : pilot['pilot_sandbox'],
'resource': pilot['resource_sandbox']}
tgt_context = {'pwd' : pilot['client_sandbox'],
'pilot' : pilot['pilot_sandbox'],
'resource': pilot['resource_sandbox']}
# Iterate over all directives
for sd in sds:
try:
action = sd['action']
flags = sd['flags']
did = sd['uid']
src = sd['source']
tgt = sd['target']
assert(action in [rpc.COPY, rpc.LINK, rpc.MOVE, rpc.TRANSFER])
self._prof.prof('staging_out_start', uid=pid, msg=did)
if action in [rpc.COPY, rpc.LINK, rpc.MOVE]:
raise ValueError("invalid pilot action '%s'" % action)
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
self._log.info('transfer %s to %s', src, tgt)
# FIXME: make sure that tgt URL points to the right resource
# FIXME: honor sd flags if given (recursive...)
flags = rsfs.CREATE_PARENTS
if os.path.isdir(src.path):
flags |= rsfs.RECURSIVE
# Define and open the staging directory for the pilot
# url used for cache (sandbox url w/o path)
fs_url = rs.Url(pilot['pilot_sandbox'])
fs_url.path = '/'
key = str(fs_url)
with self._cache_lock:
if key in self._saga_fs_cache:
fs = self._saga_fs_cache[key]
else:
fs = rsfs.Directory(fs_url, session=self._session)
self._saga_fs_cache[key] = fs
fs.copy(src, tgt, flags=flags)
sd['state'] = rps.DONE
self._prof.prof('staging_out_stop', uid=pid, msg=did)
except:
self._log.exception('pilot level staging failed')
self._prof.prof('staging_out_fail', uid=pid, msg=did)
sd['state'] = rps.FAILED
self.publish(rpc.CONTROL_PUBSUB,
{'cmd': 'pilot_staging_output_result',
'arg': {'pilot': pilot,
'sds' : [sd]}})
# --------------------------------------------------------------------------
#
def _pilot_watcher_cb(self):
# FIXME: we should actually use SAGA job state notifications!
    # FIXME: check how race conditions are handled: we may detect
# a finalized SAGA job and change the pilot state -- but that
# pilot may have transitioned into final state via the normal
# notification mechanism already. That probably should be sorted
# out by the pilot manager, which will receive notifications for
# both transitions. As long as the final state is the same,
# there should be no problem anyway. If it differs, the
# 'cleaner' final state should prevail, in this ordering:
# cancel
# timeout
# error
# disappeared
# This implies that we want to communicate 'final_cause'
# we don't want to lock our members all the time. For that reason we
# use a copy of the pilots_tocheck list and iterate over that, and only
# lock other members when they are manipulated.
tc = rs.job.Container()
with self._pilots_lock, self._check_lock:
for pid in self._checking:
tc.add(self._pilots[pid]['job'])
states = tc.get_states()
self._log.debug('bulk states: %s', states)
# if none of the states is final, we have nothing to do.
# We can't rely on the ordering of tasks and states in the task
    # container, so we hope that the task container's bulk state query leads
# to a caching of state information, and we thus have cache hits when
# querying the pilots individually
final_pilots = list()
with self._pilots_lock, self._check_lock:
for pid in self._checking:
state = self._pilots[pid]['job'].state
self._log.debug('saga job state: %s %s', pid, state)
if state in [rs.job.DONE, rs.job.FAILED, rs.job.CANCELED]:
pilot = self._pilots[pid]['pilot']
if state == rs.job.DONE : pilot['state'] = rps.DONE
if state == rs.job.FAILED : pilot['state'] = rps.FAILED
if state == rs.job.CANCELED: pilot['state'] = rps.CANCELED
final_pilots.append(pilot)
if final_pilots:
for pilot in final_pilots:
with self._check_lock:
# stop monitoring this pilot
self._checking.remove(pilot['uid'])
self._log.debug('final pilot %s %s', pilot['uid'], pilot['state'])
self.advance(final_pilots, push=False, publish=True)
# all checks are done, final pilots are weeded out. Now check if any
# pilot is scheduled for cancellation and is overdue, and kill it
# forcefully.
to_cancel = list()
with self._pilots_lock:
for pid in self._pilots:
pilot = self._pilots[pid]['pilot']
time_cr = pilot.get('cancel_requested')
# check if the pilot is final meanwhile
if pilot['state'] in rps.FINAL:
continue
if time_cr and time_cr + JOB_CANCEL_DELAY < time.time():
self._log.debug('pilot needs killing: %s : %s + %s < %s',
pid, time_cr, JOB_CANCEL_DELAY, time.time())
del(pilot['cancel_requested'])
self._log.debug(' cancel pilot %s', pid)
to_cancel.append(pid)
if to_cancel:
self._kill_pilots(to_cancel)
return True
# --------------------------------------------------------------------------
#
def _cancel_pilots(self, pids):
'''
Send a cancellation request to the pilots. This call will not wait for
the request to get enacted, nor for it to arrive, but just send it.
'''
if not pids or not self._pilots:
# nothing to do
return
    # record time of request, so that forceful termination can happen
# after a certain delay
now = time.time()
with self._pilots_lock:
for pid in pids:
if pid in self._pilots:
self._log.debug('update cancel req: %s %s', pid, now)
self._pilots[pid]['pilot']['cancel_requested'] = now
# --------------------------------------------------------------------------
#
def _kill_pilots(self, pids):
'''
    Forcefully kill a set of pilots. For pilots which have just recently been
    cancelled, we will wait a certain amount of time to give them a chance
    to terminate on their own (which allows them to flush profiles and logfiles,
    etc). After that delay, we'll make sure they get killed.
'''
self._log.debug('killing pilots: %s', pids)
if not pids or not self._pilots:
# nothing to do
return
# find the most recent cancellation request
with self._pilots_lock:
self._log.debug('killing pilots: %s',
[p['pilot'].get('cancel_requested', 0)
for p in list(self._pilots.values())])
last_cancel = max([p['pilot'].get('cancel_requested', 0)
for p in list(self._pilots.values())])
self._log.debug('killing pilots: last cancel: %s', last_cancel)
    # we wait for up to JOB_CANCEL_DELAY for the pilots to terminate on their own
while time.time() < (last_cancel + JOB_CANCEL_DELAY):
self._log.debug('killing pilots: check %s < %s + %s',
time.time(), last_cancel, JOB_CANCEL_DELAY)
alive_pids = list()
for pid in pids:
if pid not in self._pilots:
self._log.error('unknown: %s', pid)
raise ValueError('unknown pilot %s' % pid)
pilot = self._pilots[pid]['pilot']
if pilot['state'] not in rps.FINAL:
self._log.debug('killing pilots: alive %s', pid)
alive_pids.append(pid)
else:
self._log.debug('killing pilots: dead %s', pid)
pids = alive_pids
if not alive_pids:
# nothing to do anymore
return
      # avoid busy polling
time.sleep(1)
to_advance = list()
    # we don't want the watcher checking for these pilots anymore
with self._check_lock:
for pid in pids:
if pid in self._checking:
self._checking.remove(pid)
self._log.debug('killing pilots: kill! %s', pids)
try:
with self._pilots_lock:
tc = rs.job.Container()
for pid in pids:
if pid not in self._pilots:
self._log.error('unknown: %s', pid)
raise ValueError('unknown pilot %s' % pid)
pilot = self._pilots[pid]['pilot']
job = self._pilots[pid]['job']
# don't overwrite resource_details from the agent
#
if 'resource_details' in pilot:
del(pilot['resource_details'])
if pilot['state'] in rps.FINAL:
continue
self._log.debug('plan cancellation of %s : %s', pilot, job)
to_advance.append(pilot)
self._log.debug('request cancel for %s', pilot['uid'])
tc.add(job)
self._log.debug('cancellation start')
tc.cancel()
tc.wait()
self._log.debug('cancellation done')
# set canceled state
self.advance(to_advance, state=rps.CANCELED, push=False, publish=True)
except Exception:
self._log.exception('pilot kill failed')
return True
# --------------------------------------------------------------------------
#
def work(self, pilots):
if not isinstance(pilots, list):
pilots = [pilots]
self.advance(pilots, rps.PMGR_LAUNCHING, publish=True, push=False)
# We can only use bulk submission for pilots which go to the same
    # target, thus we sort them into buckets and launch the buckets
# individually
buckets = dict()
for pilot in pilots:
resource = pilot['description']['resource']
schema = pilot['description']['access_schema']
if resource not in buckets:
buckets[resource] = dict()
if schema not in buckets[resource]:
buckets[resource][schema] = list()
buckets[resource][schema].append(pilot)
for resource in buckets:
for schema in buckets[resource]:
try:
pilots = buckets[resource][schema]
pids = [p['uid'] for p in pilots]
self._log.info("Launching pilots on %s: %s", resource, pids)
self._start_pilot_bulk(resource, schema, pilots)
self.advance(pilots, rps.PMGR_ACTIVE_PENDING, push=False, publish=True)
except Exception:
self._log.exception('bulk launch failed')
self.advance(pilots, rps.FAILED, push=False, publish=True)
# --------------------------------------------------------------------------
#
def _start_pilot_bulk(self, resource, schema, pilots):
"""
For each pilot, we prepare by determining what files need to be staged,
and what job description needs to be submitted.
We expect `_prepare_pilot(resource, pilot)` to return a dict with:
{
'js': saga.job.Description,
'ft': [
{ 'src': string # absolute source file name
'tgt': string # relative target file name
'rem': bool # shall we remove src?
},
... ]
}
    When transferring data, we'll ensure that each src is only transferred
once (in fact, we put all src files into a tarball and unpack that on
the target side).
The returned dicts are expected to only contain files which actually
    need staging, i.e. which have not been staged during a previous pilot
submission. That implies one of two things: either this component is
stateful, and remembers what has been staged -- which makes it difficult
to use multiple component instances; or the component inspects the
target resource for existing files -- which involves additional
expensive remote hops.
FIXME: since neither is implemented at this point we won't discuss the
tradeoffs further -- right now files are unique per pilot bulk.
Once all dicts are collected, we create one additional file which
contains the staging information, and then pack all src files into
a tarball for staging. We transfer the tarball, and *immediately*
    trigger the untarring on the target resource, which is thus *not* part of
the bootstrapping process.
    NOTE: this is to avoid untarring race conditions for multiple pilots, and
also to simplify bootstrapping dependencies -- the bootstrappers
are likely within the tarball after all...
"""
rcfg = self._session.get_resource_config(resource, schema)
sid = self._session.uid
# ----------------------------------------------------------------------
# the rcfg can contain keys with string expansion placeholders where
# values from the pilot description need filling in. A prominent
# example is `%(pd.project)s`, where the pilot description's `PROJECT`
# value needs to be filled in (here in lowercase).
#
# FIXME: right now we assume all pilot descriptions to contain similar
# entries, so that the expansion is only done on the first PD.
expand = dict()
pd = pilots[0]['description']
for k,v in pd.items():
if v is None:
v = ''
expand['pd.%s' % k] = v
if isinstance(v, str):
expand['pd.%s' % k.upper()] = v.upper()
expand['pd.%s' % k.lower()] = v.lower()
else:
expand['pd.%s' % k.upper()] = v
expand['pd.%s' % k.lower()] = v
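    # e.g. (illustrative values only): a pilot description with project='ABC123'
    #   yields expand['pd.project'] == 'abc123' and expand['pd.PROJECT'] == 'ABC123',
    #   so an rcfg entry like '%(pd.project)s' expands to 'abc123' below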
for k in rcfg:
if isinstance(rcfg[k], str):
orig = rcfg[k]
rcfg[k] = rcfg[k] % expand
expanded = rcfg[k]
if orig != expanded:
self._log.debug('RCFG:\n%s\n%s', orig, expanded)
# we create a fake session_sandbox with all pilot_sandboxes in /tmp, and
# then tar it up. Once we untar that tarball on the target machine, we
# should have all sandboxes and all files required to bootstrap the
# pilots
# FIXME: on untar, there is a race between multiple launcher components
# within the same session toward the same target resource.
tmp_dir = os.path.abspath(tempfile.mkdtemp(prefix='rp_agent_tar_dir'))
tar_name = '%s.%s.tgz' % (sid, self.uid)
tar_tgt = '%s/%s' % (tmp_dir, tar_name)
tar_url = rs.Url('file://localhost/%s' % tar_tgt)
# we need the session sandbox url, but that is (at least in principle)
# dependent on the schema to use for pilot startup. So we confirm here
    # that the bulk is consistent wrt. the schema.  Also include
# `staging_input` files and place them in the `staging_area`.
#
    # FIXME: if it is not, it needs to be split into schema-specific
# sub-bulks
#
schema = pd.get('access_schema')
for pilot in pilots[1:]:
assert(schema == pilot['description'].get('access_schema')), \
'inconsistent scheme on launch / staging'
# get and expand sandboxes
session_sandbox = self._session._get_session_sandbox(pilots[0]).path
session_sandbox = session_sandbox % expand
# we will create the session sandbox before we untar, so we can use that
# as workdir, and pack all paths relative to that session sandbox. That
# implies that we have to recheck that all URLs in fact do point into
# the session sandbox.
#
# We also create a file `staging_output.json` for each pilot which
# contains the list of files to be tarred up and prepared for output
# staging
ft_list = list() # files to stage
jd_list = list() # jobs to submit
for pilot in pilots:
os.makedirs('%s/%s' % (tmp_dir, pilot['uid']))
info = self._prepare_pilot(resource, rcfg, pilot, expand)
ft_list += info['ft']
jd_list.append(info['jd'])
self._prof.prof('staging_in_start', uid=pilot['uid'])
for fname in ru.as_list(pilot['description'].get('input_staging')):
ft_list.append({'src': fname,
'tgt': '%s/staging_area/%s'
% (pilot['uid'], os.path.basename(fname)),
'rem': False})
output_staging = pilot['description'].get('output_staging')
if output_staging:
fname = '%s/%s/staging_output.txt' % (tmp_dir, pilot['uid'])
with open(fname, 'w') as fout:
for entry in output_staging:
fout.write('%s\n' % entry)
for ft in ft_list:
src = os.path.abspath(ft['src'])
tgt = os.path.relpath(os.path.normpath(ft['tgt']), session_sandbox)
# src_dir = os.path.dirname(src)
tgt_dir = os.path.dirname(tgt)
if tgt_dir.startswith('..'):
# raise ValueError('staging tgt %s outside pilot sbox: %s' % (ft['tgt'], tgt))
tgt = ft['tgt']
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir('%s/%s' % (tmp_dir, tgt_dir)):
os.makedirs('%s/%s' % (tmp_dir, tgt_dir))
if src == '/dev/null':
# we want an empty file -- touch it (tar will refuse to
# handle a symlink to /dev/null)
open('%s/%s' % (tmp_dir, tgt), 'a').close()
else:
# use a shell callout to account for wildcard expansion
cmd = 'ln -s %s %s/%s' % (os.path.abspath(src), tmp_dir, tgt)
out, err, ret = ru.sh_callout(cmd, shell=True)
if ret:
self._log.debug('out: %s', out)
self._log.debug('err: %s', err)
raise RuntimeError('callout failed: %s' % cmd)
# tar. If any command fails, this will raise.
cmd = "cd %s && tar zchf %s *" % (tmp_dir, tar_tgt)
out, err, ret = ru.sh_callout(cmd, shell=True)
if ret:
self._log.debug('out: %s', out)
self._log.debug('err: %s', err)
raise RuntimeError('callout failed: %s' % cmd)
# remove all files marked for removal-after-pack
for ft in ft_list:
if ft['rem']:
os.unlink(ft['src'])
fs_endpoint = rcfg['filesystem_endpoint']
fs_url = rs.Url(fs_endpoint)
key = str(fs_url)
self._log.debug("rs.file.Directory ('%s')", fs_url)
with self._cache_lock:
if key in self._saga_fs_cache:
fs = self._saga_fs_cache[key]
else:
fs = rsfs.Directory(fs_url, session=self._session)
self._saga_fs_cache[key] = fs
tar_rem = rs.Url(fs_url)
tar_rem.path = "%s/%s" % (session_sandbox, tar_name)
fs.copy(tar_url, tar_rem, flags=rsfs.CREATE_PARENTS)
shutil.rmtree(tmp_dir)
# we now need to untar on the target machine.
js_url = ru.Url(pilots[0]['js_url'])
# well, we actually don't need to talk to the rm, but only need
    # a shell on the headnode.  That seems true for all ResourceManagers we use
    # right now.  So, let's convert the URL:
if '+' in js_url.scheme:
parts = js_url.scheme.split('+')
if 'gsissh' in parts: js_url.scheme = 'gsissh'
elif 'ssh' in parts: js_url.scheme = 'ssh'
else:
# In the non-combined '+' case we need to distinguish between
# a url that was the result of a hop or a local rm.
if js_url.scheme not in ['ssh', 'gsissh']:
js_url.scheme = 'fork'
js_url.host = 'localhost'
with self._cache_lock:
if js_url in self._saga_js_cache:
js_tmp = self._saga_js_cache[js_url]
else:
js_tmp = rs.job.Service(js_url, session=self._session)
self._saga_js_cache[js_url] = js_tmp
# cmd = "tar zmxvf %s/%s -C / ; rm -f %s" % \
cmd = "tar zmxvf %s/%s -C %s" % \
(session_sandbox, tar_name, session_sandbox)
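    # e.g. (illustrative): this expands to something like
    #   "tar zmxvf <session_sandbox>/<sid>.<uid>.tgz -C <session_sandbox>",
    # i.e. the tarball staged above is unpacked in place in the session sandbox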
j = js_tmp.run_job(cmd)
j.wait()
self._log.debug('tar cmd : %s', cmd)
self._log.debug('tar done: %s, %s, %s', j.state, j.stdout, j.stderr)
for pilot in pilots:
self._prof.prof('staging_in_stop', uid=pilot['uid'])
self._prof.prof('submission_start', uid=pilot['uid'])
# look up or create JS for actual pilot submission. This might result
# in the same js url as above, or not.
js_ep = rcfg['job_manager_endpoint']
with self._cache_lock:
if js_ep in self._saga_js_cache:
js = self._saga_js_cache[js_ep]
else:
js = rs.job.Service(js_ep, session=self._session)
self._saga_js_cache[js_ep] = js
# now that the scripts are in place and configured,
# we can launch the agent
jc = rs.job.Container()
for jd in jd_list:
self._log.debug('jd: %s', pprint.pformat(jd.as_dict()))
jc.add(js.create_job(jd))
jc.run()
# we assume here that the tasks arrive in the same order as the job
# descriptions. For uniform sets of pilots the order does not matter
# much though. Either way, this needs confirming on SAGA level
# FIXME
for j,jd in zip(jc.get_tasks(), jd_list):
# do a quick error check
if j.state == rs.FAILED:
self._log.error('%s: %s : %s : %s', j.id, j.state, j.stderr, j.stdout)
raise RuntimeError("SAGA Job state is FAILED. (%s)" % jd.name)
pilot = None
pid = jd.name
for p in pilots:
if p['uid'] == pid:
pilot = p
break
assert(pilot)
# Update the Pilot's state to 'PMGR_ACTIVE_PENDING' if SAGA job
# submission was successful. Since the pilot leaves the scope of
# the PMGR for the time being, we update the complete DB document
pilot['$all'] = True
# FIXME: update the right pilot
with self._pilots_lock:
self._pilots[pid] = dict()
self._pilots[pid]['pilot'] = pilot
self._pilots[pid]['job'] = j
# make sure we watch that pilot
with self._check_lock:
self._checking.append(pid)
for pilot in pilots:
self._prof.prof('submission_stop', uid=pilot['uid'])
# --------------------------------------------------------------------------
#
def _prepare_pilot(self, resource, rcfg, pilot, expand):
pid = pilot["uid"]
ret = {'ft': list(),
'jd': None }
# ----------------------------------------------------------------------
# Database connection parameters
sid = self._session.uid
database_url = self._session.cfg.dburl
# some default values are determined at runtime
default_virtenv = '%%(resource_sandbox)s/ve.%s.%s' % \
(resource, self._rp_version)
# ----------------------------------------------------------------------
# pilot description and resource configuration
number_cores = pilot['description']['cores']
number_gpus = pilot['description']['gpus']
required_memory = pilot['description']['memory']
runtime = pilot['description']['runtime']
app_comm = pilot['description']['app_comm']
queue = pilot['description']['queue']
job_name = pilot['description']['job_name']
project = pilot['description']['project']
cleanup = pilot['description']['cleanup']
candidate_hosts = pilot['description']['candidate_hosts']
# ----------------------------------------------------------------------
# get parameters from resource cfg, set defaults where needed
agent_launch_method = rcfg.get('agent_launch_method')
agent_dburl = rcfg.get('agent_mongodb_endpoint', database_url)
agent_spawner = rcfg.get('agent_spawner', DEFAULT_AGENT_SPAWNER)
rc_agent_config = rcfg.get('agent_config', DEFAULT_AGENT_CONFIG)
agent_scheduler = rcfg.get('agent_scheduler')
tunnel_bind_device = rcfg.get('tunnel_bind_device')
default_queue = rcfg.get('default_queue')
forward_tunnel_endpoint = rcfg.get('forward_tunnel_endpoint')
resource_manager = rcfg.get('resource_manager')
mpi_launch_method = rcfg.get('mpi_launch_method', '')
pre_bootstrap_0 = rcfg.get('pre_bootstrap_0', [])
pre_bootstrap_1 = rcfg.get('pre_bootstrap_1', [])
python_interpreter = rcfg.get('python_interpreter')
task_launch_method = rcfg.get('task_launch_method')
rp_version = rcfg.get('rp_version', DEFAULT_RP_VERSION)
virtenv_mode = rcfg.get('virtenv_mode', DEFAULT_VIRTENV_MODE)
virtenv = rcfg.get('virtenv', default_virtenv)
cores_per_node = rcfg.get('cores_per_node', 0)
gpus_per_node = rcfg.get('gpus_per_node', 0)
lfs_path_per_node = rcfg.get('lfs_path_per_node', None)
lfs_size_per_node = rcfg.get('lfs_size_per_node', 0)
python_dist = rcfg.get('python_dist')
virtenv_dist = rcfg.get('virtenv_dist', DEFAULT_VIRTENV_DIST)
cu_tmp = rcfg.get('cu_tmp')
spmd_variation = rcfg.get('spmd_variation')
shared_filesystem = rcfg.get('shared_filesystem', True)
stage_cacerts = rcfg.get('stage_cacerts', False)
cu_pre_exec = rcfg.get('cu_pre_exec')
cu_post_exec = rcfg.get('cu_post_exec')
export_to_cu = rcfg.get('export_to_cu')
mandatory_args = rcfg.get('mandatory_args', [])
system_architecture = rcfg.get('system_architecture', {})
saga_jd_supplement = rcfg.get('saga_jd_supplement', {})
self._log.debug(cores_per_node)
self._log.debug(pprint.pformat(rcfg))
# make sure that mandatory args are known
for ma in mandatory_args:
if pilot['description'].get(ma) is None:
raise ValueError('attribute "%s" is required for "%s"'
% (ma, resource))
# get pilot and global sandbox
resource_sandbox = self._session._get_resource_sandbox(pilot)
session_sandbox = self._session._get_session_sandbox (pilot)
pilot_sandbox = self._session._get_pilot_sandbox (pilot)
client_sandbox = self._session._get_client_sandbox ()
pilot['resource_sandbox'] = str(resource_sandbox) % expand
pilot['session_sandbox'] = str(session_sandbox) % expand
pilot['pilot_sandbox'] = str(pilot_sandbox) % expand
pilot['client_sandbox'] = str(client_sandbox)
# from here on we need only paths
resource_sandbox = resource_sandbox.path % expand
session_sandbox = session_sandbox .path % expand
pilot_sandbox = pilot_sandbox .path % expand
# client_sandbox = client_sandbox # not expanded
# Agent configuration that is not part of the public API.
# The agent config can either be a config dict, or
# a string pointing to a configuration name. If neither
# is given, check if 'RADICAL_PILOT_AGENT_CONFIG' is
# set. The last fallback is 'agent_default'
agent_config = pilot['description'].get('_config')
if not agent_config:
agent_config = os.environ.get('RADICAL_PILOT_AGENT_CONFIG')
if not agent_config:
agent_config = rc_agent_config
if not job_name:
job_name = pid
if isinstance(agent_config, dict):
# use dict as is
agent_cfg = agent_config
elif isinstance(agent_config, str):
try:
# interpret as a config name
agent_cfg_file = os.path.join(self._conf_dir, "agent_%s.json" % agent_config)
self._log.info("Read agent config file: %s", agent_cfg_file)
agent_cfg = ru.Config(path=agent_cfg_file)
# allow for user level overload
user_cfg_file = '%s/.radical/pilot/config/%s' \
% (os.environ['HOME'], os.path.basename(agent_cfg_file))
if os.path.exists(user_cfg_file):
self._log.info("merging user config: %s" % user_cfg_file)
user_cfg = ru.read_json(user_cfg_file)
ru.dict_merge (agent_cfg, user_cfg, policy='overwrite')
except Exception as e:
self._log.exception("Error reading agent config file: %s" % e)
raise
else:
# we can't handle this type
raise TypeError('agent config must be string (config name) or dict')
# expand variables in virtenv string
virtenv = virtenv % {'pilot_sandbox' : pilot_sandbox,
'session_sandbox' : session_sandbox,
'resource_sandbox': resource_sandbox}
# Check for deprecated global_virtenv
if 'global_virtenv' in rcfg:
raise RuntimeError("'global_virtenv' is deprecated (%s)" % resource)
# Create a host:port string for use by the bootstrap_0.
db_url = rs.Url(agent_dburl)
if db_url.port:
db_hostport = "%s:%d" % (db_url.host, db_url.port)
else:
db_hostport = "%s:%d" % (db_url.host, 27017) # mongodb default
# ----------------------------------------------------------------------
# the version of the agent is derived from
# rp_version, which has the following format
# and interpretation:
#
# case rp_version:
# @<token>:
# @tag/@branch/@commit: # no sdist staging
# git clone $github_base radical.pilot.src
# (cd radical.pilot.src && git checkout token)
# pip install -t $VIRTENV/rp_install/ radical.pilot.src
# rm -rf radical.pilot.src
# export PYTHONPATH=$VIRTENV/rp_install:$PYTHONPATH
#
# release: # no sdist staging
# pip install -t $VIRTENV/rp_install radical.pilot
# export PYTHONPATH=$VIRTENV/rp_install:$PYTHONPATH
#
# local: # needs sdist staging
# tar zxf $sdist.tgz
# pip install -t $VIRTENV/rp_install $sdist/
# export PYTHONPATH=$VIRTENV/rp_install:$PYTHONPATH
#
# debug: # needs sdist staging
# tar zxf $sdist.tgz
# pip install -t $SANDBOX/rp_install $sdist/
# export PYTHONPATH=$SANDBOX/rp_install:$PYTHONPATH
#
# installed: # no sdist staging
# true
# esac
#
# virtenv_mode
# private : error if ve exists, otherwise create, then use
# update : update if ve exists, otherwise create, then use
# create : use if ve exists, otherwise create, then use
# use : use if ve exists, otherwise error, then exit
# recreate: delete if ve exists, otherwise create, then use
#
# examples :
# virtenv@v0.20
# virtenv@devel
# virtenv@release
# virtenv@installed
# stage@local
# stage@/tmp/my_agent.py
#
# Note that some combinations may be invalid,
# specifically in the context of virtenv_mode. If, for
# example, virtenv_mode is 'use', then the 'virtenv:tag'
# will not make sense, as the virtenv is not updated.
# In those cases, the virtenv_mode is honored, and
# a warning is printed.
#
# Also, the 'stage' mode can only be combined with the
# 'local' source, or with a path to the agent (relative
# to root_dir, or absolute).
#
# A rp_version which does not adhere to the
# above syntax is ignored, and the fallback stage@local
# is used.
if not rp_version.startswith('@') and \
rp_version not in ['installed', 'local', 'debug', 'release']:
raise ValueError("invalid rp_version '%s'" % rp_version)
if rp_version.startswith('@'):
rp_version = rp_version[1:] # strip '@'
# ----------------------------------------------------------------------
# sanity checks
if not python_dist : raise RuntimeError("missing python distribution")
if not virtenv_dist : raise RuntimeError("missing virtualenv distribution")
if not agent_spawner : raise RuntimeError("missing agent spawner")
if not agent_scheduler : raise RuntimeError("missing agent scheduler")
if not resource_manager : raise RuntimeError("missing resource manager")
    if not agent_launch_method: raise RuntimeError("missing agent launch method")
if not task_launch_method : raise RuntimeError("missing task launch method")
# massage some values
if not queue:
queue = default_queue
if cleanup and isinstance(cleanup, bool):
# l : log files
# u : unit work dirs
# v : virtualenv
# e : everything (== pilot sandbox)
if shared_filesystem:
cleanup = 'luve'
else:
# we cannot clean the sandbox from within the agent, as the hop
# staging would then fail, and we'd get nothing back.
# FIXME: cleanup needs to be done by the pmgr.launcher, or
# someone else, really, after fetching all logs and
# profiles.
cleanup = 'luv'
# we never cleanup virtenvs which are not private
if virtenv_mode != 'private':
cleanup = cleanup.replace('v', '')
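    # e.g.: cleanup=True on a shared filesystem first yields 'luve'; with a
    #       non-private virtenv the 'v' is then stripped again, leaving 'lue'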
# add dists to staging files, if needed
if rp_version in ['local', 'debug']:
sdist_names = [ru.sdist_name, rs.sdist_name, self._rp_sdist_name]
sdist_paths = [ru.sdist_path, rs.sdist_path, self._rp_sdist_path]
else:
sdist_names = list()
sdist_paths = list()
# if cores_per_node is set (!= None), then we need to
    # allocate full nodes, and thus round up
if cores_per_node:
cores_per_node = int(cores_per_node)
number_cores = int(cores_per_node *
math.ceil(float(number_cores) / cores_per_node))
# if gpus_per_node is set (!= None), then we need to
    # allocate full nodes, and thus round up
if gpus_per_node:
gpus_per_node = int(gpus_per_node)
number_gpus = int(gpus_per_node *
math.ceil(float(number_gpus) / gpus_per_node))
# set mandatory args
bootstrap_args = ""
bootstrap_args += " -d '%s'" % ':'.join(sdist_names)
bootstrap_args += " -p '%s'" % pid
bootstrap_args += " -s '%s'" % sid
bootstrap_args += " -m '%s'" % virtenv_mode
bootstrap_args += " -r '%s'" % rp_version
bootstrap_args += " -b '%s'" % python_dist
bootstrap_args += " -g '%s'" % virtenv_dist
bootstrap_args += " -v '%s'" % virtenv
bootstrap_args += " -y '%d'" % runtime
# set optional args
if resource_manager == "CCM": bootstrap_args += " -c"
if forward_tunnel_endpoint: bootstrap_args += " -f '%s'" % forward_tunnel_endpoint
if forward_tunnel_endpoint: bootstrap_args += " -h '%s'" % db_hostport
if python_interpreter: bootstrap_args += " -i '%s'" % python_interpreter
if tunnel_bind_device: bootstrap_args += " -t '%s'" % tunnel_bind_device
if cleanup: bootstrap_args += " -x '%s'" % cleanup
for arg in pre_bootstrap_0:
bootstrap_args += " -e '%s'" % arg
for arg in pre_bootstrap_1:
bootstrap_args += " -w '%s'" % arg
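    # e.g. (illustrative placeholders only): the assembled string looks like
    #   " -d '<sdists>' -p '<pid>' -s '<sid>' -m '<virtenv_mode>' -r '<rp_version>'
    #     -b '<python_dist>' -g '<virtenv_dist>' -v '<virtenv>' -y '<runtime>'"
    # followed by whichever of the optional flags were set above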
agent_cfg['owner'] = 'agent.0'
agent_cfg['cores'] = number_cores
agent_cfg['gpus'] = number_gpus
agent_cfg['spawner'] = agent_spawner
agent_cfg['scheduler'] = agent_scheduler
agent_cfg['runtime'] = runtime
agent_cfg['app_comm'] = app_comm
agent_cfg['dburl'] = str(database_url)
agent_cfg['sid'] = sid
agent_cfg['pid'] = pid
agent_cfg['pmgr'] = self._pmgr
agent_cfg['logdir'] = '.'
agent_cfg['pilot_sandbox'] = pilot_sandbox
agent_cfg['session_sandbox'] = session_sandbox
agent_cfg['resource_sandbox'] = resource_sandbox
agent_cfg['resource_manager'] = resource_manager
agent_cfg['agent_launch_method'] = agent_launch_method
agent_cfg['task_launch_method'] = task_launch_method
agent_cfg['mpi_launch_method'] = mpi_launch_method
agent_cfg['cores_per_node'] = cores_per_node
agent_cfg['gpus_per_node'] = gpus_per_node
agent_cfg['lfs_path_per_node'] = lfs_path_per_node
agent_cfg['lfs_size_per_node'] = lfs_size_per_node
agent_cfg['cu_tmp'] = cu_tmp
agent_cfg['export_to_cu'] = export_to_cu
agent_cfg['cu_pre_exec'] = cu_pre_exec
agent_cfg['cu_post_exec'] = cu_post_exec
agent_cfg['resource_cfg'] = copy.deepcopy(rcfg)
agent_cfg['debug'] = self._log.getEffectiveLevel()
# we'll also push the agent config into MongoDB
pilot['cfg'] = agent_cfg
# ----------------------------------------------------------------------
# Write agent config dict to a json file in pilot sandbox.
agent_cfg_name = 'agent.0.cfg'
cfg_tmp_handle, cfg_tmp_file = tempfile.mkstemp(prefix='rp.agent_cfg.')
os.close(cfg_tmp_handle) # file exists now
# Convert dict to json file
self._log.debug("Write agent cfg to '%s'.", cfg_tmp_file)
self._log.debug(pprint.pformat(agent_cfg))
ru.write_json(agent_cfg, cfg_tmp_file)
ret['ft'].append({'src': cfg_tmp_file,
'tgt': '%s/%s' % (pilot_sandbox, agent_cfg_name),
'rem': True}) # purge the tmp file after packing
# ----------------------------------------------------------------------
# we also touch the log and profile tarballs in the target pilot sandbox
ret['ft'].append({'src': '/dev/null',
'tgt': '%s/%s' % (pilot_sandbox, '%s.log.tgz' % pid),
'rem': False}) # don't remove /dev/null
# only stage profiles if we profile
if self._prof.enabled:
ret['ft'].append({
'src': '/dev/null',
'tgt': '%s/%s' % (pilot_sandbox, '%s.prof.tgz' % pid),
'rem': False}) # don't remove /dev/null
# check if we have a sandbox cached for that resource. If so, we have
# nothing to do. Otherwise we create the sandbox and stage the RP
# stack etc.
# NOTE: this will race when multiple pilot launcher instances are used!
with self._cache_lock:
if resource not in self._sandboxes:
for sdist in sdist_paths:
base = os.path.basename(sdist)
ret['ft'].append({'src': sdist,
'tgt': '%s/%s' % (session_sandbox, base),
'rem': False})
# Copy the bootstrap shell script.
bootstrapper_path = os.path.abspath("%s/agent/%s"
% (self._root_dir, BOOTSTRAPPER_0))
self._log.debug("use bootstrapper %s", bootstrapper_path)
ret['ft'].append({'src': bootstrapper_path,
'tgt': '%s/%s' % (session_sandbox, BOOTSTRAPPER_0),
'rem': False})
# Some machines cannot run pip due to outdated CA certs.
# For those, we also stage an updated certificate bundle
# TODO: use booleans all the way?
if stage_cacerts:
cc_name = 'cacert.pem.gz'
cc_path = os.path.abspath("%s/agent/%s" % (self._root_dir, cc_name))
self._log.debug("use CAs %s", cc_path)
ret['ft'].append({'src': cc_path,
'tgt': '%s/%s' % (session_sandbox, cc_name),
'rem': False})
self._sandboxes[resource] = True
# ----------------------------------------------------------------------
# Create SAGA Job description and submit the pilot job
jd = rs.job.Description()
if shared_filesystem:
bootstrap_tgt = '%s/%s' % (session_sandbox, BOOTSTRAPPER_0)
else:
bootstrap_tgt = '%s/%s' % ('.', BOOTSTRAPPER_0)
jd.name = job_name
jd.executable = "/bin/bash"
jd.arguments = ['-l %s %s' % (bootstrap_tgt, bootstrap_args)]
jd.working_directory = pilot_sandbox
jd.project = project
jd.output = "bootstrap_0.out"
jd.error = "bootstrap_0.err"
jd.total_cpu_count = number_cores
jd.total_gpu_count = number_gpus
jd.total_physical_memory = required_memory
jd.processes_per_host = cores_per_node
jd.spmd_variation = spmd_variation
jd.wall_time_limit = runtime
jd.queue = queue
jd.candidate_hosts = candidate_hosts
jd.environment = dict()
jd.system_architecture = system_architecture
# we set any saga_jd_supplement keys which are not already set above
for key, val in saga_jd_supplement.items():
if not jd[key]:
self._log.debug('supplement %s: %s', key, val)
jd[key] = val
# set saga job description attribute based on env variable(s)
if os.environ.get('RADICAL_SAGA_SMT'):
try:
jd.system_architecture['smt'] = \
int(os.environ['RADICAL_SAGA_SMT'])
except Exception as e:
self._log.debug('SAGA SMT not set: %s' % e)
if self._prof.enabled:
jd.environment['RADICAL_PROFILE'] = 'TRUE'
# for condor backends and the like which do not have shared FSs, we add
# additional staging directives so that the backend system binds the
# files from the session and pilot sandboxes to the pilot job.
jd.file_transfer = list()
if not shared_filesystem:
jd.file_transfer.extend([
'site:%s/%s > %s' % (session_sandbox, BOOTSTRAPPER_0, BOOTSTRAPPER_0),
'site:%s/%s > %s' % (pilot_sandbox, agent_cfg_name, agent_cfg_name),
'site:%s/%s.log.tgz > %s.log.tgz' % (pilot_sandbox, pid, pid),
'site:%s/%s.log.tgz < %s.log.tgz' % (pilot_sandbox, pid, pid)
])
if self._prof.enabled:
jd.file_transfer.extend([
'site:%s/%s.prof.tgz > %s.prof.tgz' % (pilot_sandbox, pid, pid),
'site:%s/%s.prof.tgz < %s.prof.tgz' % (pilot_sandbox, pid, pid)
])
for sdist in sdist_names:
jd.file_transfer.extend([
'site:%s/%s > %s' % (session_sandbox, sdist, sdist)
])
if stage_cacerts:
jd.file_transfer.extend([
'site:%s/%s > %s' % (session_sandbox, cc_name, cc_name)
])
self._log.debug("Bootstrap command line: %s %s", jd.executable, jd.arguments)
ret['jd'] = jd
return ret
# ------------------------------------------------------------------------------
| 40.760178 | 93 | 0.531661 |
043bb21c47d182c2fcba3568f19c143a836ec559 | 730 | py | Python | setup.py | zzhhss/django-queryset-exts | 5f6c94fa4d76b23523f525bd285a207b313db87b | [
"MIT"
] | 1 | 2019-09-24T03:10:12.000Z | 2019-09-24T03:10:12.000Z | setup.py | zzhhss/django-queryset-exts | 5f6c94fa4d76b23523f525bd285a207b313db87b | [
"MIT"
] | null | null | null | setup.py | zzhhss/django-queryset-exts | 5f6c94fa4d76b23523f525bd285a207b313db87b | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="django-queryset-exts",
version="0.0.1",
author="zzhhss",
author_email="clayhaw@163.com",
description="A django queryset with a select_related(or prefetch_related) like method to fetch remote(e.g., api) data.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/zzhhss/django-queryset-exts",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 31.73913 | 124 | 0.675342 |
ecf39de3d9d93250d1ce24246477a1b6a42d9f04 | 390 | py | Python | languages/python/algorithm_hanoi.py | RohitAthithya/learntosolveit | fe1df98534d3af2fb3ba87c6540d9d8fa883c244 | [
"BSD-3-Clause"
] | 136 | 2015-03-06T18:11:21.000Z | 2022-03-10T22:31:40.000Z | languages/python/algorithm_hanoi.py | RohitAthithya/learntosolveit | fe1df98534d3af2fb3ba87c6540d9d8fa883c244 | [
"BSD-3-Clause"
] | 27 | 2015-01-07T01:38:03.000Z | 2021-12-22T19:20:15.000Z | languages/python/algorithm_hanoi.py | RohitAthithya/learntosolveit | fe1df98534d3af2fb3ba87c6540d9d8fa883c244 | [
"BSD-3-Clause"
] | 1,582 | 2015-01-01T20:37:06.000Z | 2022-03-30T12:29:24.000Z | #!/usr/bin/python
# Towers of Hanoi program.
disks = 3
from_tower = 'A'
to_tower = 'C'
using_tower = 'B'
def hanoi(n, from_tower, to_tower, using_tower):
if n > 0:
hanoi(n-1, from_tower, using_tower, to_tower)
print('move disk from ', from_tower, ' to ', to_tower)
hanoi(n-1, using_tower, to_tower, from_tower)
hanoi(disks, from_tower, to_tower, using_tower)
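# For disks = 3 this prints the 2**3 - 1 = 7 moves:
#   A->C, A->B, C->B, A->C, B->A, B->C, A->C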
| 24.375 | 62 | 0.666667 |
0b84ad88bd0cf6b3532fe18b373f4ad4b41d9685 | 634 | py | Python | day02/p2.py | pwicks86/adventofcode2016 | 331523e45095962f5e4614bdca15cc35e3243fdb | [
"MIT"
] | null | null | null | day02/p2.py | pwicks86/adventofcode2016 | 331523e45095962f5e4614bdca15cc35e3243fdb | [
"MIT"
] | null | null | null | day02/p2.py | pwicks86/adventofcode2016 | 331523e45095962f5e4614bdca15cc35e3243fdb | [
"MIT"
] | null | null | null | from operator import add
f = open('input.txt')
lines = [l.strip() for l in f.readlines()]
dirs = dict(L=(-1,0),R=(1,0),U=(0,-1),D=(0,1))
cur_pos = (0,2)
code = []
valid_positions = [(x,y) for x in range(1,4) for y in range(1,4)]
valid_positions.extend([(2,0),(0,2),(4,2),(2,4)])
codes = {(2,0):1,(1,1):2,(2,1):3,(3,1):4,(0,2):5,(1,2):6,(2,2):7,(3,2):8,(4,2):9,(1,3):10,(2,3):11,(3,3):12,(2,4):13}
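# Keypad layout encoded by `codes` (x grows to the right, y grows down);
# keys 10-13 are printed as hex digits a-d by the hex() call below:
#         1
#      2  3  4
#   5  6  7  8  9
#      a  b  c
#         d
# The starting position (0,2) is the '5' key.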
for line in lines:
for move in line:
new_pos = map(add,cur_pos, dirs[move])
if new_pos in map(list,valid_positions):
cur_pos = new_pos
code.append(hex(codes[tuple(cur_pos)])[2:])
print "".join(code)
| 33.368421 | 117 | 0.567823 |
a946f29eaee9b54b40ee242db98a39892826f9ba | 40,336 | py | Python | mmtbx/monomer_library/linking_mixins.py | whart222/cctbx_project | 32bb901af1431f845143eac06c244f20b1fbc26a | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-02-01T14:25:48.000Z | 2021-09-15T16:36:29.000Z | mmtbx/monomer_library/linking_mixins.py | whart222/cctbx_project | 32bb901af1431f845143eac06c244f20b1fbc26a | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-06-14T17:04:17.000Z | 2019-06-24T20:54:12.000Z | mmtbx/monomer_library/linking_mixins.py | whart222/cctbx_project | 32bb901af1431f845143eac06c244f20b1fbc26a | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
import time
from cctbx import sgtbx
from cctbx import geometry_restraints
from mmtbx.monomer_library import linking_setup
from mmtbx.monomer_library import linking_utils
from mmtbx.monomer_library import glyco_utils
from mmtbx.monomer_library import bondlength_defaults
from libtbx.utils import Sorry
from functools import cmp_to_key
from six.moves import range
origin_ids = geometry_restraints.linking_class.linking_class()
hydrogens = ["H", "D", "T"]
class ResidueLinkClass(dict):
def remove_link(self, residues, atom_names):
if residues in self:
if atom_names in self[residues]:
self[residues].remove(atom_names)
def _apply_link_using_proxies(link,
atom_group1,
atom_group2,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
# distance,
rt_mx_ji,
origin_id=None,
):
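  '''
  Apply the restraints of a CIF link (bonds, angles, torsions, chiralities,
  planes) between two atom groups, matching the link's atom names against
  the atoms of the two groups.  Returns the number of bonds added and the
  list of their i_seq pairs.
  '''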
assert origin_id
######################################
def _get_restraint_i_seqs(atom_group1,
atom_group2,
restraint,
):
i_seqs = []
keys = restraint.cif_keywords()
if "value_dist" in keys:
attrs = [
"atom_1_comp_id",
"atom_id_1",
"atom_2_comp_id",
"atom_id_2",
]
elif "period" in keys:
attrs = [
"atom_1_comp_id",
"atom_id_1",
"atom_2_comp_id",
"atom_id_2",
"atom_3_comp_id",
"atom_id_3",
"atom_4_comp_id",
"atom_id_4",
]
elif "value_angle" in keys:
attrs = [
"atom_1_comp_id",
"atom_id_1",
"atom_2_comp_id",
"atom_id_2",
"atom_3_comp_id",
"atom_id_3",
]
elif "volume_sign" in keys:
attrs = [
"atom_centre_comp_id",
"atom_id_centre",
"atom_1_comp_id",
"atom_id_1",
"atom_2_comp_id",
"atom_id_2",
"atom_3_comp_id",
"atom_id_3",
]
elif "plane_id" in keys:
attrs = [
"atom_comp_id",
"atom_id",
]
else:
assert 0
for i, attr in enumerate(attrs):
if i%2:
# name
name = getattr(restraint, attr)
for atom in atoms:
# uses names to confirm link
if atom.name.strip()==name.strip():
i_seqs.append(atom.i_seq)
break
else:
# name not found, could be hydrogen or ...
return None
else:
# atoms
if getattr(restraint, attr)==1:
atoms = atom_group1.atoms()
else:
atoms = atom_group2.atoms()
return i_seqs
###############
def _check_i_seqs(atom_group1, atom_group2, i_seqs):
atoms = []
for i_seq in i_seqs:
for atom in list(atom_group1.atoms())+list(atom_group2.atoms()):
if atom.i_seq==i_seq:
atoms.append(atom)
break
d2 = linking_utils.get_distance2(*atoms) # XXXX needs to be sym aware
if d2>9: return False
return True
#############
assert link
count = 0
#
bond_i_seqs = []
for bond in link.bond_list:
i_seqs = _get_restraint_i_seqs(atom_group1,
atom_group2,
bond,
)
if i_seqs is None: continue
if not _check_i_seqs(atom_group1, atom_group2, i_seqs): # check distances
tmp = atom_group2
atom_group2 = atom_group1
atom_group1 = tmp
i_seqs = _get_restraint_i_seqs(atom_group1,
atom_group2,
bond,
)
if i_seqs is None: continue
value = "value_dist"
assert origin_id
proxy = geometry_restraints.bond_simple_proxy(
i_seqs=i_seqs,
distance_ideal=getattr(bond, value),
weight=1/bond.value_dist_esd**2,
origin_id=origin_id,
)
bond_params_table.update(i_seq=i_seqs[0],
j_seq=i_seqs[1],
params=proxy)
#if rt_mx_ji is None: continue
bond_asu_table.add_pair(
i_seq=i_seqs[0],
j_seq=i_seqs[1],
rt_mx_ji=rt_mx_ji,
)
count+=1
bond_i_seqs.append(i_seqs)
#
for angle in link.angle_list:
i_seqs = _get_restraint_i_seqs(atom_group1,
atom_group2,
angle,
)
if i_seqs is None: continue
proxy = geometry_restraints.angle_proxy(
i_seqs=i_seqs,
angle_ideal=angle.value_angle,
weight=1/angle.value_angle_esd**2,
origin_id=origin_id,
)
geometry_proxy_registries.angle.add_if_not_duplicated(proxy=proxy)
#
for tor in link.tor_list:
i_seqs = _get_restraint_i_seqs(atom_group1,
atom_group2,
tor,
)
if i_seqs is None: continue
proxy = geometry_restraints.dihedral_proxy(
i_seqs=i_seqs,
angle_ideal=tor.value_angle,
weight=1/tor.value_angle_esd**2,
periodicity=tor.period,
origin_id=origin_id,
)
geometry_proxy_registries.dihedral.add_if_not_duplicated(proxy=proxy)
#
for chir in link.chir_list:
i_seqs = _get_restraint_i_seqs(atom_group1,
atom_group2,
chir,
)
if i_seqs is None: continue
volume_ideal = 2.4
if chir.volume_sign[:4].lower()=="nega":
volume_ideal = -2.4
elif chir.volume_sign[:4].lower()=="zero":
volume_ideal = 0.
both_signs=False
if chir.volume_sign=='both': both_signs=True
proxy = geometry_restraints.chirality_proxy(
i_seqs=i_seqs,
volume_ideal=volume_ideal,
both_signs=both_signs,
weight=25.,
origin_id=origin_id,
)
geometry_proxy_registries.chirality.add_if_not_duplicated(proxy=proxy)
#
planes = {}
weights = {}
for plane in link.plane_list:
i_seqs = _get_restraint_i_seqs(atom_group1,
atom_group2,
plane,
)
if i_seqs is None: continue
planes.setdefault(plane.plane_id, [])
planes[plane.plane_id]+=i_seqs
weights.setdefault(plane.plane_id, [])
weights[plane.plane_id].append(1/plane.dist_esd**2)
if planes:
for plane_id in planes:
if len(planes[plane_id])<4: continue
proxy = geometry_restraints.planarity_proxy(
i_seqs=planes[plane_id],
weights=weights[plane_id],
origin_id=origin_id,
)
geometry_proxy_registries.planarity.add_if_not_duplicated(proxy=proxy)
return count, bond_i_seqs
def possible_cyclic_peptide(atom1,
atom2,
verbose=False,
):
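  '''
  Return True if atom1 and atom2 belong to the first and last residue groups
  of the same chain, i.e. the pair may close a cyclic peptide.
  '''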
if verbose:
print(atom1.quote(),atom2.quote())
chain1 = atom1.parent().parent().parent()
  chain2 = atom2.parent().parent().parent()
if not chain1.id == chain2.id:
if verbose: print('chain id differs', chain1.id, chain2.id)
return False
fl = {}
rgs = chain1.residue_groups()
for i in range(0,-2,-1):
for atom in rgs[i].atoms():
if atom.quote()==atom1.quote():
fl[i]=atom1
break
elif atom.quote()==atom2.quote():
fl[i]=atom2
break
return len(fl)==2
def check_for_peptide_links(atom1,
atom2,
):
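  '''
  Guess whether two atoms could form a peptide-like link.  Returns
  ('SS', False) for a possible disulfide bridge, ('TRANS', swapped) for a
  peptide bond (swapped indicates the C/N atom order), None if the partner
  residue cannot form such a link, and False otherwise.
  '''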
classes1 = linking_utils.get_classes(atom1)
classes2 = linking_utils.get_classes(atom2)
atom_group1 = atom1.parent()
atom_group2 = atom2.parent()
if classes1.common_amino_acid or classes2.common_amino_acid:
if classes1.common_amino_acid:
other = atom_group2
else:
other = atom_group1
if linking_utils.get_class(other.resname) not in ["other"]:
return None
# sulfur bridge
key = "SS"
if atom1.name.strip()=="SG" and atom2.name.strip()=="SG": # too strict?
return key, False
  # test for backbone atoms
count = 0
for atom in other.atoms():
if atom.name.strip() in ["C", "N", "O"]: count+=1
if count!=3: return None
key = "TRANS"
if atom2.name.strip()=="C" and atom1.name.strip()=="N":
return key, False
elif atom1.name.strip()=="C" and atom2.name.strip()=="N":
return key, True
return False
def check_all_classes(pdb_hierarchy, class_type):
found = False
for residue_group in pdb_hierarchy.residue_groups():
for atom in residue_group.atoms():
classes = linking_utils.get_classes(atom)
if getattr(classes, class_type):
found = True
break
if found: break
return found
class linking_mixins(object):
def process_nonbonded_for_links(self,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
link_metals = True,
link_residues = True,
link_carbohydrates = True,
link_amino_acid_rna_dna = False,
link_ligands = False,
link_small_molecules = False,
max_bonded_cutoff = None,
metal_coordination_cutoff = 3.,
amino_acid_bond_cutoff = 2.,
inter_residue_bond_cutoff = 2.,
second_row_buffer = 0.5,
carbohydrate_bond_cutoff = 2.,
ligand_bond_cutoff = 2.,
small_molecule_bond_cutoff = 2.,
include_selections = None,
exclude_selections = None,
log = None,
verbose = False,
):
assert hasattr(self, "_cif")
if max_bonded_cutoff is None:
max_bonded_cutoff = max(metal_coordination_cutoff,
amino_acid_bond_cutoff,
carbohydrate_bond_cutoff,
ligand_bond_cutoff,
small_molecule_bond_cutoff,
inter_residue_bond_cutoff+second_row_buffer,
)
max_bonded_cutoff_standard = max_bonded_cutoff
if include_selections:
for selection_1, selection_2, cutoff in include_selections:
max_bonded_cutoff = max(max_bonded_cutoff, cutoff)
if max_bonded_cutoff > 15:
raise Sorry("One of the following parameters: \nmetal_coordination_"+
"cutoff, amino_acid_bond_cutoff,"+
"inter_residue_bond_cutoff, \ncarbohydrate_bond_cutoff,"
"bonds.bond_distance_cutoff \nis greater than 15A. Please check and"+
" correct these parameters.")
if verbose and log is not None:
print("""
metal_coordination_cutoff %s
amino_acid_bond_cutoff %s
carbohydrate_bond_cutoff %s
inter_residue_bond_cutoff %s
second_row_buffer %s
""" % ( metal_coordination_cutoff,
amino_acid_bond_cutoff,
carbohydrate_bond_cutoff,
inter_residue_bond_cutoff,
second_row_buffer,
), file=log)
from cctbx import crystal
from cctbx.array_family import flex
#
def _nonbonded_pair_objects(max_bonded_cutoff=3.,
i_seqs=None,
):
if i_seqs is None:
atoms = self.pdb_hierarchy.atoms()
i_seqs = flex.size_t()
for atom in atoms:
i_seqs.append(atom.i_seq)
if (self.model_indices is not None):
model_indices = self.model_indices.select(i_seqs)
conformer_indices = self.conformer_indices.select(i_seqs)
sym_excl_indices = self.sym_excl_indices.select(i_seqs)
donor_acceptor_excl_groups = self.donor_acceptor_excl_groups.select(i_seqs)
asu_mappings = self.special_position_settings.asu_mappings(
buffer_thickness=max_bonded_cutoff)
sites_cart = self.sites_cart.select(i_seqs)
asu_mappings.process_sites_cart(
original_sites=sites_cart,
site_symmetry_table=self.site_symmetry_table().select(i_seqs))
pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
nonbonded_proxies = geometry_restraints.nonbonded_sorted_asu_proxies(
model_indices=model_indices,
conformer_indices=conformer_indices,
sym_excl_indices=sym_excl_indices,
donor_acceptor_excl_groups=donor_acceptor_excl_groups,
nonbonded_params=geometry_restraints.nonbonded_params(
default_distance=1),
nonbonded_types=flex.std_string(conformer_indices.size()),
nonbonded_charges=flex.int(conformer_indices.size(), 0),
nonbonded_distance_cutoff_plus_buffer=max_bonded_cutoff,
min_cubicle_edge=5,
shell_asu_tables=[pair_asu_table])
return nonbonded_proxies, sites_cart, pair_asu_table, asu_mappings, i_seqs
#
def _nonbonded_pair_generator_geometry_restraints_sort(
nonbonded_proxies,
max_bonded_cutoff=3.):
rc = nonbonded_proxies.get_sorted(by_value="delta",
sites_cart=sites_cart,
include_proxy=True,
)
if rc is None: return
rc, junk = rc
for item in rc:
yield item
#
if(log is not None):
print("""
Automatic linking
Parameters for automatic linking
Linking & cutoffs
Metal : %-5s - %0.2f
      Amino acid           : %-5s - %0.2f
Carbohydrate : %-5s - %0.2f
Ligands : %-5s - %0.2f
Small molecules : %-5s - %0.2f
Amino acid - RNA/DNA : %-5s
""" % (link_metals,
metal_coordination_cutoff,
link_residues,
amino_acid_bond_cutoff,
link_carbohydrates,
carbohydrate_bond_cutoff,
link_ligands,
ligand_bond_cutoff,
link_small_molecules,
small_molecule_bond_cutoff,
link_amino_acid_rna_dna,
), file=log)
t0=time.time()
atoms = self.pdb_hierarchy.atoms()
bond_data = []
bond_data_i_seqs = {}
simple_bonds = 0
sym_bonds = 0
link_data = []
simple_links = 0
sym_links = 0
done = ResidueLinkClass()
links = {}
custom_links = {}
exclude_out_lines = {}
# main loop
nonbonded_proxies, sites_cart, pair_asu_table, asu_mappings, nonbonded_i_seqs = \
_nonbonded_pair_objects(max_bonded_cutoff=max_bonded_cutoff,
)
initial_pair_asu_table_table = bond_asu_table.table().deep_copy()
for ii, item in enumerate(
_nonbonded_pair_generator_geometry_restraints_sort(
nonbonded_proxies=nonbonded_proxies,
max_bonded_cutoff=max_bonded_cutoff,
)
):
labels, i_seq, j_seq, distance, vdw_distance, sym_op, rt_mx_ji, proxy = item
#
# include & exclude selection
#
origin_id = None
if ( include_selections and
distance>=max_bonded_cutoff_standard
):
for selection_1, selection_2, bond_cutoff in include_selections:
if (( i_seq in selection_1 and j_seq in selection_2 ) or
( i_seq in selection_2 and j_seq in selection_1 )
):
metal_coordination_cutoff=bond_cutoff
amino_acid_bond_cutoff=bond_cutoff
carbohydrate_bond_cutoff=bond_cutoff
ligand_bond_cutoff=bond_cutoff
small_molecule_bond_cutoff=bond_cutoff
inter_residue_bond_cutoff=bond_cutoff
saccharide_bond_cutoff=bond_cutoff
link_residues=True
break
else:
continue # exclude this nonbond from consideration
exclude_this_nonbonded = False
if exclude_selections:
for selection_1, selection_2 in exclude_selections:
if selection_2: # check both
if (( i_seq in selection_1 and j_seq in selection_2 ) or
( i_seq in selection_2 and j_seq in selection_1 )
):
exclude_this_nonbonded = True
break
else:
if i_seq in selection_1 or j_seq in selection_1:
exclude_this_nonbonded = True
# XXX this is a poor job!!!
if bond_asu_table.contains(i_seq, j_seq, 0): continue
if bond_asu_table.contains(i_seq, j_seq, 1): continue
atom1 = atoms[i_seq]
atom2 = atoms[j_seq]
if exclude_this_nonbonded:
key = (selection_1,selection_2)
if key not in exclude_out_lines:
exclude_out_lines[key] = \
' bond %s\n%s%s\n%smodel %.2f' % (
atom1.id_str(),
' '*9,
atom2.id_str(),
' '*6,
distance)
continue
#
# moving sections of this to outside the loop
# - SF4
# - ZN-CYS-HIS
#
moved = ['SF4', 'F3S', 'FES']
if ( atom1.parent().resname in moved or
atom2.parent().resname in moved
): continue
moved = ['ZN', 'CYS']
if ( atom1.parent().resname.strip() in moved and
atom2.parent().resname.strip() in moved
): continue
moved = ['ZN', 'HIS']
if ( atom1.parent().resname.strip() in moved and
atom2.parent().resname.strip() in moved
): continue
#
#
if verbose:
print(i_seq, j_seq, atom1.quote(), end=' ')
print(atom2.quote(), end=' ')
print("Distance: %0.2f" % distance, rt_mx_ji, sym_op)
# don't link atoms not in the same conformer (works for models also)...
if not atom1.is_in_same_conformer_as(atom2):
assert 0
continue
# don't link atoms in same residue group
if atom1.parent().parent()==atom2.parent().parent(): continue
atom_group1 = atom1.parent()
atom_group2 = atom2.parent()
      # don't link atom groups in different altloc except " "
if atom_group1.altloc.strip()==atom_group2.altloc.strip(): pass
elif atom_group1.altloc.strip()=="": pass
elif atom_group2.altloc.strip()=="": pass
else: continue
# don't link some classes
classes1 = linking_utils.get_classes(atom1)
classes2 = linking_utils.get_classes(atom2)
use_only_bond_cutoff = False
if verbose:
print("""
Residue classes
%s
%s """ % (classes1, classes2))
# why was this commented out???
if not link_ligands and (classes1.other or classes2.other): continue
if (not link_small_molecules and
(classes1.common_small_molecule or classes2.common_small_molecule)): continue
# is_proxy_set between any of the atoms ????????
if classes1.common_amino_acid and classes2.common_amino_acid:
if not link_residues:
continue
# special amino acid linking
# - cyclic
# - beta, delta ???
if possible_cyclic_peptide(atom1, atom2): # first & last peptide
use_only_bond_cutoff = True
if sym_op:
if classes1.common_amino_acid and classes2.common_saccharide: continue
if classes2.common_amino_acid and classes1.common_saccharide: continue
#
# bonded atoms can't link to same atom, eg MG-PG and MG-O1P
#
if bond_data:
bonded = False
if i_seq in bond_data_i_seqs:
for t_i in bond_data_i_seqs[i_seq]:
if bond_asu_table.contains(j_seq, t_i, 0): bonded = True
if j_seq in bond_data_i_seqs:
for t_i in bond_data_i_seqs[j_seq]:
if bond_asu_table.contains(i_seq, t_i, 0): bonded = True
if bonded: continue
#
key = [
atom1.id_str()[9:-1],
atom2.id_str()[9:-1],
]
key.sort()
if sym_op:
key.append(str(rt_mx_ji))
key = tuple(key)
# hydrogens
if atom1.element.strip() in hydrogens:
done[atom2.id_str()] = atom1.id_str()
if atom2.element.strip() in hydrogens:
done[atom1.id_str()] = atom2.id_str()
# bond length cutoff & some logic
if not linking_utils.is_atom_pair_linked(
atom1,
atom2,
distance=distance,
max_bonded_cutoff=max_bonded_cutoff,
amino_acid_bond_cutoff=amino_acid_bond_cutoff,
inter_residue_bond_cutoff=inter_residue_bond_cutoff,
second_row_buffer=second_row_buffer,
saccharide_bond_cutoff=carbohydrate_bond_cutoff,
metal_coordination_cutoff=metal_coordination_cutoff,
use_only_bond_cutoff=use_only_bond_cutoff,
link_metals=link_metals,
verbose=verbose,
):
if verbose:
print("is not linked", atom1.quote(),atom2.quote(),key)
print('link_metals',link_metals)
if ( atom1.element.strip().upper() in hydrogens or
atom2.element.strip().upper() in hydrogens):
pass
else:
done.setdefault(key, [])
done[key].append([atom1.name, atom2.name])
continue
# check some valences...
if not (classes1.common_element or classes2.common_element):
if not linking_utils.check_valence(self.pdb_hierarchy, atom1):
print(" Atom %s rejected from bonding due to valence issues." % atom1.quote(), file=log)
continue
if not linking_utils.check_valence(self.pdb_hierarchy, atom2):
print(" Atom %s rejected from bonding due to valence issues." % atom2.quote(), file=log)
continue
# got a link....
class1 = linking_utils.get_classes(atom1, #_group1.resname,
important_only=True,
)
class2 = linking_utils.get_classes(atom2, #_group2.resname,
important_only=True,
)
class_key = [class1, class2]
class_key.sort()
class_key = tuple(class_key)
if verbose: print('class_key',class_key)
#
if link_metals!=True and "metal" in class_key: continue
#atoms_must_be = {}
if not link_residues:
if class_key in [
("common_amino_acid", "common_amino_acid"),
#("common_amino_acid", "other"),
]:
continue
#else:
# atoms_must_be.setdefault(("common_amino_acid",
# "common_amino_acid"),["C", "N"])
# atoms_must_be.setdefault(("common_amino_acid", "other"),["C", "N"])
if not link_carbohydrates and "common_saccharide" in class_key: continue
if not link_amino_acid_rna_dna:
if "common_amino_acid" in class_key and "common_rna_dna" in class_key:
continue
#
names = [atom1.name, atom2.name]
if verbose: print('names',names)
names.sort()
atom1_key = None
atom2_key = None
if class1 in linking_setup.maximum_per_atom_links: # is one
atom1_key = atom1.id_str()
if class2 in linking_setup.maximum_per_atom_links: # is one
atom2_key = atom2.id_str()
if verbose:
print('-'*80)
print('class_key',class_key)
print('done')
for k, item in done.items():
print("> %s : %s" % (k, item))
print('key',key)
print('atom keys',atom1_key, atom2_key)
# exclude duplicate symmetry op.
if key in done:
if names in done[key]: continue
if atom1.parent().altloc==atom2.parent().altloc:
if atom1_key:
if atom1_key in done: continue
done[atom1_key] = key
if atom2_key:
if atom2_key in done: continue
done[atom2_key] = key
#
current_number_of_links = len(done.setdefault(key, []))
if(current_number_of_links >=
linking_setup.maximum_inter_residue_links.get(class_key, 1)
):
if verbose:
print("too many links:",current_number_of_links,linking_setup.maximum_inter_residue_links.get(class_key, 1), class_key)
continue
#
done[key].append(names)
done_key = key
# get all possible links
i_seqs = []
for atom in atom_group1.atoms():
i_seqs.append(atom.i_seq)
j_seqs = []
for atom in atom_group2.atoms():
j_seqs.append(atom.i_seq)
ij_seqs = []
for i in i_seqs:
for j in j_seqs:
tmp = [i,j]
tmp.sort()
ij_seqs.append(tuple(tmp))
    # check that a link has not already been made
link_found = False
if verbose:
print('len simple bond proxies',len(geometry_proxy_registries.bond_simple.proxies))
# Consistency check - debugging only
# for bsp in geometry_proxy_registries.bond_simple.proxies:
# if bsp.i_seqs[1] not in initial_pair_asu_table_table[bsp.i_seqs[0]].keys():
# print "ERROR!!!", bsp.i_seqs
# STOP()
# Proposed fast loop: Time building additional restraints for
# ribosome went from 5272 to 204 seconds.
for p in ij_seqs:
if p[1] in initial_pair_asu_table_table[p[0]].keys():
link_found = True
break
# VERY SLOW !!! - original loop
# for bond_simple_proxy in geometry_proxy_registries.bond_simple.proxies:
# if bond_simple_proxy.i_seqs in ij_seqs:
# link_found = True
# break
if link_found: continue
# check for any link between atom groups based on residue name, eg ASN-NAG
# get predefined link
link, swap, key = linking_utils.is_atom_group_pair_linked(
atom_group1,
atom_group2,
self.mon_lib_srv,
)
if verbose:
print('link',link)
print('swap',swap)
print('key',key)
if swap:
tmp = atom_group2
atom_group2 = atom_group1
atom_group1 = tmp
space_group = self.special_position_settings.space_group()
#
if len(done_key)==2:
link_rt_mx_ji = sgtbx.rt_mx(symbol="x,y,z", t_den=space_group.t_den())
else:
link_rt_mx_ji = sgtbx.rt_mx(symbol=done_key[2], t_den=space_group.t_den())
#
if link:
# apply a standard link
origin_id = origin_ids.get_origin_id('link_%s' % key,
return_none_if_absent=True,
)
if verbose: print('apply standard link', key, origin_id)
if origin_id is None:
# user defined links should not be applied here
continue
count, bond_i_seqs = _apply_link_using_proxies(
link,
atom_group1,
atom_group2,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
rt_mx_ji=link_rt_mx_ji,
origin_id=origin_id,
)
origin_id = None
if len(bond_i_seqs)==0:
if verbose:
print('failed to link using %s' % key)
continue
links.setdefault(key, [])
links[key].append([atom_group1, atom_group2])
links[key][-1]+=bond_i_seqs[0] # odd?
if verbose: print("predefined residue named link",key)
continue
#
#if atoms_must_be:
# # this could be fancier...
# # link_residues is peptide and SG links
# atoms_must_be_key = [atom1.element.strip(), atom2.element.strip()]
# #atoms_must_be_key = [atom1.name.strip(), atom2.name.strip()]
# atoms_must_be_key.sort()
# if class_key in atoms_must_be and "S" not in atoms_must_be_key:
# if atoms_must_be[class_key]!=atoms_must_be_key:
# continue
rc = linking_utils.process_atom_groups_for_linking_single_link(
self.pdb_hierarchy,
atom1,
atom2,
verbose=verbose,
)
if not rc:
done.remove_link(done_key, names)
continue
pdbres, link_key, link_atoms = rc
assert len(link_key)==1
key = link_key[0]
link = self.mon_lib_srv.link_link_id_dict.get(key, None)
if verbose:
print('pdbres',pdbres)
print('link',link)
print('link_key',link_key)
print('link_atoms',link_atoms)
if key.find("ALPHA1")>-1 or key.find("BETA1")>-1: # is handled in elif
key, cif, bond_i_seqs = \
glyco_utils.apply_glyco_link_using_proxies_and_atoms(
atom_group2,
atom_group1,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
rt_mx_ji=link_rt_mx_ji,
link_carbon_dist=carbohydrate_bond_cutoff,
origin_id=origin_ids['glycosidic custom'],
)
links.setdefault(key, [])
links[key].append([atom_group1, atom_group2])
links[key][-1]+=bond_i_seqs
continue
elif link:
origin_id = origin_ids['link_%s' % key]
count, bond_i_seqs = _apply_link_using_proxies(
link,
atom_group1,
atom_group2,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
rt_mx_ji=link_rt_mx_ji,
origin_id=origin_id,
)
origin_id=None
links.setdefault(key, [])
links[key].append([atom_group1, atom_group2])
links[key][-1]+=bond_i_seqs[0]
continue
else:
# possible peptide or rna/dna link
rc = check_for_peptide_links(atom1, atom2)
# no peptide links across symmetry
if len(done_key)==3:
rc = None
if rc:
key, swap = rc
link = self.mon_lib_srv.link_link_id_dict.get(key)
if swap:
tmp = atom_group2
atom_group2 = atom_group1
atom_group1 = tmp
origin_id = origin_ids['link_%s' % key]
rc = _apply_link_using_proxies(link,
atom_group1,
atom_group2,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
rt_mx_ji=link_rt_mx_ji,
origin_id=origin_id,
)
if not rc:
tmp = atom_group2
atom_group2 = atom_group1
atom_group1 = tmp
rc = _apply_link_using_proxies(link,
atom_group1,
atom_group2,
bond_params_table,
bond_asu_table,
geometry_proxy_registries,
rt_mx_ji=link_rt_mx_ji,
origin_id=origin_id,
)
origin_id=None
          # not added to links, so no LINK record is written
if sym_op:
sym_links += 1
link_data.append( (atoms[i_seq].id_str(),
atoms[j_seq].id_str(),
rt_mx_ji,
key,
)
)
else:
simple_links += 1
link_data.append( (atoms[i_seq].id_str(),
atoms[j_seq].id_str(),
None, #rt_mx_ji,
key,
)
)
continue
#
custom_links.setdefault(ii, [])
custom_links[ii].append([atom_group1, atom_group2, atom1, atom2])
# simple
origin_id=origin_ids['Misc. bond']
if ((classes1.common_rna_dna or
classes1.ccp4_mon_lib_rna_dna) and
(classes2.common_rna_dna or classes2.ccp4_mon_lib_rna_dna)):
bond_name = "h-dna"
assert 0
elif (linking_utils.get_classes(atom1, important_only=True)=="metal" or
linking_utils.get_classes(atom2, important_only=True)=="metal"):
origin_id = origin_ids['metal coordination']
if sym_op:
sym_bonds += 1
bond_data.append( (atoms[i_seq].id_str(),
atoms[j_seq].id_str(),
rt_mx_ji,
origin_id,
)
)
bond_data_i_seqs.setdefault(i_seq, [])
bond_data_i_seqs.setdefault(j_seq, [])
bond_data_i_seqs[i_seq].append(j_seq)
bond_data_i_seqs[j_seq].append(i_seq)
pair_asu_table.add_pair(proxy)
else:
simple_bonds += 1
bond_data.append( (atoms[i_seq].id_str(),
atoms[j_seq].id_str(),
None, #rt_mx,
origin_id,
)
)
bond_data_i_seqs.setdefault(i_seq, [])
bond_data_i_seqs.setdefault(j_seq, [])
bond_data_i_seqs[i_seq].append(j_seq)
bond_data_i_seqs[j_seq].append(i_seq)
pair_asu_table.add_pair(proxy.i_seqs)
# END MAIN LOOP for ii, item in enumerate(nonbonded?)
#
#
if verbose:
for key in sorted(custom_links):
print('-'*80)
print(key)
for pair in custom_links[key]:
for atom in pair:
try: print(atom.quote())
except Exception: print(atom)
pair_sym_table = pair_asu_table.extract_pair_sym_table()
n_simple, n_symmetry = 0, 0
self.pdb_link_records.setdefault("LINK", [])
retain = []
for ijk, sym_pair in enumerate(pair_sym_table.iterator()):
i_seq, j_seq = sym_pair.i_seqs()
origin_id = bond_data[ijk][-1]
assert i_seq == nonbonded_i_seqs[i_seq]
assert j_seq == nonbonded_i_seqs[j_seq]
atom1 = atoms[i_seq]
atom2 = atoms[j_seq]
# check for NA linkage
classes1 = linking_utils.get_classes(atom1)
classes2 = linking_utils.get_classes(atom2)
ans = bondlength_defaults.run(atom1, atom2)
equil = 2.3
weight = 0.02
slack = 0.
if len(ans) >0:
equil = ans[0]
if len(ans) > 1:
weight = ans[1]
if len(ans) > 2:
slack = ans[2]
if equil is None:
equil = 2.3
added_to_asu_table = False
try:
#bond_asu_table.add_pair([i_seq, j_seq])
bond_asu_table.add_pair(
i_seq=i_seq,
j_seq=j_seq,
rt_mx_ji=sym_pair.rt_mx_ji)
added_to_asu_table = True
except RuntimeError as e:
error = """
Difficulties linking atoms
%s
%s
Suggestions include providing restraints for any unknown residues.
""" % (atom1.quote(), atom2.quote())
print(error, file=log)
if added_to_asu_table:
retain.append(ijk)
if (sym_pair.rt_mx_ji.is_unit_mx()): n_simple += 1
else: n_symmetry += 1
assert origin_id
bond_params_table.update(
i_seq=i_seq,
j_seq=j_seq,
params=geometry_restraints.bond_params(
distance_ideal=equil,
weight=1.0/weight**2,
slack=slack,
origin_id=origin_id,
))
# adding link to PDB
self.pdb_link_records["LINK"].append([self.pdb_atoms[i_seq],
self.pdb_atoms[j_seq],
sym_pair.rt_mx_ji
])
# output
if link_data:
print(" Number of additional links: simple=%d, symmetry=%d" % (
simple_links,
sym_bonds,
), file=log)
for label1, label2, sym_op, link_name in sorted(link_data):
if sym_op is None:
print(" Simple link: %s - %s" % (label1, label2), file=log)
for label1, label2, sym_op, bond_type in sorted(bond_data):
if sym_op:
print(" Symmetry link: %s - %s sym. op: %s" % (label1,
label2,
sym_op,
), file=log)
if(log is not None):
print(" Number of custom bonds: simple=%d, symmetry=%d" % (
n_simple, n_symmetry), file=log)
if (n_symmetry == 0):
blanks = ""
else:
blanks = " "
#
def _sort_on_id(ag1, ag2):
ag1=ag1[0]
ag2=ag2[0]
if ag1.id_str()[4:]==ag2.id_str()[4:]:
if ag1.altloc<ag2.altloc:
return -1
return 1
elif ag1.id_str()[4:]<ag2.id_str()[4:]:
return -1
return 1
#
if exclude_out_lines:
print(" Excluded links - shortest distance candidate listed for each exclusion", file=log)
for key, item in exclude_out_lines.items():
print(item, file=log)
if links:
explained = []
print(" Links applied", file=log)
for key in sorted(links):
print(" %s" % key, file=log)
links[key].sort(key=cmp_to_key(_sort_on_id))
for ag1, ag2, i_seq, j_seq in links[key]:
self.pdb_link_records["LINK"].append([self.pdb_atoms[i_seq],
self.pdb_atoms[j_seq],
"x,y,z", #link_rt_mx_ji,
])
if ag1.altloc or ag2.altloc:
print(' "%s" - "%s" : altloc "%s" - "%s"' % (
ag1.id_str(),
ag2.id_str(),
ag1.altloc,
ag2.altloc,
), file=log)
else:
print(' "%s" - "%s"' % (ag1.id_str(),
ag2.id_str(),
), file=log)
explain=""
if key.find("ALPHA")==0 or key.find("BETA")==0:
true_alpha_beta = glyco_utils.get_alpha_beta(ag2.resname,
fake=False,
)
if true_alpha_beta and key.find(true_alpha_beta.upper())==-1:
one = "a beta"
two = "an alpha"
if true_alpha_beta=="alpha":
one = "an alpha"
two = "a beta"
explain = "%s~> Even though %s is %s isomer," % (' '*7,
ag2.resname,
one)
explain += " %s linkage is required..." % (two)
if explain not in explained:
print(explain, file=log)
explained.append(explain)
if bond_data:
print(" Number of additional bonds: simple=%d, symmetry=%d" % (
simple_bonds,
sym_bonds,
), file=log)
for caption, bond_type in [
("Coordination",'metal coordination'),
("Other bonds",'bond'),
]:
print(" %s:" % caption, file=log)
for ijk, (label1, label2, sym_op, bt) in enumerate(sorted(bond_data)):
if sym_op is None and bt == bond_type and ijk in retain:
print(" Simple bond: %s - %s" % (label1, label2), file=log)
for ijk, (label1, label2, sym_op, bt) in enumerate(sorted(bond_data)):
if sym_op and bt == bond_type and ijk in retain:
print(" Symmetry bond: %s - %s sym. op: %s" % (label1,
label2,
sym_op,
), file=log)
if(log is not None):
print(' Time building additional restraints: %0.2f' % (
time.time()-t0), file=log)
| 36.437218 | 129 | 0.551716 |
d4d579843618ccf39a8c898b2a9bb0c8dc980eef | 3,223 | py | Python | tests/test_stream.py | frenzymadness/module-build | a6adae2c0799c5987eda5bec57c768acd16c2226 | [
"MIT"
] | null | null | null | tests/test_stream.py | frenzymadness/module-build | a6adae2c0799c5987eda5bec57c768acd16c2226 | [
"MIT"
] | null | null | null | tests/test_stream.py | frenzymadness/module-build | a6adae2c0799c5987eda5bec57c768acd16c2226 | [
"MIT"
] | null | null | null | import pytest
from module_build.stream import ModuleStream, ModuleStreamContext
from tests import mock_mmdv3_and_version
def test_create_module_stream():
mmd, version = mock_mmdv3_and_version()
module_stream = ModuleStream(mmd, version)
expected_version = 20210925131649
assert module_stream.name == "perl-bootstrap"
assert module_stream.stream == "devel"
assert len(module_stream.contexts) == 2
assert module_stream.version == expected_version
assert len(module_stream.components) == 178
assert module_stream.description
def test_create_module_stream_missing_stream():
mmd, version = mock_mmdv3_and_version()
with pytest.raises(Exception):
mmd.set_stream_name("")
ModuleStream(mmd, version)
def test_create_module_stream_missing_name():
mmd, version = mock_mmdv3_and_version()
with pytest.raises(Exception):
mmd.set_module_name("")
ModuleStream(mmd, version)
def test_create_module_stream_context():
mmd, version = mock_mmdv3_and_version()
index = mmd.convert_to_index()
streams = index.search_streams()
platform = "f34"
module_stream_context = ModuleStreamContext(streams[0], version, platform)
assert module_stream_context.mmd
assert module_stream_context.mmd == streams[0]
assert module_stream_context.build_opts
assert module_stream_context.version
assert module_stream_context.version == version
assert module_stream_context.rpm_macros
assert module_stream_context.context_name
assert module_stream_context.context_name == streams[0].get_context()
assert module_stream_context.module_name
assert module_stream_context.module_name == streams[0].get_module_name()
assert module_stream_context.static_context
assert module_stream_context.dependencies
assert type(module_stream_context.dependencies) is dict
assert "buildtime" in module_stream_context.dependencies
assert "runtime" in module_stream_context.dependencies
assert module_stream_context.stream
assert module_stream_context.stream == streams[0].get_stream_name()
assert type(module_stream_context.demodularized_rpms) is list
assert type(module_stream_context.rpm_macros) is list
assert type(module_stream_context.rpm_whitelist) is list
assert module_stream_context.platform == platform
def test_msc_get_nsvca():
mmd, version = mock_mmdv3_and_version()
index = mmd.convert_to_index()
streams = index.search_streams()
platform = "f34"
module_stream_context = ModuleStreamContext(streams[0], version, platform)
assert module_stream_context.get_NSVCA() == "perl-bootstrap:devel:20210925131649:f26devel"
def test_msc_set_arch():
mmd, version = mock_mmdv3_and_version()
index = mmd.convert_to_index()
streams = index.search_streams()
platform = "f34"
module_stream_context = ModuleStreamContext(streams[0], version, platform)
arch = "x86_64"
module_stream_context.set_arch(arch)
assert module_stream_context.arch == arch
assert module_stream_context.mmd.get_arch() == arch
nsvca = "perl-bootstrap:devel:20210925131649:f26devel:x86_64"
assert module_stream_context.get_NSVCA() == nsvca
| 33.572917 | 94 | 0.766677 |
ffba63721d0b9224d71fdec1fcec8c46c46427c4 | 3,981 | py | Python | tests/bulk/import_book.py | jnoortheen/django-import-export | e855fcf8ed43af4dd681435f516e9f50ee19613d | [
"BSD-2-Clause"
] | null | null | null | tests/bulk/import_book.py | jnoortheen/django-import-export | e855fcf8ed43af4dd681435f516e9f50ee19613d | [
"BSD-2-Clause"
] | null | null | null | tests/bulk/import_book.py | jnoortheen/django-import-export | e855fcf8ed43af4dd681435f516e9f50ee19613d | [
"BSD-2-Clause"
] | null | null | null | """
Helper module for testing bulk imports.
Test one of create / update or delete by uncommenting the relevant method calls in main().
Parameters can be modified to test the effect on imports.
The DB is cleared down after each test run.
Each run is profiled for duration and peak memory usage.
When testing deletes, duration and memory usage has to be tested separately.
This can be done by simply commenting out the relevant call in profile()
"""
import tablib
import time
from functools import wraps
from memory_profiler import memory_usage
import django
from import_export import resources
from import_export.instance_loaders import CachedInstanceLoader
django.setup()
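# django.setup() assumes DJANGO_SETTINGS_MODULE points at the test project's settings
# (assumption: this script is run from the django-import-export tests directory).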
from core.models import Book # isort:skip
NUM_ROWS = 250
class _BookResource(resources.ModelResource):
class Meta:
model = Book
fields = ('id', 'name', 'author_email', 'price')
use_bulk = True
batch_size = 1000
skip_diff = True
force_init_instance = True
instance_loader_class = CachedInstanceLoader
def profile(fn):
@wraps(fn)
def inner(*args, **kwargs):
fn_kwargs_str = ', '.join(f'{k}={v}' for k, v in kwargs.items())
print(f'\n{fn.__name__}({fn_kwargs_str})')
# Measure time
t = time.perf_counter()
retval = fn(*args, **kwargs)
elapsed = time.perf_counter() - t
print(f'Time {elapsed:0.4}')
# Measure memory
mem, retval = memory_usage((fn, args, kwargs), retval=True, timeout=200, interval=1e-7)
print(f'Memory {max(mem) - min(mem)}')
return retval
return inner
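# NB: profile() calls the wrapped function twice (once timed, once under memory_usage),
# so each profiled import runs twice - hence the `NUM_ROWS * 2` assertion in do_create().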
@profile
def do_import(resource, dataset):
resource.import_data(dataset)
def do_create():
rows = [('', 'Some new book', 'email@example.com', '10.25')] * NUM_ROWS
dataset = tablib.Dataset(*rows, headers=['id', 'name', 'author_email', 'price'])
book_resource = _BookResource()
do_import(book_resource, dataset)
assert Book.objects.count() == NUM_ROWS * 2
Book.objects.all().delete()
def do_update():
rows = [('', 'Some new book', 'email@example.com', '10.25')] * NUM_ROWS
books = [Book(name=r[1], author_email=r[2], price=r[3]) for r in rows]
Book.objects.bulk_create(books)
assert NUM_ROWS == Book.objects.count()
    # updates - there must be existing rows in the DB...
    # i.e. so they can be updated
all_books = Book.objects.all()
rows = [(b.id, b.name, b.author_email, b.price) for b in all_books]
    # rebuild the import dataset from the existing rows (ids present, so rows get updated)
dataset = tablib.Dataset(*rows, headers=['id', 'name', 'author_email', 'price'])
book_resource = _BookResource()
do_import(book_resource, dataset)
assert Book.objects.count() == NUM_ROWS
Book.objects.all().delete()
def do_delete():
# Run this twice - once for duration and once for memory counts
# comment out the lines in profile() as appropriate
class _BookResource(resources.ModelResource):
def for_delete(self, row, instance):
return True
class Meta:
model = Book
fields = ('id', 'name', 'author_email', 'price')
use_bulk = True
batch_size = 1000
skip_diff = True
instance_loader_class = CachedInstanceLoader
rows = [('', 'Some new book', 'email@example.com', '10.25')] * NUM_ROWS
books = [Book(name=r[1], author_email=r[2], price=r[3]) for r in rows]
Book.objects.bulk_create(books)
assert NUM_ROWS == Book.objects.count()
# deletes - there must be existing rows in the DB...
# i.e. so they can be deleted
all_books = Book.objects.all()
rows = [(b.id, b.name, b.author_email, b.price) for b in all_books]
dataset = tablib.Dataset(*rows, headers=['id', 'name', 'author_email', 'price'])
book_resource = _BookResource()
do_import(book_resource, dataset)
def main():
do_create()
#do_update()
#do_delete()
if __name__ == "__main__":
main()
| 29.058394 | 95 | 0.652097 |
7d17c0af0ebc86077c592190d11c670bcdc0adad | 2,020 | py | Python | harness/determined/tensorboard/fetchers/gcs.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | harness/determined/tensorboard/fetchers/gcs.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | harness/determined/tensorboard/fetchers/gcs.py | gh-determined-ai/determined | 9a1ab33a3a356b69681b3351629fef4ab98ddb56 | [
"Apache-2.0"
] | null | null | null | import datetime
import logging
import os
import posixpath
import urllib
from typing import Any, Dict, Generator, List, Tuple
from .base import Fetcher
logger = logging.getLogger(__name__)
class GCSFetcher(Fetcher):
def __init__(self, storage_config: Dict[str, Any], storage_paths: List[str], local_dir: str):
import google.cloud.storage
self.client = google.cloud.storage.Client()
self.bucket_name = str(storage_config["bucket"])
self.local_dir = local_dir
self.storage_paths = storage_paths
self._file_records = {} # type: Dict[str, datetime.datetime]
def _list(self, prefix: str) -> Generator[Tuple[str, datetime.datetime], None, None]:
logger.debug(f"Listing keys in bucket '{self.bucket_name}' with '{prefix}'")
prefix = urllib.parse.urlparse(prefix).path.lstrip("/")
blobs = self.client.list_blobs(self.bucket_name, prefix=prefix)
for blob in blobs:
yield (blob.name, blob.updated)
def fetch_new(self) -> int:
new_files = []
bucket = self.client.bucket(self.bucket_name)
# Look at all files in our storage location.
for storage_path in self.storage_paths:
logger.debug(f"Looking at path: {storage_path}")
for filepath, mtime in self._list(storage_path):
prev_mtime = self._file_records.get(filepath)
if prev_mtime is not None and prev_mtime >= mtime:
continue
new_files.append(filepath)
self._file_records[filepath] = mtime
# Download the new or updated files.
for filepath in new_files:
local_path = posixpath.join(self.local_dir, self.bucket_name, filepath)
dir_path = os.path.dirname(local_path)
os.makedirs(dir_path, exist_ok=True)
bucket.blob(filepath).download_to_filename(local_path)
logger.debug(f"Downloaded file to local: {local_path}")
return len(new_files)
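# Example usage (hypothetical bucket/paths, shown only for illustration):
#   fetcher = GCSFetcher({"bucket": "my-bucket"}, ["tensorboard/exp-1/"], "/tmp/tb")
#   fetcher.fetch_new()  # downloads new/updated blobs under /tmp/tb/my-bucket/...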
| 33.666667 | 97 | 0.650495 |
0e559d73a5d2d5259be1b0447c42f2fa25dcbbc4 | 447 | py | Python | Wk09/Update.py | MichaelORegan/52957_DATA_REPRESENTATION | 2c9b360fe315c2235c6f47a6db4103d16984160c | [
"Apache-2.0"
] | null | null | null | Wk09/Update.py | MichaelORegan/52957_DATA_REPRESENTATION | 2c9b360fe315c2235c6f47a6db4103d16984160c | [
"Apache-2.0"
] | null | null | null | Wk09/Update.py | MichaelORegan/52957_DATA_REPRESENTATION | 2c9b360fe315c2235c6f47a6db4103d16984160c | [
"Apache-2.0"
] | null | null | null | import mysql.connector
db = mysql.connector.connect(
host="localhost",
user="root",
password="",
#user="datarep",
# # this is the user name on my mac
# #passwd="password"
# # for my mac
database="datarepresentation"
)
cursor = db.cursor()
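# Parameterized UPDATE: the %s placeholders are filled in by the connector,
# which escapes the values (no manual quoting, no SQL injection).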
sql="update student set name= %s, age=%s where id = %s"
values = ("Joe",33, 1)
cursor.execute(sql, values)
db.commit()
print("update done") | 22.35 | 57 | 0.58613 |
8b79933547395122cca00735ef2363d59f48d1ee | 505 | py | Python | setup.py | rl1987/trunnel | 48d506d225a424fb8dd5dc84c090cd2bf0886dd9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rl1987/trunnel | 48d506d225a424fb8dd5dc84c090cd2bf0886dd9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rl1987/trunnel | 48d506d225a424fb8dd5dc84c090cd2bf0886dd9 | [
"BSD-3-Clause"
] | null | null | null |
from distutils.core import setup
namespace = {}
exec(open("./lib/trunnel/__init__.py").read(), namespace)
setup(name='Trunnel',
version=namespace['__version__'],
description='Trunnel binary format parser',
author='Nick Mathewson',
author_email='nickm@torproject.org',
url='https://gitweb.torproject.org/trunnel.git/',
packages=['trunnel'],
package_dir={'': 'lib'},
package_data={'trunnel': ['data/*.c', 'data/*.h']},
license='3-clause BSD'
)
| 28.055556 | 57 | 0.629703 |
c437f24ead8aa20d1627fa0d72f3fbc8d4b2f96b | 1,537 | py | Python | algorithms/baseline/Popular.py | rn5l/rsc18 | b0e74495d797605b19b0d302e6797c681ba7748b | [
"Apache-2.0"
] | 8 | 2018-10-08T15:10:17.000Z | 2021-04-25T09:09:17.000Z | algorithms/baseline/Popular.py | rn5l/rsc18 | b0e74495d797605b19b0d302e6797c681ba7748b | [
"Apache-2.0"
] | null | null | null | algorithms/baseline/Popular.py | rn5l/rsc18 | b0e74495d797605b19b0d302e6797c681ba7748b | [
"Apache-2.0"
] | 6 | 2018-10-15T14:20:28.000Z | 2021-12-20T23:02:35.000Z | '''
Created on 13.04.2018
@author: malte
'''
from algorithms.Model import Model
import pandas as pd
import numpy as np
import time
class MostPopular(Model):
    '''
    Most-popular baseline: ranks tracks by global interaction count and
    recommends the top entries, optionally filtering out known tracks.
    '''
def __init__(self, remind=False, return_num_preds=500 ):
self.return_num_preds = return_num_preds
self.remind = remind
def init(self, train, test):
pass
def train(self, train, test=None):
print( 'training mostpopular' )
tstart = time.time()
pop = pd.DataFrame()
pop['popularity'] = train['actions'].groupby( 'track_id' ).size()
pop.reset_index(inplace=True)
pop['confidence'] = pop['popularity'] / pop['popularity'].max()
#del pop['popularity']
pop.sort_values( ['confidence','track_id'], ascending=[False,True], inplace=True )
#self.pop = pop.head(self.return_num_preds + 100)[['track_id','confidence']] # not sure why this fails when not having "+ 100" in head()
self.pop = pop[['track_id','confidence']]
print( ' -- finished training in {}s'.format( (time.time() - tstart) ) )
def predict(self, name=None, tracks=None, playlist_id=None, artists=None, num_hidden=None):
items = tracks if tracks is not None else []
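        # When remind is False, tracks already in the given playlist are filtered
        # out below so that only unseen tracks are recommended.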
if self.remind:
res = self.pop.head(self.return_num_preds)
else:
res = self.pop[ np.in1d( self.pop.track_id, items, invert=True ) ].head(self.return_num_preds)
return res
| 33.413043 | 144 | 0.595966 |
3c4e01caaa1fd5fbeffa0584c78d4917f21d7dc3 | 660 | py | Python | 3_MapReduce Programming on MovieLens Data/solution/MapReduce/code/genreMapper.py | minakoyang/YY_Distributed_Cluster_Computing | 0284569e4a6d0312590acf9ead5cf23bbcd167c9 | [
"MIT"
] | null | null | null | 3_MapReduce Programming on MovieLens Data/solution/MapReduce/code/genreMapper.py | minakoyang/YY_Distributed_Cluster_Computing | 0284569e4a6d0312590acf9ead5cf23bbcd167c9 | [
"MIT"
] | null | null | null | 3_MapReduce Programming on MovieLens Data/solution/MapReduce/code/genreMapper.py | minakoyang/YY_Distributed_Cluster_Computing | 0284569e4a6d0312590acf9ead5cf23bbcd167c9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import csv
movieFile = "./movielens/movies.csv"
movieList = {}
with open(movieFile, mode = 'r') as infile:
reader = csv.reader(infile)
for row in reader:
movieList[row[0]] = {}
movieList[row[0]]["title"] = row[1]
movieList[row[0]]["genre"] = row[2]
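# Mapper: every stdin line is a rating record (userId,movieId,rating,...);
# emit one tab-separated (genre, rating) pair per genre of the rated movie.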
for oneMovie in sys.stdin:
oneMovie = oneMovie.strip()
ratingInfo = oneMovie.split(",")
try:
genreList = movieList[ratingInfo[1]]["genre"]
rating = float(ratingInfo[2])
for genre in genreList.split("|"):
print("%s\t%s" % (genre, rating))
except ValueError:
continue | 25.384615 | 53 | 0.578788 |
2ca454aa6194a1e7ddec81e322c15f58838aa707 | 1,946 | py | Python | ml_advisor/fund_clustering/plots.py | rucsa/thesis_diversification | 732591d0a9e8042b45853dd1abe7f8f6ba769c9f | [
"MIT"
] | null | null | null | ml_advisor/fund_clustering/plots.py | rucsa/thesis_diversification | 732591d0a9e8042b45853dd1abe7f8f6ba769c9f | [
"MIT"
] | null | null | null | ml_advisor/fund_clustering/plots.py | rucsa/thesis_diversification | 732591d0a9e8042b45853dd1abe7f8f6ba769c9f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 22 18:48:34 2018
@author: rucsa
"""
import numpy as np
import matplotlib.pyplot as plt
LABEL_COLOR_MAP = {0: '#FFF0F5',
1: '#7CFC00',
2: '#FFFACD',
3: '#CD5C5C',
4: '#FF1493',
5: '#00BFFF',
6: '#696969',
7: '#1E90FF',
8: '#B22222',
9: '#FFFAF0',
10:'#228B22',
11:'#FF00FF',
12:'#DCDCDC',
13:'#F8F8FF',
14:'#FFD700',
15:'#DAA520',
16:'#008B8B',
17:'#008000',
18:'#ADFF2F',
19:'#F0FFF0',
20:'#FF69B4',
21:'#CD5C5C',
22:'#ADD8E6',
23:'#4B0082',
24:'#FFFFF0',
25:'#F0E68C',
26:'#E6E6FA'
}
def plot(Y,clusters):
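    """Scatter-plot the 2D embedded points Y, coloring each point by its cluster label."""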
#data = pd.read_hdf("hdfs/distance_matrix.hdf5", "dataset1/x")
#data = data.multiply(1)
#clusters = clusters.reset_index().drop('index', axis = 1).values[:,0]
label_color = np.array([[LABEL_COLOR_MAP[l]] for l in clusters])
#t0 = time()
#mds = manifold.MDS(2, max_iter=100, n_init=1, dissimilarity = "precomputed")
#Y = mds.fit_transform(data)
#t1 = time()
#Y = np.array([mlh.cart_to_pole(x, y) for x, y in Y])
#print("MDS: %.2g sec" % (t1 - t0))
fig, ax = plt.subplots()
for i in range(0, len(Y)):
plt.scatter(Y[i][0], Y[i][1], c=label_color[i], alpha = 0.3, label = clusters[i])
#plt.title("MDS (%.2g sec)" % (t1 - t0))
#ax.xaxis.set_major_formatter(NullFormatter())
#ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
return plt
| 29.484848 | 89 | 0.433196 |
b21a2091a928f5ba2fa0aad545d87e2d423a6f65 | 2,313 | py | Python | Others/Source/19/19.2/pie_test.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | 1 | 2018-05-30T01:38:23.000Z | 2018-05-30T01:38:23.000Z | Others/Source/19/19.2/pie_test.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | Others/Source/19/19.2/pie_test.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | # coding: utf-8
#########################################################################
#    Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>                      #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import matplotlib.pyplot as plt
# Prepare the data
data = [0.16881, 0.14966, 0.07471, 0.06992,
        0.04762, 0.03541, 0.02925, 0.02411, 0.02316, 0.01409, 0.36326]
# Prepare the labels
labels = ['Java', 'C', 'C++', 'Python',
        'Visual Basic .NET', 'C#', 'PHP', 'JavaScript',
        'SQL', 'Assembly language', 'Other']
# Pull the 4th language (Python) out of the pie
explode = [0, 0, 0, 0.3, 0, 0, 0, 0, 0, 0, 0]
# Use custom colors
colors=['red', 'pink', 'magenta','purple','orange']
# Normalize the x and y axes so the pie is a perfect circle rather than an ellipse
plt.axes(aspect='equal')
# Set the x- and y-axis ranges (these control the pie's center and radius)
plt.xlim(0,8)
plt.ylim(0,8)
# Draw the pie chart
plt.pie(x = data, # plotting data
        labels=labels, # add the programming-language labels
        explode=explode, # highlight Python
        colors=colors, # use the custom fill colors
        autopct='%.3f%%', # percentage format, 3 decimal places here
        pctdistance=0.8, # distance of the percentage labels from the center
        labeldistance = 1.15, # distance of the labels from the center
        startangle = 180, # starting angle of the pie
        center = (4, 4), # center of the pie (in terms of the x/y axis ranges)
        radius = 3.8, # radius of the pie (in terms of the x/y axis ranges)
        counterclock = False, # counterclockwise or not; drawn clockwise here
        wedgeprops = {'linewidth': 1, 'edgecolor':'green'},# properties of the wedge edges
        textprops = {'fontsize':12, 'color':'black'}, # properties of the text labels
        frame = 1) # whether to draw the axes frame around the pie; enabled here
# Hide the x- and y-axis tick values
plt.xticks(())
plt.yticks(())
# Add the chart title
plt.title('Programming language index ranking, August 2018')
# Show the figure
plt.show() | 41.303571 | 74 | 0.405534 |
d62f4ae2632fe50f41569f4be8c53b51549dcab8 | 226 | py | Python | tests/test_cases.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 43 | 2018-04-29T03:30:18.000Z | 2021-02-11T05:24:49.000Z | tests/test_cases.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 46 | 2018-05-02T01:27:34.000Z | 2022-03-26T13:29:55.000Z | tests/test_cases.py | benthecarman/wasabi-acks | e9663d845e8f63f06e5e49737966fafa5e8a1eb4 | [
"MIT"
] | 11 | 2018-05-15T23:47:47.000Z | 2021-01-27T14:57:54.000Z | from bitcoin_acks.constants import PullRequestState
pull_requests_get_all_test_cases = [
(None, 105),
(None, 0),
(PullRequestState.OPEN, 20),
(PullRequestState.MERGED, 220),
(PullRequestState.CLOSED, 5)
]
| 22.6 | 51 | 0.712389 |
1b236cdc99bed2ddc4a710a41f2e98ec24ddbf09 | 29,763 | py | Python | pandas/_testing/__init__.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | 2 | 2022-02-27T04:02:18.000Z | 2022-03-01T03:48:47.000Z | pandas/_testing/__init__.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T02:15:07.000Z | 2022-03-08T02:15:07.000Z | pandas/_testing/__init__.py | aa-182758/pandas | 53b3dd53c7f2c3d24aa77d5a1bc531b1fcd45d70 | [
"BSD-3-Clause"
] | 2 | 2022-02-27T04:02:19.000Z | 2022-03-01T03:49:21.000Z | from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_sequence,
is_unsigned_integer_dtype,
pandas_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_indexing_slices_equivalent,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_metadata_equivalent,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import ( # noqa:F401
get_dtype,
get_obj,
)
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.api import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from pandas.core.arrays import (
BaseMaskedArray,
ExtensionArray,
PandasArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_NUMPY_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_NUMPY_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
FLOAT_NUMPY_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_NUMPY_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NARROW_NP_DTYPES = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
NP_NAT_OBJECTS = [
cls("NaT", unit)
for cls in [np.datetime64, np.timedelta64]
for unit in [
"Y",
"M",
"W",
"D",
"h",
"m",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
"as",
]
]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index._with_infer(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is np.ndarray or box_cls is np.array:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
"""
Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
"""
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if dtype is None:
return np.asarray(obj)
return extract_array(obj, extract_numpy=True)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
"""make a length k index or n categories"""
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
"""make a length k IntervalIndex"""
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeNumericIndex(k=10, name=None, *, dtype):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if is_integer_dtype(dtype):
values = np.arange(k, dtype=dtype)
if is_unsigned_integer_dtype(dtype):
values += 2 ** (dtype.itemsize * 8 - 1)
elif is_float_dtype(dtype):
values = np.random.random_sample(k) - np.random.random_sample(1)
values.sort()
values = values * (10 ** np.random.randint(0, 9))
else:
raise NotImplementedError(f"wrong dtype {dtype}")
return NumericIndex(values, dtype=dtype, name=name)
def makeIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="int64")
return Int64Index(base_idx)
def makeUIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="uint64")
return UInt64Index(base_idx)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="float64")
return Float64Index(base_idx)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
N = (k // 2) + 1
rng = range(N)
mi = MultiIndex.from_product([("foo", "bar"), rng], names=names, **kwargs)
assert len(mi) >= k # GH#38795
return mi[:k]
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def make_rand_series(name=None, dtype=np.float64):
index = makeStringIndex(_N)
data = np.random.randn(_N)
data = data.astype(dtype, copy=False)
return Series(data, index=index, name=name)
def makeFloatSeries(name=None):
return make_rand_series(name=name)
def makeStringSeries(name=None):
return make_rand_series(name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# Deprecated since version 3.9: collections.Counter now supports []. See PEP 585
# and Generic Alias Type.
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
c_idx_names, idx_names - False/True/list of strings, yields No names ,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
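    # e.g. @test_parallel(num_threads=4) runs the decorated test in 4 threads concurrently.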
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
# For testing, those properties return a generic callable, and not
# the actual class. In this case that is equivalent, but it is to
# ensure we don't rely on the property returning a class
# See https://github.com/pandas-dev/pandas/pull/46018 and
# https://github.com/pandas-dev/pandas/issues/32638 and linked issues
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
@property
def _constructor_expanddim(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
@property
def _constructor_sliced(self):
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
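    # e.g. on a POSIX system: convert_rows_list_to_csv_str(["a,b", "1,2"]) == "a,b\n1,2\n"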
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None) # noqa: PDF010
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
        The first item is the name of an NDFrame method ('sum', 'prod', etc.).
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
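def _example_get_cython_table_params():
    # Sketch: expands ('sum', ...) into (frame, 'sum', expected) plus one row per
    # matching callable registered in pd.core.common._cython_table. The expected
    # value here is a placeholder.
    frame = DataFrame({"a": [1, 2]})
    return get_cython_table_params(frame, [("sum", None)])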
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
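def _example_get_op_from_name():
    # Sketch: "__radd__" has no operator.radd, so it falls through to the
    # reverse-operator branch above.
    radd = get_op_from_name("__radd__")
    return radd(1, 2)  # operator.add(2, 1) -> 3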
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
# -----------------------------------------------------------------------------
def shares_memory(left, right) -> bool:
"""
Pandas-compat for np.shares_memory.
"""
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
return np.shares_memory(left, right)
elif isinstance(left, np.ndarray):
# Call with reversed args to get to unpacking logic below.
return shares_memory(right, left)
if isinstance(left, RangeIndex):
return False
if isinstance(left, MultiIndex):
return shares_memory(left._codes, right)
if isinstance(left, (Index, Series)):
return shares_memory(left._values, right)
if isinstance(left, NDArrayBackedExtensionArray):
return shares_memory(left._ndarray, right)
if isinstance(left, pd.core.arrays.SparseArray):
return shares_memory(left.sp_values, right)
if isinstance(left, pd.core.arrays.IntervalArray):
return shares_memory(left._left, right) or shares_memory(left._right, right)
if isinstance(left, ExtensionArray) and left.dtype == "string[pyarrow]":
# https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669
if isinstance(right, ExtensionArray) and right.dtype == "string[pyarrow]":
# error: "ExtensionArray" has no attribute "_data"
left_pa_data = left._data # type: ignore[attr-defined]
# error: "ExtensionArray" has no attribute "_data"
right_pa_data = right._data # type: ignore[attr-defined]
left_buf1 = left_pa_data.chunk(0).buffers()[1]
right_buf1 = right_pa_data.chunk(0).buffers()[1]
return left_buf1 == right_buf1
if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray):
# By convention, we'll say these share memory if they share *either*
# the _data or the _mask
return np.shares_memory(left._data, right._data) or np.shares_memory(
left._mask, right._mask
)
if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1:
arr = left._mgr.arrays[0]
return shares_memory(arr, right)
raise NotImplementedError(type(left), type(right))
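def _example_shares_memory():
    # Sketch with plain ndarrays: a slice is a view of its base array, a copy
    # is not.
    base = np.arange(4)
    return shares_memory(base, base[:2]), shares_memory(base, base.copy())  # (True, False)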
| 28.590778 | 88 | 0.639418 |
e80e6111f28d5f59fa00b4891e0c54daba1f9edb | 1,355 | py | Python | player.py | deluxghost/StatusEnhancer | 5f5cc339f3ce6393e4cf56c214636a677b296dc3 | [
"Apache-2.0"
] | null | null | null | player.py | deluxghost/StatusEnhancer | 5f5cc339f3ce6393e4cf56c214636a677b296dc3 | [
"Apache-2.0"
] | null | null | null | player.py | deluxghost/StatusEnhancer | 5f5cc339f3ce6393e4cf56c214636a677b296dc3 | [
"Apache-2.0"
] | null | null | null | import re
from typing import List
from steamid import SteamID
from utils import parse_time
line_pat = re.compile(r'^#\s+(?P<userid>\d+)\s+"(?P<name>.+)"\s+(?P<steamid>\S+)\s+(?P<other>.+)$')
class Player:
def __init__(self, userid: int, name: str, steamid: str, connected: str):
self.userid = userid
self.name = name
self.steamid = SteamID(steamid)
self.connected_str = connected
self.connected = parse_time(connected)
def kick_cmd(self) -> str:
return f'callvote kick {self.userid}'
def parse_player(line: str) -> Player:
if not line.startswith('#'):
return
match = line_pat.match(line)
if not match:
return
userid = int(match.group('userid'))
name = match.group('name')
steamid = match.group('steamid')
other = match.group('other')
if steamid == 'BOT':
return
connected = ''
other_parts = other.split()
if other_parts:
connected = other_parts[0]
player = Player(userid, name, steamid, connected)
return player
def parse_status(status_str: str) -> List[Player]:
lines = status_str.split('\n')
players = list()
for line in lines:
player = parse_player(line)
if player is not None:
players.append(player)
players.sort(key=lambda p: p.userid)
return players
| 25.566038 | 99 | 0.621402 |
f2127a6f448cf4ef56e7010ff072c449d6530587 | 8,496 | py | Python | tapiriik/settings.py | pmenzel/tapiriik | a86a0ffcd75621ba0dd2b7b6e4be38e872f37b60 | [
"Apache-2.0"
] | 1,445 | 2015-01-01T21:43:31.000Z | 2022-03-17T13:40:23.000Z | tapiriik/settings.py | pmenzel/tapiriik | a86a0ffcd75621ba0dd2b7b6e4be38e872f37b60 | [
"Apache-2.0"
] | 441 | 2015-01-02T03:37:49.000Z | 2022-03-31T18:18:03.000Z | tapiriik/settings.py | pmenzel/tapiriik | a86a0ffcd75621ba0dd2b7b6e4be38e872f37b60 | [
"Apache-2.0"
] | 333 | 2015-01-06T12:14:15.000Z | 2022-03-27T19:58:48.000Z | import os
from datetime import datetime
# Django settings for tapiriik project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ["tapiriik.com", ".tapiriik.com", "localhost"]
USE_X_FORWARDED_HOST = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'C:/wamp/www/tapiriik/tapiriik/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_JS = {
'tapiriik-js': {
'source_filenames': (
'js/jquery.address-1.5.min.js',
'js/tapiriik.js',
),
'output_filename': 'js/tapiriik.min.js',
},
'tapiriik-user-js': {
'source_filenames': (
'js/jstz.min.js',
'js/tapiriik-ng.js',
),
'output_filename': 'js/tapiriik-user.min.js',
}
}
PIPELINE_CSS = {
'tapiriik-css': {
'source_filenames': (
'css/style.css',
),
'output_filename': 'css/style.min.css',
},
}
PIPELINE_DISABLE_WRAPPER = True
# Make this unique, and don't share it with anybody.
# and yes, this is overriden in local_settings.py
SECRET_KEY = 'vag26gs^t+_y0msoemqo%_5gb*th(i!v$l6##bq9tu2ggcsn13'
# In production, webservers must have only the public key
CREDENTIAL_STORAGE_PUBLIC_KEY = b"NotTheRealKeyFYI"
CREDENTIAL_STORAGE_PRIVATE_KEY = None
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'tapiriik.web.startup.Startup',
'tapiriik.web.startup.ServiceWebStartup',
'tapiriik.auth.SessionAuth'
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies" # file-based sessions on windows are terrible
ROOT_URLCONF = 'tapiriik.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tapiriik.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"I:/wamp/www/tapiriik/tapiriik/web/templates",
)
TEMPLATE_CONTEXT_PROCESSORS = (
'tapiriik.web.views.ab_experiment_context',
'tapiriik.web.context_processors.user',
'tapiriik.web.context_processors.config',
'tapiriik.web.context_processors.js_bridge',
'tapiriik.web.context_processors.stats',
'tapiriik.web.context_processors.providers',
'tapiriik.web.context_processors.celebration_mode',
'django.core.context_processors.static',)
INSTALLED_APPS = (
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tapiriik.web',
'pipeline'
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'ERROR',
'class': 'logging.StreamHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'tapiriik.testing.MongoDBTestRunner'
MONGO_HOST = "localhost"
MONGO_REPLICA_SET = None
MONGO_CLIENT_OPTIONS = {}
MONGO_FULL_WRITE_CONCERN = 1
REDIS_HOST = "localhost"
REDIS_CLIENT_OPTIONS = {}
WEB_ROOT = 'http://localhost:8000'
PP_WEBSCR = "https://www.sandbox.paypal.com/cgi-bin/webscr"
PP_BUTTON_ID = "XD6G9Z7VMRM3Q"
PP_RECEIVER_ID = "NR6NTNSRT7NDJ"
PAYMENT_AMOUNT = 2
PAYMENT_SYNC_DAYS = 365.25
PAYMENT_CURRENCY = "USD"
# Celebration mode config
# Because why not, I'm waiting for my account to get to the front of the sync queue.
CELEBRATION_MODES = {
(
datetime(day=21, month=6, year=datetime.now().year, hour=0, minute=0),
datetime(day=21, month=6, year=datetime.now().year, hour=23, minute=59)
): {
"Logo": "tapiriik-inuktitut.png",
"Subtitle": "National Aboriginal Day",
"TitleText": "Our Home on Native Land"
}
}
# Hidden from regular signup
SOFT_LAUNCH_SERVICES = []
# Visibly disabled + excluded from synchronization
DISABLED_SERVICES = []
# Rejected by synchronization worker
REJECTED_SERVICES = []
# Services no longer available - will be removed across the site + excluded from sync.
WITHDRAWN_SERVICES = []
# Where to put per-user sync logs
USER_SYNC_LOGS = "./"
# Set at startup
SITE_VER = "unknown"
# Cache lots of stuff to make local debugging faster
AGGRESSIVE_CACHE = True
# Diagnostics auth, None = no auth
DIAG_AUTH_TOTP_SECRET = DIAG_AUTH_PASSWORD = None
SPORTTRACKS_OPENFIT_ENDPOINT = "https://api.sporttracks.mobi/api/v2"
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = './sent_emails'
WORKER_INDEX = int(os.environ.get("TAPIRIIK_WORKER_INDEX", 0))
# Used for distributing outgoing calls across multiple interfaces
HTTP_SOURCE_ADDR = "0.0.0.0"
RABBITMQ_BROKER_URL = "amqp://guest@localhost//"
RABBITMQ_USER_QUEUE_STATS_URL = "http://guest:guest@localhost:15672/api/queues/%2F/tapiriik-users?lengths_age=3600&lengths_incr=60&msg_rates_age=3600&msg_rates_incr=60"
GARMIN_CONNECT_USER_WATCH_ACCOUNTS = {}
from .local_settings import *
| 30.342857 | 168 | 0.711982 |
fba7881a2c7a61227cf666fff59ac64348fb9f78 | 6,342 | py | Python | UMRFormer/experiment_planning/alternative_experiment_planning/patch_size/experiment_planner_3DUNet_isotropic_in_voxels.py | supersunshinefk/UMRFormer-Net | bf165ca2158a158f7c194c6201af2a4fcabe8742 | [
"MIT"
] | null | null | null | UMRFormer/experiment_planning/alternative_experiment_planning/patch_size/experiment_planner_3DUNet_isotropic_in_voxels.py | supersunshinefk/UMRFormer-Net | bf165ca2158a158f7c194c6201af2a4fcabe8742 | [
"MIT"
] | null | null | null | UMRFormer/experiment_planning/alternative_experiment_planning/patch_size/experiment_planner_3DUNet_isotropic_in_voxels.py | supersunshinefk/UMRFormer-Net | bf165ca2158a158f7c194c6201af2a4fcabe8742 | [
"MIT"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from UMRFormer_Net.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from UMRFormer_Net.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from UMRFormer_Net.network_architecture.generic_UNet import Generic_UNet
from UMRFormer_Net.paths import *
class ExperimentPlanner3D_IsoPatchesInVoxels(ExperimentPlanner):
"""
patches that are isotropic in the number of voxels (not mm), such as 128x128x128 allow more voxels to be processed
at once because we don't have to do annoying pooling stuff
CAREFUL!
this one does not support transpose_forward and transpose_backward
"""
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
super(ExperimentPlanner3D_IsoPatchesInVoxels, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
self.data_identifier = "UMRFormerData_isoPatchesInVoxels"
self.plans_fname = join(self.preprocessed_output_folder, "UMRFormerPlans" + "fixedisoPatchesInVoxels_plans_3D.pkl")
def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
num_modalities, num_classes):
"""
"""
new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
dataset_num_voxels = np.prod(new_median_shape) * num_cases
input_patch_size = new_median_shape
network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
ref = Generic_UNet.use_this_for_batch_size_computation_3D
here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
self.unet_base_num_features,
self.unet_max_num_filters, num_modalities,
num_classes,
pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
while here > ref:
# find the largest axis. If patch is isotropic, pick the axis with the largest spacing
if len(np.unique(new_shp)) == 1:
axis_to_be_reduced = np.argsort(current_spacing)[-1]
else:
axis_to_be_reduced = np.argsort(new_shp)[-1]
tmp = deepcopy(new_shp)
tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
_, _, _, _, shape_must_be_divisible_by_new = \
get_pool_and_conv_props_poolLateV2(tmp,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
# we have to recompute numpool now:
network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool,
current_spacing)
here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
self.unet_base_num_features,
self.unet_max_num_filters, num_modalities,
num_classes, pool_op_kernel_sizes,
conv_per_stage=self.conv_per_stage)
print(new_shp)
input_patch_size = new_shp
batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D # This is what works with 128**3
batch_size = int(np.floor(max(ref / here, 1) * batch_size))
# check if batch size is too large
max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
np.prod(input_patch_size, dtype=np.int64)).astype(int)
max_batch_size = max(max_batch_size, self.unet_min_batch_size)
batch_size = max(1, min(batch_size, max_batch_size))
do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
0]) > self.anisotropy_threshold
plan = {
'batch_size': batch_size,
'num_pool_per_axis': network_num_pool_per_axis,
'patch_size': input_patch_size,
'median_patient_size_in_voxels': new_median_shape,
'current_spacing': current_spacing,
'original_spacing': original_spacing,
'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
'pool_op_kernel_sizes': pool_op_kernel_sizes,
'conv_kernel_sizes': conv_kernel_sizes,
}
return plan
| 54.672414 | 123 | 0.608168 |
ca515bda78aac057fe36c36868859629b0036b39 | 2,563 | py | Python | web/tests/functional/store/__init__.py | LebedevRI/codechecker | f4548444851e19c8cc7b8fd621f3dcdf987d7140 | [
"Apache-2.0"
] | null | null | null | web/tests/functional/store/__init__.py | LebedevRI/codechecker | f4548444851e19c8cc7b8fd621f3dcdf987d7140 | [
"Apache-2.0"
] | null | null | null | web/tests/functional/store/__init__.py | LebedevRI/codechecker | f4548444851e19c8cc7b8fd621f3dcdf987d7140 | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""Setup for the package tests."""
import multiprocessing
import os
import shutil
from libtest import codechecker
from libtest import env
from libtest import plist_test
# Test workspace initialized at setup for report storage tests.
TEST_WORKSPACE = None
def setup_package():
"""Setup the environment for the tests then start the server."""
global TEST_WORKSPACE
TEST_WORKSPACE = env.get_workspace('store_test')
os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE
# Configuration options.
codechecker_cfg = {
'check_env': env.test_env(TEST_WORKSPACE),
'workspace': TEST_WORKSPACE,
'checkers': [],
'reportdir': os.path.join(TEST_WORKSPACE, 'reports'),
'test_project': 'store_test'
}
# Start or connect to the running CodeChecker server and get connection
# details.
print("This test uses a CodeChecker server... connecting...")
server_access = codechecker.start_or_get_server()
server_access['viewer_product'] = 'store_test'
codechecker.add_test_package_product(server_access, TEST_WORKSPACE)
# Extend the checker configuration with the server access.
codechecker_cfg.update(server_access)
# Export the test configuration to the workspace.
env.export_test_cfg(TEST_WORKSPACE, {'codechecker_cfg': codechecker_cfg})
# Copy test files to a temporary directory not to modify the
# files in the repository.
# Report files will be overwritten during the tests.
test_dir = os.path.dirname(os.path.realpath(__file__))
dst_dir = os.path.join(TEST_WORKSPACE, "test_proj")
shutil.copytree(os.path.join(test_dir, "test_proj"), dst_dir)
report_file = os.path.join(dst_dir, "divide_zero.plist")
plist_test.prefix_file_path(report_file, dst_dir)
def teardown_package():
"""Clean up after the test."""
# TODO: If environment variable is set keep the workspace
# and print out the path.
global TEST_WORKSPACE
check_env = env.import_test_cfg(TEST_WORKSPACE)[
'codechecker_cfg']['check_env']
codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)
print("Removing: " + TEST_WORKSPACE)
shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)
| 32.858974 | 77 | 0.687476 |
a721c12a0465bac6a5ccbcf71e51327e44837f9c | 122 | py | Python | fetal-head-segment/__main__.py | AbdulAhadKhan/fetal-head-segment | 89a33738947b2c9d755877261e666390a5cbee49 | [
"MIT"
] | null | null | null | fetal-head-segment/__main__.py | AbdulAhadKhan/fetal-head-segment | 89a33738947b2c9d755877261e666390a5cbee49 | [
"MIT"
] | 3 | 2020-06-26T21:01:08.000Z | 2021-09-08T02:14:55.000Z | fetal-head-segment/__main__.py | AbdulAhadKhan/fetal-head-segment | 89a33738947b2c9d755877261e666390a5cbee49 | [
"MIT"
] | null | null | null | import os
from command import execute
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '6'
if __name__ == '__main__':
execute()
| 15.25 | 40 | 0.713115 |
1dea9f19669ea8bf9e067c27b7ecf6c10a4a6594 | 2,686 | py | Python | utils/fixscatter.py | emit-sds/emit-sds-l1b | be5307fe6821a043971becdd33609b4cf89b1974 | [
"Apache-2.0"
] | null | null | null | utils/fixscatter.py | emit-sds/emit-sds-l1b | be5307fe6821a043971becdd33609b4cf89b1974 | [
"Apache-2.0"
] | null | null | null | utils/fixscatter.py | emit-sds/emit-sds-l1b | be5307fe6821a043971becdd33609b4cf89b1974 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
#
# Copyright 2020 California Institute of Technology
#
# EMIT Radiometric Calibration code
# Author: David R Thompson, david.r.thompson@jpl.nasa.gov
import scipy.linalg
import os, sys
import numpy as np
from spectral.io import envi
import json
import logging
import argparse
from numba import jit
from math import pow
from fpa import FPA, frame_embed, frame_extract
def find_header(infile):
if os.path.exists(infile+'.hdr'):
return infile+'.hdr'
elif os.path.exists('.'.join(infile.split('.')[:-1])+'.hdr'):
return '.'.join(infile.split('.')[:-1])+'.hdr'
else:
raise FileNotFoundError('Did not find header file')
def fix_scatter(frame, spectral_correction, spatial_correction):
if frame.shape[0] != spectral_correction.shape[0] or \
frame.shape[1] != spatial_correction.shape[1]:
logging.error('Mismatched frame size')
fixed = spectral_correction @ (spatial_correction @ frame.T).T
return fixed
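def _example_fix_scatter():
    # Sketch: identity correction matrices leave a frame unchanged; shapes follow
    # the (rows x columns) frame layout used above.
    frame = np.ones((4, 5), dtype=np.float32)
    spectral = np.eye(4, dtype=np.float32)
    spatial = np.eye(5, dtype=np.float32)
    return fix_scatter(frame, spectral, spatial)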
def main():
description = "Fix spatial and spectral scatter"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('input')
parser.add_argument('--config')
parser.add_argument('spatial_corr')
parser.add_argument('spectral_corr')
parser.add_argument('output')
args = parser.parse_args()
fpa = FPA(args.config)
infile = envi.open(find_header(args.input))
spatialfile = envi.open(find_header(args.spatial_corr))
spatial = np.squeeze(spatialfile.load())
spectralfile = envi.open(find_header(args.spectral_corr))
spectral = np.squeeze(spectralfile.load())
if int(infile.metadata['data type']) == 2:
dtype = np.uint16
elif int(infile.metadata['data type']) == 4:
dtype = np.float32
else:
raise ValueError('Unsupported data type')
if infile.metadata['interleave'] != 'bil':
raise ValueError('Unsupported interleave')
rows = int(infile.metadata['bands'])
columns = int(infile.metadata['samples'])
lines = int(infile.metadata['lines'])
nframe = rows * columns
envi.write_envi_header(args.output+'.hdr',infile.metadata)
with open(args.input,'rb') as fin:
with open(args.output,'wb') as fout:
for line in range(lines):
# Read a frame of data
if line%10==0:
logging.info('Line '+str(line))
frame = np.fromfile(fin, count=nframe, dtype=dtype)
frame = np.array(frame.reshape((rows, columns)),dtype=np.float32)
fixed = fix_scatter(frame, spectral, spatial)
np.array(fixed, dtype=np.float32).tofile(fout)
print('done')
if __name__ == '__main__':
main()
| 28.273684 | 77 | 0.669397 |
9eb0e9c200821f2aa71f9f74afeed1e4b2d4e475 | 8,828 | py | Python | src/img_dataset/TF_flowers.py | wenxichen/tensorflow_yolo2 | f040d9932816d8b2f8d7a67231060f0beea821d4 | [
"MIT"
] | 25 | 2017-05-15T08:44:26.000Z | 2019-09-05T05:23:59.000Z | src/img_dataset/TF_flowers.py | wenxichen/tensorflow_yolo2 | f040d9932816d8b2f8d7a67231060f0beea821d4 | [
"MIT"
] | 5 | 2017-05-16T07:18:47.000Z | 2018-02-14T08:22:56.000Z | src/img_dataset/TF_flowers.py | wenxichen/tensorflow_yolo2 | f040d9932816d8b2f8d7a67231060f0beea821d4 | [
"MIT"
] | 10 | 2017-07-03T13:27:27.000Z | 2018-11-21T13:10:16.000Z | """ILSVRC 2017 Classicifation Dataset.
Use val_split to set the portion for validation set.
"""
import os
import cv2
import math
import numpy as np
import random
import pickle
import copy
from tqdm import trange, tqdm
import config as cfg
class tf_flowers:
def __init__(self, val_split, rebuild=False, data_aug=False):
self.name = 'TF_flowers'
self.devkit_path = cfg.FLOWERS_PATH
self.data_path = self.devkit_path
self.cache_path = cfg.CACHE_PATH
self.batch_size = cfg.BATCH_SIZE
self.image_size = cfg.IMAGE_SIZE
self.rebuild = rebuild
self.data_aug = data_aug
self.num_class = 5
self.classes = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
self.class_to_ind = dict(
list(zip(self.classes, list(range(self.num_class)))))
self.train_cursor = 0
self.val_cursor = 0
self.epoch = 1
self.gt_labels = None
self.val_split = val_split
assert os.path.exists(self.devkit_path), \
'TF_flowers path does not exist: {}'.format(self.devkit_path)
assert os.path.exists(self.data_path), \
'Path does not exist: {}'.format(self.data_path)
self.prepare()
def prepare(self):
"""Create a list of ground truth that includes input path and label.
Then, split the data into training set and validation set according to val_split.
"""
# TODO: may still need to implement test
cache_file = os.path.join(
self.cache_path, 'TF_flowers_gt_labels.pkl')
if os.path.isfile(cache_file) and not self.rebuild:
print('Loading gt_labels from: ' + cache_file)
with open(cache_file, 'rb') as f:
gt_labels = pickle.load(f)
print('{} dataset gt_labels loaded from {}'.
format(self.name, cache_file))
else:
print('Processing gt_labels using...')
gt_labels = []
for c in tqdm(self.classes):
label = self.class_to_ind[c]
c_data_dir = os.path.join(self.data_path, c)
for f in os.listdir(c_data_dir):
if f[-4:].lower() == '.jpg':
imname = os.path.join(c_data_dir, f)
gt_labels.append({'imname': imname, 'label': label})
print('Saving gt_labels to: ' + cache_file)
with open(cache_file, 'wb') as f:
pickle.dump(gt_labels, f)
random.shuffle(gt_labels)
self.gt_labels = gt_labels
self.dataset_size = len(gt_labels)
self.total_batch = int(
math.ceil(self.dataset_size / float(self.batch_size)))
cut_idx = int(self.dataset_size * self.val_split)
self.val_gt_labels = copy.deepcopy(gt_labels[:cut_idx])
self.train_gt_labels = copy.deepcopy(gt_labels[cut_idx:])
print('training set size: {:d}, validation set size: {:d}'
.format(len(self.train_gt_labels), len(self.val_gt_labels)))
def get_train(self):
return self._get('train')
def get_val(self):
return self._get('val')
def _get(self, image_set):
"""Get shuffled images and labels according to batchsize.
Use image_set to set whether to get training set or validation set.
validation set data will not have data augmentation.
Return:
images: 4D numpy array
labels: 1D numpy array
"""
if image_set == 'val':
gt_labels = self.val_gt_labels
cursor = self.val_cursor
data_aug = False
elif image_set == 'train':
gt_labels = self.train_gt_labels
cursor = self.train_cursor
data_aug = self.data_aug
images = np.zeros(
(self.batch_size, self.image_size, self.image_size, 3))
labels = np.zeros(self.batch_size)
count = 0
while count < self.batch_size:
imname = gt_labels[cursor]['imname']
images[count, :, :, :] = self.image_read(
imname, data_aug=data_aug)
labels[count] = gt_labels[cursor]['label']
count += 1
cursor += 1
if cursor >= len(gt_labels):
random.shuffle(self.train_gt_labels)
cursor = 0
if image_set == 'train':
self.epoch += 1
if image_set == 'val':
self.val_cursor = cursor
elif image_set == 'train':
self.train_cursor = cursor
return images, labels
def image_read(self, imname, data_aug=False):
image = cv2.imread(imname)
#####################
# Data Augmentation #
#####################
if data_aug:
flip = bool(random.getrandbits(1))
rotate_deg = random.randint(0, 359)
# 75% chance to do random crop
            # another 25% chance of maintaining input at 224x224
            # this helps simplify the input processing for test, val
# TODO: can make multiscale test input later
random_crop_chance = random.randint(0, 3)
too_small = False
color_pert = bool(random.getrandbits(1))
exposure_shift = bool(random.getrandbits(1))
if flip:
image = image[:, ::-1, :]
# assume color image
rows, cols, _ = image.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_deg, 1)
image = cv2.warpAffine(image, M, (cols, rows))
# color perturbation
if color_pert:
hue_shift_sign = bool(random.getrandbits(1))
hue_shift = random.randint(0, 10)
saturation_shift_sign = bool(random.getrandbits(1))
saturation_shift = random.randint(0, 10)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# TODO: currently not sure what cv2 does to values
# that are larger than the maximum.
# It seems it does not cut at the max
# nor normalize the whole by multiplying a factor.
                # need to explore this in more detail
if hue_shift_sign:
hsv[:, :, 0] += hue_shift
else:
hsv[:, :, 0] -= hue_shift
if saturation_shift_sign:
hsv[:, :, 1] += saturation_shift
else:
hsv[:, :, 1] -= saturation_shift
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
if exposure_shift:
brighter = bool(random.getrandbits(1))
if brighter:
gamma = random.uniform(1, 2)
else:
gamma = random.uniform(0.5, 1)
image = ((image / 255.0) ** (1.0 / gamma)) * 255
# random crop
if random_crop_chance > 0:
# current random crop upbound is 292 (1.3 x 224)
short_side_len = random.randint(
self.image_size, cfg.RAND_CROP_UPBOUND)
short_side = min([cols, rows])
if short_side == cols:
scaled_cols = short_side_len
factor = float(short_side_len) / cols
scaled_rows = int(rows * factor)
else:
scaled_rows = short_side_len
factor = float(short_side_len) / rows
scaled_cols = int(cols * factor)
# print "scaled_cols and rows:", scaled_cols, scaled_rows
if scaled_cols < 224 or scaled_rows < 224:
too_small = True
print "Image is too small,", imname
else:
image = cv2.resize(image, (scaled_cols, scaled_rows))
col_offset = random.randint(
0, scaled_cols - self.image_size)
row_offset = random.randint(
0, scaled_rows - self.image_size)
# print "col_offset and row_offset:", col_offset, row_offset
image = image[row_offset:self.image_size + row_offset,
col_offset:self.image_size + col_offset]
# assuming still using image size 224x224
# print "image shape is", image.shape
if random_crop_chance == 0 or too_small:
image = cv2.resize(image, (self.image_size, self.image_size))
else:
image = cv2.resize(image, (self.image_size, self.image_size))
image = image.astype(np.float32)
image = (image / 255.0) * 2.0 - 1.0
return image
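def _example_gamma_shift():
    # Standalone sketch of the exposure shift used in image_read above: pixel
    # values are scaled to [0, 1], raised to 1/gamma, and rescaled. With
    # gamma > 1 the image gets brighter, with gamma < 1 darker.
    pixel = np.array([64.0, 128.0, 192.0])
    gamma = 1.5
    return ((pixel / 255.0) ** (1.0 / gamma)) * 255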
| 39.587444 | 89 | 0.543838 |
8b2eb2fcddf510f9a1f5c478a8ead42d71618ddc | 393 | py | Python | setup.py | marcvinyals/problemtools | e55f97112c9385ed7df1aed1f7b45c516195389e | [
"MIT"
] | null | null | null | setup.py | marcvinyals/problemtools | e55f97112c9385ed7df1aed1f7b45c516195389e | [
"MIT"
] | null | null | null | setup.py | marcvinyals/problemtools | e55f97112c9385ed7df1aed1f7b45c516195389e | [
"MIT"
] | null | null | null | #!/usr/bin/python
from distutils.core import setup
setup(name='Kattis Problemtools',
version='1.1',
description='The Kattis Problem Tools',
maintainer='Per Austrin',
maintainer_email='austrin@kattis.com',
url='https://github.com/Kattis/problemtools',
packages=['problemtools','problemtools.ProblemPlasTeX'],
package_dir = {'problemtools': 'src'}
)
| 28.071429 | 62 | 0.679389 |
fc20591bc3ac7a8520a5bf232c744f5df912316a | 1,408 | py | Python | api/models/song.py | donovandicks/datafy | 614d0ae409cfe5c5ed28679d25efa8a1fd34333e | [
"MIT"
] | null | null | null | api/models/song.py | donovandicks/datafy | 614d0ae409cfe5c5ed28679d25efa8a1fd34333e | [
"MIT"
] | 4 | 2021-12-07T05:56:28.000Z | 2021-12-07T05:56:48.000Z | api/models/song.py | donovandicks/datafy | 614d0ae409cfe5c5ed28679d25efa8a1fd34333e | [
"MIT"
] | null | null | null | """Query models for Spotify songs"""
from typing import Dict
from pydantic import BaseModel
from models.common import Query
class SongQuery(Query):
"""The query model for the `/songs` route"""
class Song(BaseModel):
"""The object model representing key elements of Spotify songs"""
content: str
"""The type of content - used for serialization"""
id: str
"""The Spotify ID of the song"""
name: str
"""The name of the song"""
artists: list[str]
"""The artist(s) who perform the song"""
popularity: int
"""The popularity of the song from 0 to 100"""
album: str
"""The album the song is from"""
release_date: str
"""The date the album was first released"""
@classmethod
def from_dict(cls, song: Dict):
"""
Converts a dictionary into a `Song` object
Params
------
song: Dict
a song object returned from Spotify
Returns
-------
song: Song
a `Song` object with the data elements from the input `song`
"""
return Song(
content="Song",
id=song["id"],
name=song["name"],
artists=[artist["name"] for artist in song["artists"]],
popularity=song["popularity"],
album=song["album"]["name"],
release_date=song["album"]["release_date"],
)
| 22.709677 | 72 | 0.568892 |
6eab2cf250a2070e975db41592f44e058e71dd4e | 396 | py | Python | main08.py | rifhall/E02a-Control-Structures | 1452e03f5876afcf233213944e37acc5c6eb4335 | [
"MIT"
] | null | null | null | main08.py | rifhall/E02a-Control-Structures | 1452e03f5876afcf233213944e37acc5c6eb4335 | [
"MIT"
] | null | null | null | main08.py | rifhall/E02a-Control-Structures | 1452e03f5876afcf233213944e37acc5c6eb4335 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
print('Greetings!')
color = ''
color = input("What is my favorite color? ")
while (color != 'red'):
color = color.lower().strip()
if (color == 'red'):
print('Correct!')
elif (color == 'pink'):
print('Close!')
else:
print('Sorry, try again.') | 22 | 76 | 0.583333 |
9200bcfcdaf6c29ad14847d40cfe84ac36a55622 | 22,666 | py | Python | lib/galaxy/tools/execute.py | brinkmanlab/galaxy | 8b5553ca9885f350491e0570b23e53fc13009226 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/execute.py | brinkmanlab/galaxy | 8b5553ca9885f350491e0570b23e53fc13009226 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/execute.py | brinkmanlab/galaxy | 8b5553ca9885f350491e0570b23e53fc13009226 | [
"CC-BY-3.0"
] | null | null | null | """
Once state information has been calculated, handle actually executing tools
from various states, tracking results, and building implicit dataset
collections from matched collections.
"""
import collections
import logging
from boltons.iterutils import remap
from galaxy import model
from galaxy.model.dataset_collections.structure import get_structure, tool_output_to_structure
from galaxy.tool_util.parser import ToolOutputCollectionPart
from galaxy.tools.actions import filter_output, on_text_for_names, ToolExecutionCache
from galaxy.tools.parameters.basic import is_runtime_value
log = logging.getLogger(__name__)
SINGLE_EXECUTION_SUCCESS_MESSAGE = "Tool ${tool_id} created job ${job_id}"
BATCH_EXECUTION_MESSAGE = "Executed ${job_count} job(s) for tool ${tool_id} request"
class PartialJobExecution(Exception):
def __init__(self, execution_tracker):
self.execution_tracker = execution_tracker
MappingParameters = collections.namedtuple("MappingParameters", ["param_template", "param_combinations"])
def execute(trans, tool, mapping_params, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None, invocation_step=None, max_num_jobs=None, job_callback=None, completed_jobs=None, workflow_resource_parameters=None, validate_outputs=False):
"""
Execute a tool and return object containing summary (output data, number of
failures, etc...).
"""
if max_num_jobs is not None:
assert invocation_step is not None
if rerun_remap_job_id:
assert invocation_step is None
all_jobs_timer = tool.app.execution_timer_factory.get_timer(
'internals.galaxy.tools.execute.job_batch', BATCH_EXECUTION_MESSAGE
)
if invocation_step is None:
execution_tracker = ToolExecutionTracker(trans, tool, mapping_params, collection_info, completed_jobs=completed_jobs)
else:
execution_tracker = WorkflowStepExecutionTracker(trans, tool, mapping_params, collection_info, invocation_step, completed_jobs=completed_jobs)
execution_cache = ToolExecutionCache(trans)
def execute_single_job(execution_slice, completed_job):
job_timer = tool.app.execution_timer_factory.get_timer(
'internals.galaxy.tools.execute.job_single', SINGLE_EXECUTION_SUCCESS_MESSAGE
)
params = execution_slice.param_combination
if workflow_invocation_uuid:
params['__workflow_invocation_uuid__'] = workflow_invocation_uuid
elif '__workflow_invocation_uuid__' in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params['__workflow_invocation_uuid__']
if workflow_resource_parameters:
params['__workflow_resource_params__'] = workflow_resource_parameters
elif '__workflow_resource_params__' in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params['__workflow_resource_params__']
if validate_outputs:
params['__validate_outputs__'] = True
job, result = tool.handle_single_execution(trans, rerun_remap_job_id, execution_slice, history, execution_cache, completed_job, collection_info, job_callback=job_callback, flush_job=False)
if job:
log.debug(job_timer.to_str(tool_id=tool.id, job_id=job.id))
execution_tracker.record_success(execution_slice, job, result)
else:
execution_tracker.record_error(result)
tool_action = tool.tool_action
if hasattr(tool_action, "check_inputs_ready"):
for params in execution_tracker.param_combinations:
# This will throw an exception if the tool is not ready.
tool_action.check_inputs_ready(
tool,
trans,
params,
history,
execution_cache=execution_cache,
collection_info=collection_info,
)
execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template)
job_count = len(execution_tracker.param_combinations)
jobs_executed = 0
has_remaining_jobs = False
execution_slice = None
for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
if max_num_jobs is not None and jobs_executed >= max_num_jobs:
has_remaining_jobs = True
break
else:
execute_single_job(execution_slice, completed_jobs[i])
history = execution_slice.history or history
jobs_executed += 1
if execution_slice:
# a side effect of adding datasets to a history is a commit within db_next_hid (even with flush=False).
history.add_pending_items()
else:
# Make sure collections, implicit jobs etc are flushed even if there are no precreated output datasets
trans.sa_session.flush()
tool_id = tool.id
for job in execution_tracker.successful_jobs:
# Put the job in the queue if tracking in memory
tool.app.job_manager.enqueue(job, tool=tool, flush=False)
trans.log_event("Added job to the job queue, id: %s" % str(job.id), tool_id=tool_id)
trans.sa_session.flush()
if has_remaining_jobs:
raise PartialJobExecution(execution_tracker)
else:
execution_tracker.finalize_dataset_collections(trans)
log.debug(all_jobs_timer.to_str(job_count=job_count, tool_id=tool.id))
return execution_tracker
class ExecutionSlice:
def __init__(self, job_index, param_combination, dataset_collection_elements=None):
self.job_index = job_index
self.param_combination = param_combination
self.dataset_collection_elements = dataset_collection_elements
self.history = None
class ExecutionTracker:
def __init__(self, trans, tool, mapping_params, collection_info, completed_jobs=None):
# Known ahead of time...
self.trans = trans
self.tool = tool
self.mapping_params = mapping_params
self.collection_info = collection_info
self.completed_jobs = completed_jobs
self._on_text = None
# Populated as we go...
self.failed_jobs = 0
self.execution_errors = []
self.successful_jobs = []
self.output_datasets = []
self.output_collections = []
self.implicit_collections = {}
@property
def param_combinations(self):
return self.mapping_params.param_combinations
@property
def example_params(self):
if self.mapping_params.param_combinations:
return self.mapping_params.param_combinations[0]
else:
# TODO: This isn't quite right - what we want is something like param_template wrapped,
# need a test case with an output filter applied to an empty list, still this is
# an improvement over not allowing mapping of empty lists.
return self.mapping_params.param_template
@property
def job_count(self):
return len(self.param_combinations)
def record_error(self, error):
self.failed_jobs += 1
message = "There was a failure executing a job for tool [%s] - %s"
log.warning(message, self.tool.id, error)
self.execution_errors.append(error)
@property
def on_text(self):
if self._on_text is None:
collection_names = ["collection %d" % c.hid for c in self.collection_info.collections.values()]
self._on_text = on_text_for_names(collection_names)
return self._on_text
def output_name(self, trans, history, params, output):
on_text = self.on_text
try:
output_collection_name = self.tool.tool_action.get_output_name(
output,
dataset=None,
tool=self.tool,
on_text=on_text,
trans=trans,
history=history,
params=params,
incoming=None,
job_params=None,
)
except Exception:
output_collection_name = f"{self.tool.name} across {on_text}"
return output_collection_name
def sliced_input_collection_structure(self, input_name):
unqualified_recurse = self.tool.profile < 18.09 and "|" not in input_name
def find_collection(input_dict, input_name):
for key, value in input_dict.items():
if key == input_name:
return value
if isinstance(value, dict):
if "|" in input_name:
prefix, rest_input_name = input_name.split("|", 1)
if key == prefix:
return find_collection(value, rest_input_name)
elif unqualified_recurse:
# Looking for "input1" instead of "cond|input1" for instance.
# See discussion on https://github.com/galaxyproject/galaxy/issues/6157.
unqualified_match = find_collection(value, input_name)
if unqualified_match:
return unqualified_match
input_collection = find_collection(self.example_params, input_name)
if input_collection is None:
raise Exception("Failed to find referenced collection in inputs.")
if not hasattr(input_collection, "collection"):
raise Exception("Referenced input parameter is not a collection.")
collection_type_description = self.trans.app.dataset_collections_service.collection_type_descriptions.for_collection_type(input_collection.collection.collection_type)
subcollection_mapping_type = None
if self.is_implicit_input(input_name):
subcollection_mapping_type = self.collection_info.subcollection_mapping_type(input_name)
return get_structure(input_collection, collection_type_description, leaf_subcollection_type=subcollection_mapping_type)
def _structure_for_output(self, trans, tool_output):
structure = self.collection_info.structure
if hasattr(tool_output, "default_identifier_source"):
# Switch the structure for outputs if the output specified a default_identifier_source
collection_type_descriptions = trans.app.dataset_collections_service.collection_type_descriptions
source_collection = self.collection_info.collections.get(tool_output.default_identifier_source)
if source_collection:
collection_type_description = collection_type_descriptions.for_collection_type(source_collection.collection.collection_type)
_structure = structure.for_dataset_collection(source_collection.collection, collection_type_description=collection_type_description)
if structure.can_match(_structure):
structure = _structure
return structure
def _mapped_output_structure(self, trans, tool_output):
collections_manager = trans.app.dataset_collections_service
output_structure = tool_output_to_structure(self.sliced_input_collection_structure, tool_output, collections_manager)
# self.collection_info.structure - the mapping structure with default_identifier_source
# used to determine the identifiers to use.
mapping_structure = self._structure_for_output(trans, tool_output)
# Output structure may not be known, but input structure must be,
# otherwise this step of the workflow shouldn't have been scheduled
# or the tool should not have been executable on this input.
mapped_output_structure = mapping_structure.multiply(output_structure)
return mapped_output_structure
def ensure_implicit_collections_populated(self, history, params):
if not self.collection_info:
return
history = history or self.tool.get_default_history_by_trans(self.trans)
# params = param_combinations[0] if param_combinations else mapping_params.param_template
self.precreate_output_collections(history, params)
def precreate_output_collections(self, history, params):
# params is just one sample tool param execution with parallelized
# collection replaced with a specific dataset. Need to replace this
        # with the collection and wrap everything up so we can evaluate the
        # output label.
trans = self.trans
params.update(self.collection_info.collections) # Replace datasets with source collections for labelling outputs.
collection_instances = {}
implicit_inputs = self.implicit_inputs
implicit_collection_jobs = model.ImplicitCollectionJobs()
        # trying to guess these filters at the collection level is tricky because
        # the filter condition could vary from element to element. Just do the
        # best we can for now.
example_params = self.example_params.copy()
        # walk through and optionally replace runtime values with None; assume they
# would have been replaced by now if they were going to be set.
def replace_optional_runtime_values(path, key, value):
if is_runtime_value(value):
return key, None
return key, value
example_params = remap(example_params, visit=replace_optional_runtime_values)
for output_name, output in self.tool.outputs.items():
if filter_output(self.tool, output, example_params):
continue
output_collection_name = self.output_name(trans, history, params, output)
effective_structure = self._mapped_output_structure(trans, output)
collection_instance = trans.app.dataset_collections_service.precreate_dataset_collection_instance(
trans=trans,
parent=history,
name=output_collection_name,
structure=effective_structure,
implicit_inputs=implicit_inputs,
implicit_output_name=output_name,
completed_collection=self.completed_jobs,
)
collection_instance.implicit_collection_jobs = implicit_collection_jobs
collection_instances[output_name] = collection_instance
trans.sa_session.add(collection_instance)
# Needed to flush the association created just above with
# job.add_output_dataset_collection.
trans.sa_session.flush()
self.implicit_collections = collection_instances
@property
def implicit_collection_jobs(self):
# TODO: refactor to track this properly maybe?
if self.implicit_collections:
return next(iter(self.implicit_collections.values())).implicit_collection_jobs
else:
return None
def finalize_dataset_collections(self, trans):
# TODO: this probably needs to be reworked some, we should have the collection methods
# return a list of changed objects to add to the session and flush and we should only
# be finalizing collections to a depth of self.collection_info.structure. So for instance
# if you are mapping a list over a tool that dynamically generates lists - we won't actually
# know the structure of the inner list until after its job is complete.
if self.failed_jobs > 0:
for i, implicit_collection in enumerate(self.implicit_collections.values()):
if i == 0:
implicit_collection_jobs = implicit_collection.implicit_collection_jobs
implicit_collection_jobs.populated_state = "failed"
trans.sa_session.add(implicit_collection_jobs)
implicit_collection.collection.handle_population_failed("One or more jobs failed during dataset initialization.")
trans.sa_session.add(implicit_collection.collection)
else:
for i, implicit_collection in enumerate(self.implicit_collections.values()):
if i == 0:
implicit_collection_jobs = implicit_collection.implicit_collection_jobs
implicit_collection_jobs.populated_state = "ok"
trans.sa_session.add(implicit_collection_jobs)
implicit_collection.collection.finalize(
collection_type_description=self.collection_info.structure.collection_type_description
)
trans.sa_session.add(implicit_collection.collection)
trans.sa_session.flush()
@property
def implicit_inputs(self):
implicit_inputs = list(self.collection_info.collections.items())
return implicit_inputs
def is_implicit_input(self, input_name):
return input_name in self.collection_info.collections
def walk_implicit_collections(self):
return self.collection_info.structure.walk_collections(self.implicit_collections)
def new_execution_slices(self):
if self.collection_info is None:
for job_index, param_combination in enumerate(self.param_combinations):
yield ExecutionSlice(job_index, param_combination)
else:
yield from self.new_collection_execution_slices()
def record_success(self, execution_slice, job, outputs):
# TODO: successful_jobs need to be inserted in the correct place...
self.successful_jobs.append(job)
self.output_datasets.extend(outputs)
for job_output in job.output_dataset_collection_instances:
self.output_collections.append((job_output.name, job_output.dataset_collection_instance))
if self.implicit_collections:
implicit_collection_jobs = None
for output_name, collection_instance in self.implicit_collections.items():
job.add_output_dataset_collection(output_name, collection_instance)
if implicit_collection_jobs is None:
implicit_collection_jobs = collection_instance.implicit_collection_jobs
job_assoc = model.ImplicitCollectionJobsJobAssociation()
job_assoc.order_index = execution_slice.job_index
job_assoc.implicit_collection_jobs = implicit_collection_jobs
job_assoc.job = job
self.trans.sa_session.add(job_assoc)
# Separate these because workflows need to track that their jobs belong to the
# invocation in the database immediately so that they can be recovered.
class ToolExecutionTracker(ExecutionTracker):
def __init__(self, trans, tool, mapping_params, collection_info, completed_jobs=None):
super().__init__(trans, tool, mapping_params, collection_info, completed_jobs=completed_jobs)
# New to track these things for tool output API response in the tool case,
# in the workflow case we just write stuff to the database and forget about
# it.
self.outputs_by_output_name = collections.defaultdict(list)
def record_success(self, execution_slice, job, outputs):
super().record_success(execution_slice, job, outputs)
for output_name, output_dataset in outputs:
if ToolOutputCollectionPart.is_named_collection_part_name(output_name):
# Skip known collection outputs, these will be covered by
# output collections.
continue
self.outputs_by_output_name[output_name].append(output_dataset)
for job_output in job.output_dataset_collections:
self.outputs_by_output_name[job_output.name].append(job_output.dataset_collection)
def new_collection_execution_slices(self):
for job_index, (param_combination, dataset_collection_elements) in enumerate(zip(self.param_combinations, self.walk_implicit_collections())):
completed_job = self.completed_jobs and self.completed_jobs[job_index]
if not completed_job:
for dataset_collection_element in dataset_collection_elements.values():
assert dataset_collection_element.element_object is None
yield ExecutionSlice(job_index, param_combination, dataset_collection_elements)
class WorkflowStepExecutionTracker(ExecutionTracker):
def __init__(self, trans, tool, mapping_params, collection_info, invocation_step, completed_jobs=None):
super().__init__(trans, tool, mapping_params, collection_info, completed_jobs=completed_jobs)
self.invocation_step = invocation_step
def record_success(self, execution_slice, job, outputs):
super().record_success(execution_slice, job, outputs)
if not self.collection_info:
for output_name, output in outputs:
self.invocation_step.add_output(output_name, output)
self.invocation_step.job = job
def new_collection_execution_slices(self):
for job_index, (param_combination, dataset_collection_elements) in enumerate(zip(self.param_combinations, self.walk_implicit_collections())):
completed_job = self.completed_jobs and self.completed_jobs[job_index]
if not completed_job:
found_result = False
for dataset_collection_element in dataset_collection_elements.values():
if dataset_collection_element.element_object is not None:
found_result = True
break
if found_result:
continue
yield ExecutionSlice(job_index, param_combination, dataset_collection_elements)
def ensure_implicit_collections_populated(self, history, params):
if not self.collection_info:
return
history = history or self.tool.get_default_history_by_trans(self.trans)
if self.invocation_step.is_new:
self.precreate_output_collections(history, params)
for output_name, implicit_collection in self.implicit_collections.items():
self.invocation_step.add_output(output_name, implicit_collection)
else:
collections = {}
for output_assoc in self.invocation_step.output_dataset_collections:
implicit_collection = output_assoc.dataset_collection
assert hasattr(implicit_collection, "history_content_type") # make sure it is an HDCA and not a DC
collections[output_assoc.output_name] = output_assoc.dataset_collection
self.implicit_collections = collections
self.invocation_step.implicit_collection_jobs = self.implicit_collection_jobs
__all__ = ('execute', )
| 47.024896 | 268 | 0.698138 |
737926222c3a1e912eaeb2e8b7cba6c01123cb49 | 828 | py | Python | pigpio_HCSR04.py | meigrafd/Sample-Code | 7065950dfd0728cfad66db08327f0efa4c26999c | [
"MIT"
] | 10 | 2017-02-08T18:36:09.000Z | 2021-03-31T04:22:25.000Z | pigpio_HCSR04.py | meigrafd/Sample-Code | 7065950dfd0728cfad66db08327f0efa4c26999c | [
"MIT"
] | 2 | 2017-01-30T22:15:43.000Z | 2017-09-01T21:49:29.000Z | pigpio_HCSR04.py | meigrafd/Sample-Code | 7065950dfd0728cfad66db08327f0efa4c26999c | [
"MIT"
] | 8 | 2017-02-09T12:33:47.000Z | 2021-04-03T13:34:33.000Z | import pigpio
import time
GPIO_TRIGGER = 20
GPIO_ECHO = 21
def distance():
gpio.write(GPIO_TRIGGER, pigpio.HIGH)
time.sleep(0.00001)
gpio.write(GPIO_TRIGGER, pigpio.LOW)
StartZeit = time.time()
StopZeit = time.time()
while gpio.read(GPIO_ECHO) == 0:
StartZeit = time.time()
while gpio.read(GPIO_ECHO) == 1:
StopZeit = time.time()
TimeElapsed = StopZeit - StartZeit
return (TimeElapsed * 34300) / 2
if __name__ == '__main__':
try:
gpio = pigpio.pi()
gpio.set_mode(GPIO_TRIGGER, pigpio.OUTPUT)
gpio.set_mode(GPIO_ECHO, pigpio.INPUT)
while True:
print("Gemessene Entfernung = %.1f cm" % distance())
time.sleep(1)
except KeyboardInterrupt:
print("Messung vom User gestoppt")
gpio.stop()
#EOF | 22.378378 | 64 | 0.620773 |
977379646fcfa6d369ef3913a7e658b5b9333486 | 9,243 | py | Python | CSCA08/A2_Checker_V2.py | Lemonsity/CSCA-Checkers | 79b9819e4e148c2d3235e65856eaf6c4d991926c | [
"MIT"
] | null | null | null | CSCA08/A2_Checker_V2.py | Lemonsity/CSCA-Checkers | 79b9819e4e148c2d3235e65856eaf6c4d991926c | [
"MIT"
] | null | null | null | CSCA08/A2_Checker_V2.py | Lemonsity/CSCA-Checkers | 79b9819e4e148c2d3235e65856eaf6c4d991926c | [
"MIT"
] | null | null | null | # Youzhang Mark Sun
# Oct 14, 2019
# 66 test cases
# designed for elevation.py 2019 fall semester
import unittest
import elevation as e # require elevation.py file
class A2CheckerV2(unittest.TestCase):
def setUp(self):
self.m_1 = [[1]]
self.m_2 = [[2, 3],
[5, 1]]
self.m_3 = [[11, 12, 16],
[9, 8, 14],
[4, 27, 2]]
self.m_4 = [[8, 31, 18, 15],
[44, 30, 11, 25],
[55, 59, 22, 62],
[5, 14, 46, 60]]
self.m_5 = [[58, 103, 45, 96, 115],
[70, 8, 56, 31, 55],
[107, 78, 52, 89, 91],
[35, 40, 20, 16, 112],
[60, 93, 67, 99, 37]]
def test_compare(self):
print("\ncompare_elevations_within_row:", end = " ")
self.m_x = [[1, 2, 3], [5, 6, 5], [4, 5, 8]]
self.assertListEqual(e.compare_elevations_within_row(self.m_x, 0, 2), [1, 1, 1], "contain equal")
self.assertListEqual(e.compare_elevations_within_row(self.m_x, 1, 6), [2, 1, 0], "contain equal")
self.assertListEqual(e.compare_elevations_within_row(self.m_x, 2, 7), [2, 0, 1], "simple")
self.assertListEqual(e.compare_elevations_within_row(self.m_3, 0, 10), [0, 0, 3], "simple")
self.assertListEqual(e.compare_elevations_within_row(self.m_3, 1, 10), [2, 0, 1], "simple")
def test_update(self):
print("\n\nupdate_elevation:", end = " ")
e.update_elevation(self.m_3, [0, 1], [2, 1], 1)
self.assertListEqual(self.m_3, [[11, 13, 16], [9, 9, 14], [4, 28, 2]], "vertical entire")
e.update_elevation(self.m_3, [0, 0], [1, 0], 1)
self.assertListEqual(self.m_3, [[12, 13, 16], [10, 9, 14], [4, 28, 2]], "vertical partial")
e.update_elevation(self.m_4, [1, 0], [1, 3], 2)
self.assertListEqual(self.m_4, [[8, 31, 18, 15], [46, 32, 13, 27], [55, 59, 22, 62], [5, 14, 46, 60]], "horizontal entire")
e.update_elevation(self.m_4, [2, 1], [2, 2], 2)
self.assertListEqual(self.m_4, [[8, 31, 18, 15], [46, 32, 13, 27], [55, 61, 24, 62], [5, 14, 46, 60]], "horizontal partial")
e.update_elevation(self.m_2, [0, 0], [0, 0], 3)
self.assertListEqual(self.m_2, [[5, 3], [5, 1]], "edge case one spot")
e.update_elevation(self.m_2, [1, 0], [1, 1], 5)
self.assertListEqual(self.m_2, [[5, 3], [10, 6]], "horizontal entire")
e.update_elevation(self.m_5, [0, 1], [4, 1], -1)
self.assertListEqual(self.m_5, [[58, 102, 45, 96, 115], [70, 7, 56, 31, 55], [107, 77, 52, 89, 91], [35, 39, 20, 16, 112], [60, 92, 67, 99, 37]], "vertical entire")
e.update_elevation(self.m_5, [2, 1], [2, 3], 1000)
self.assertListEqual(self.m_5, [[58, 102, 45, 96, 115], [70, 7, 56, 31, 55], [107, 1077, 1052, 1089, 91], [35, 39, 20, 16, 112], [60, 92, 67, 99, 37]], "horizontal partial")
e.update_elevation(self.m_5, [4, 4], [4, 4], 10)
self.assertListEqual(self.m_5, [[58, 102, 45, 96, 115], [70, 7, 56, 31, 55], [107, 1077, 1052, 1089, 91], [35, 39, 20, 16, 112], [60, 92, 67, 99, 47]], "edge case one spot")
def test_average(self):
print("\n\nget_average_elevation:", end = " ")
self.assertAlmostEqual(e.get_average_elevation(self.m_1), 1, 3, "simple")
self.assertAlmostEqual(e.get_average_elevation(self.m_2), 2.75, 3, "simple")
self.assertAlmostEqual(e.get_average_elevation(self.m_3), 11.4444, 3, "simple")
self.assertAlmostEqual(e.get_average_elevation(self.m_4), 31.5625, 3, "simple")
self.assertAlmostEqual(e.get_average_elevation(self.m_5), 65.32, 3, "simple")
def test_peak(self):
print("\n\nfind_peak:", end = " ")
self.assertListEqual(e.find_peak(self.m_1), [0, 0], "simple")
self.assertListEqual(e.find_peak(self.m_2), [1, 0], "simple")
self.assertListEqual(e.find_peak(self.m_3), [2, 1], "simple")
self.assertListEqual(e.find_peak(self.m_4), [2, 3], "simple")
self.assertListEqual(e.find_peak(self.m_5), [0, 4], "simple")
def test_is_sink(self):
print("\nis_sink:", end = " ")
self.assertTrue(e.is_sink(self.m_1, [0, 0]), "edge case no surrounding")
self.assertFalse(e.is_sink(self.m_1, [1, 1]), "out of bound")
self.assertFalse(e.is_sink(self.m_2, [0, 0]), "special case diagonal")
self.assertTrue(e.is_sink(self.m_2, [1, 1]), "edge case corner")
self.m_x = [[2, 2, 2], [2, 2, 2], [2, 2, 2]]
self.assertTrue(e.is_sink(self.m_x, [1, 1]), "edge case all same")
self.assertFalse(e.is_sink(self.m_3, [1, 2]), "simple")
self.assertTrue(e.is_sink(self.m_4, [1, 2]), "simple")
self.assertFalse(e.is_sink(self.m_4, [3, 1]), "simple")
self.assertFalse(e.is_sink(self.m_4, [-1, 2]), "out of bound")
self.assertTrue(e.is_sink(self.m_5, [1, 1]), "simple")
self.assertTrue(e.is_sink(self.m_5, [3, 3]), "simple")
self.assertFalse(e.is_sink(self.m_5, [3, 2]), "simple")
def test_local_sink(self):
print("\n\nfind_local_sink:", end = " ")
self.assertListEqual(e.find_local_sink(self.m_1, [0, 0]), [0, 0], "edge case no surrounding")
self.assertListEqual(e.find_local_sink(self.m_2, [1, 1]), [1, 1], "edge case already sink")
self.assertListEqual(e.find_local_sink(self.m_2, [0, 0]), [1, 1], "simple diagonal")
self.assertListEqual(e.find_local_sink(self.m_3, [1, 1]), [2, 2], "simple diagonal")
self.assertListEqual(e.find_local_sink(self.m_3, [1, 2]), [2, 2], "simple diagonal")
self.assertListEqual(e.find_local_sink(self.m_4, [1, 1]), [0, 0], "simple diagonal")
self.assertListEqual(e.find_local_sink(self.m_4, [2, 2]), [1, 2], "simple")
self.assertListEqual(e.find_local_sink(self.m_5, [1, 1]), [1, 1], "edge case already sink")
self.assertListEqual(e.find_local_sink(self.m_5, [2, 3]), [3, 3], "simple")
def test_hike(self):
print("\ncan_hike_to:", end = " ")
self.assertTrue(e.can_hike_to(self.m_3, [2, 2], [0, 2], 100), "simple")
self.assertTrue(e.can_hike_to(self.m_3, [2, 2], [0, 2], 14), "edge case end with 0")
self.assertFalse(e.can_hike_to(self.m_3, [2, 2], [0, 2], 13), "simple not enough")
self.m_x = [[7, 7, 7], [1, 1, 7], [1, 1, 6]]
self.assertTrue(e.can_hike_to(self.m_x, [2, 2], [0, 0], 100), "simple")
self.assertTrue(e.can_hike_to(self.m_x, [2, 2], [0, 0], 1), "edge case end with 0")
self.assertFalse(e.can_hike_to(self.m_x, [2, 2], [0, 0], 0), "edge case end not enough")
self.m_x = [[7, 7, 7], [1, 1, 7], [1, 1, 7]]
self.assertTrue(e.can_hike_to(self.m_x, [2, 2], [0, 0], 0), "edge case end with 0")
self.assertTrue(e.can_hike_to(self.m_4, [2, 2], [0, 1], 31), "edge case end with 0")
self.assertFalse(e.can_hike_to(self.m_4, [2, 2], [0, 1], 30), "edge case not enough")
self.m_x = [[10007, 100, 9], [1, 1, 8], [1, 1, 7]]
self.assertTrue(e.can_hike_to(self.m_x, [2, 2], [0, 0], 10010), "simple")
self.assertFalse(e.can_hike_to(self.m_x, [2, 2], [0, 0], 9999), "edge case not enough")
self.assertFalse(e.can_hike_to(self.m_x, [2, 2], [0, 0], 50), "simple not enough")
self.m_x = [[100, 1, 1], [60, 1, 1], [59, 58, 50]]
self.assertTrue(e.can_hike_to(self.m_x, [2, 2], [1, 0], 20), "simple")
self.assertFalse(e.can_hike_to(self.m_x, [2, 2], [0, 0], 10), "simple not enough")
def test_resolution(self):
print("\n\nget_lower_resolution", end = " ")
self.assertListEqual(e.get_lower_resolution(self.m_1), [[1]], "odd size")
self.assertListEqual(e.get_lower_resolution(self.m_2), [[2]], "simple")
self.assertListEqual(e.get_lower_resolution(self.m_3), [[10, 15], [15, 2]], "odd size")
self.assertListEqual(e.get_lower_resolution(self.m_4), [[28, 17], [33, 47]], "simple")
self.assertListEqual(e.get_lower_resolution(self.m_5), [[59, 57, 85], [65, 44, 101], [76, 83, 37]], "odd size")
def tearDown(self):
pass
def basic():
suite = unittest.TestSuite()
suite.addTest(A2CheckerV2("test_compare"))
suite.addTest(A2CheckerV2("test_update"))
suite.addTest(A2CheckerV2("test_average"))
suite.addTest(A2CheckerV2("test_peak"))
return suite
def sink():
suite = unittest.TestSuite()
suite.addTest(A2CheckerV2("test_is_sink"))
suite.addTest(A2CheckerV2("test_local_sink"))
return suite
def complex():
suite = unittest.TestSuite()
suite.addTest(A2CheckerV2("test_hike"))
suite.addTest(A2CheckerV2("test_resolution"))
return suite
if (__name__ == "__main__"):
runner = unittest.TextTestRunner()
basic = runner.run(basic())
if (basic.wasSuccessful()):
print("Passed basic tests!")
print("======================================================================")
sink = runner.run(sink())
if (sink.wasSuccessful()):
print("Passed sink related tests!")
print("======================================================================")
complex = runner.run(complex())
if (complex.wasSuccessful()):
print("Passed complex tests!")
| 55.347305 | 181 | 0.574813 |
b8a65cf2245059e395bdbf9b815ef2740107d41e | 139 | py | Python | inference/preprocessors/__init__.py | narumiruna/inference-template | 4946f4de632c49a8b65b484a4eb77979da8d8208 | [
"MIT"
] | null | null | null | inference/preprocessors/__init__.py | narumiruna/inference-template | 4946f4de632c49a8b65b484a4eb77979da8d8208 | [
"MIT"
] | null | null | null | inference/preprocessors/__init__.py | narumiruna/inference-template | 4946f4de632c49a8b65b484a4eb77979da8d8208 | [
"MIT"
] | null | null | null | import sys
from ..utils import get_factory
from .dummy import DummyPreprocessor
PreprocessorFactory = get_factory(sys.modules[__name__])
| 19.857143 | 56 | 0.827338 |
976aed87b167dfbac1130de434f799b9ac9450fe | 8,308 | py | Python | website/addons/box/model.py | alexschiller/osf.io | 4122d4be152c6189142c2ebb19cfdee09c77035d | [
"Apache-2.0"
] | null | null | null | website/addons/box/model.py | alexschiller/osf.io | 4122d4be152c6189142c2ebb19cfdee09c77035d | [
"Apache-2.0"
] | null | null | null | website/addons/box/model.py | alexschiller/osf.io | 4122d4be152c6189142c2ebb19cfdee09c77035d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import httplib as http
import logging
import os
from box import CredentialsV2, BoxClient
from box.client import BoxClientException
from modularodm import fields
from oauthlib.oauth2 import InvalidGrantError
import requests
from urllib3.exceptions import MaxRetryError
from framework.auth import Auth
from framework.exceptions import HTTPError
from website.addons.base import exceptions
from website.addons.base import AddonOAuthUserSettingsBase, AddonOAuthNodeSettingsBase
from website.addons.base import StorageAddonBase
from website.util import api_v2_url
from addons.box import settings
from addons.box.serializer import BoxSerializer
from website.oauth.models import ExternalProvider
logger = logging.getLogger(__name__)
class Box(ExternalProvider):
name = 'Box'
short_name = 'box'
client_id = settings.BOX_KEY
client_secret = settings.BOX_SECRET
auth_url_base = settings.BOX_OAUTH_AUTH_ENDPOINT
callback_url = settings.BOX_OAUTH_TOKEN_ENDPOINT
auto_refresh_url = callback_url
refresh_time = settings.REFRESH_TIME
expiry_time = settings.EXPIRY_TIME
default_scopes = ['root_readwrite']
def handle_callback(self, response):
"""View called when the Oauth flow is completed. Adds a new BoxUserSettings
record to the user and saves the user's access token and account info.
"""
client = BoxClient(CredentialsV2(
response['access_token'],
response['refresh_token'],
settings.BOX_KEY,
settings.BOX_SECRET,
))
about = client.get_user_info()
return {
'provider_id': about['id'],
'display_name': about['name'],
'profile_url': 'https://app.box.com/profile/{0}'.format(about['id'])
}
class BoxUserSettings(AddonOAuthUserSettingsBase):
"""Stores user-specific box information
"""
oauth_provider = Box
serializer = BoxSerializer
def revoke_remote_oauth_access(self, external_account):
try:
# TODO: write client for box, stop using third-party lib
requests.request(
'POST',
settings.BOX_OAUTH_REVOKE_ENDPOINT,
params={
'client_id': settings.BOX_KEY,
'client_secret': settings.BOX_SECRET,
'token': external_account.oauth_key,
}
)
except requests.HTTPError:
pass
class BoxNodeSettings(StorageAddonBase, AddonOAuthNodeSettingsBase):
oauth_provider = Box
serializer = BoxSerializer
folder_id = fields.StringField(default=None)
folder_name = fields.StringField()
folder_path = fields.StringField()
_api = None
@property
def api(self):
"""authenticated ExternalProvider instance"""
if self._api is None:
self._api = Box(self.external_account)
return self._api
@property
def display_name(self):
return '{0}: {1}'.format(self.config.full_name, self.folder_id)
def fetch_folder_name(self):
return self.folder_name
def fetch_full_folder_path(self):
return self.folder_path
def get_folders(self, **kwargs):
folder_id = kwargs.get('folder_id')
if folder_id is None:
return [{
'id': '0',
'path': '/',
'addon': 'box',
'kind': 'folder',
'name': '/ (Full Box)',
'urls': {
# 'folders': node.api_url_for('box_folder_list', folderId=0),
'folders': api_v2_url('nodes/{}/addons/box/folders/'.format(self.owner._id),
params={'id': '0'}
)
}
}]
try:
Box(self.external_account).refresh_oauth_key()
client = BoxClient(self.external_account.oauth_key)
except BoxClientException:
raise HTTPError(http.FORBIDDEN)
try:
metadata = client.get_folder(folder_id)
except BoxClientException:
raise HTTPError(http.NOT_FOUND)
except MaxRetryError:
raise HTTPError(http.BAD_REQUEST)
# Raise error if folder was deleted
if metadata.get('is_deleted'):
raise HTTPError(http.NOT_FOUND)
folder_path = '/'.join(
[
x['name']
for x in metadata['path_collection']['entries']
] + [metadata['name']]
)
return [
{
'addon': 'box',
'kind': 'folder',
'id': item['id'],
'name': item['name'],
'path': os.path.join(folder_path, item['name']).replace('All Files', ''),
'urls': {
'folders': api_v2_url('nodes/{}/addons/box/folders/'.format(self.owner._id),
params={'id': item['id']}
)
}
}
for item in metadata['item_collection']['entries']
if item['type'] == 'folder'
]
def set_folder(self, folder_id, auth):
self.folder_id = str(folder_id)
self.folder_name, self.folder_path = self._folder_data(folder_id)
self.nodelogger.log(action='folder_selected', save=True)
def _folder_data(self, folder_id):
# Split out from set_folder for ease of testing, due to
# outgoing requests. Should only be called by set_folder
try:
Box(self.external_account).refresh_oauth_key(force=True)
except InvalidGrantError:
raise exceptions.InvalidAuthError()
try:
client = BoxClient(self.external_account.oauth_key)
folder_data = client.get_folder(self.folder_id)
except BoxClientException:
raise exceptions.InvalidFolderError()
folder_name = folder_data['name'].replace('All Files', '') or '/ (Full Box)'
folder_path = '/'.join(
[x['name'] for x in folder_data['path_collection']['entries'] if x['name']]
+ [folder_data['name']]
).replace('All Files', '') or '/'
return folder_name, folder_path
def clear_settings(self):
self.folder_id = None
self.folder_name = None
self.folder_path = None
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
folder_id = self.folder_id
self.clear_settings()
if add_log:
extra = {'folder_id': folder_id}
self.nodelogger.log(action='node_deauthorized', extra=extra, save=True)
self.clear_auth()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
try:
Box(self.external_account).refresh_oauth_key()
return {'token': self.external_account.oauth_key}
except BoxClientException as error:
raise HTTPError(error.status_code, data={'message_long': error.message})
def serialize_waterbutler_settings(self):
if self.folder_id is None:
raise exceptions.AddonError('Folder is not configured')
return {'folder': self.folder_id}
def create_waterbutler_log(self, auth, action, metadata):
self.owner.add_log(
'box_{0}'.format(action),
auth=auth,
params={
'path': metadata['materialized'],
'project': self.owner.parent_id,
'node': self.owner._id,
'folder': self.folder_id,
'urls': {
'view': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='view', path=metadata['path']),
'download': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='download', path=metadata['path']),
},
},
)
##### Callback overrides #####
def after_delete(self, node=None, user=None):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
| 33.099602 | 144 | 0.597857 |
15364e979f3eb28ed94ca2842efa3465c725ee7c | 23,017 | py | Python | src/electionguard/guardian.py | john-s-morgan/electionguard-python | f0a25b0ac99fac5c8d4e3545055dbdd05968d021 | [
"MIT"
] | null | null | null | src/electionguard/guardian.py | john-s-morgan/electionguard-python | f0a25b0ac99fac5c8d4e3545055dbdd05968d021 | [
"MIT"
] | null | null | null | src/electionguard/guardian.py | john-s-morgan/electionguard-python | f0a25b0ac99fac5c8d4e3545055dbdd05968d021 | [
"MIT"
] | null | null | null | # pylint: disable=too-many-public-methods
# pylint: disable=too-many-instance-attributes
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, TypeVar
from .auxiliary import (
AuxiliaryKeyPair,
AuxiliaryPublicKey,
AuxiliaryDecrypt,
AuxiliaryEncrypt,
)
from .ballot import SubmittedBallot
from .decryption import (
compute_compensated_decryption_share,
compute_compensated_decryption_share_for_ballot,
compute_decryption_share,
compute_decryption_share_for_ballot,
)
from .decryption_share import CompensatedDecryptionShare, DecryptionShare
from .election import CiphertextElectionContext
from .election_polynomial import PUBLIC_COMMITMENT
from .elgamal import elgamal_combine_public_keys
from .group import ElementModQ
from .key_ceremony import (
CeremonyDetails,
ELECTION_JOINT_PUBLIC_KEY,
ELECTION_PUBLIC_KEY,
ElectionKeyPair,
ElectionPartialKeyBackup,
ElectionPartialKeyChallenge,
ElectionPartialKeyVerification,
ElectionPublicKey,
generate_election_key_pair,
generate_election_partial_key_backup,
generate_election_partial_key_challenge,
generate_rsa_auxiliary_key_pair,
PublicKeySet,
verify_election_partial_key_backup,
verify_election_partial_key_challenge,
)
from .logs import log_warning
from .rsa import rsa_encrypt, rsa_decrypt
from .serializable import Serializable
from .schnorr import SchnorrProof
from .tally import CiphertextTally
from .types import BALLOT_ID, GUARDIAN_ID
from .utils import get_optional
@dataclass
class GuardianRecord(Serializable):
"""
Published record containing all required information per Guardian
for Election record used in verification processes
"""
guardian_id: GUARDIAN_ID
"""Unique identifier of the guardian"""
sequence_order: int
"""
Unique sequence order of the guardian indicating the order
in which the guardian should be processed
"""
election_public_key: ELECTION_PUBLIC_KEY
"""
Guardian's election public key for encrypting election objects.
"""
election_commitments: List[PUBLIC_COMMITMENT]
"""
    Commitment for each coefficient of the guardian's secret polynomial.
First commitment is and should be identical to election_public_key.
"""
election_proofs: List[SchnorrProof]
"""
    Proofs for each commitment for each coefficient of the guardian's secret polynomial.
First proof is the proof for the election_public_key.
"""
def publish_guardian_record(election_public_key: ElectionPublicKey) -> GuardianRecord:
"""
Published record containing all required information per Guardian
for Election record used in verification processes
:param election_public_key: Guardian's election public key
:return: Guardian's record
"""
return GuardianRecord(
election_public_key.owner_id,
election_public_key.sequence_order,
election_public_key.key,
election_public_key.coefficient_commitments,
election_public_key.coefficient_proofs,
)
@dataclass
class PrivateGuardianRecord(Serializable):
"""Unpublishable private record containing information per Guardian."""
guardian_id: GUARDIAN_ID
"""Unique identifier of the guardian"""
election_keys: ElectionKeyPair
"""Private election Key pair of this guardian"""
auxiliary_keys: AuxiliaryKeyPair
"""Private auxiliary key pair of this guardian"""
backups_to_share: Dict[GUARDIAN_ID, ElectionPartialKeyBackup]
"""This guardian's partial key backups that will be shared to other guardians"""
guardian_auxiliary_public_keys: Dict[GUARDIAN_ID, AuxiliaryPublicKey]
"""Received auxiliary public keys that are shared with this guardian"""
guardian_election_public_keys: Dict[GUARDIAN_ID, ElectionPublicKey]
"""Received election public keys that are shared with this guardian"""
guardian_election_partial_key_backups: Dict[GUARDIAN_ID, ElectionPartialKeyBackup]
"""Received partial key backups that are shared with this guardian"""
guardian_election_partial_key_verifications: Dict[
GUARDIAN_ID, ElectionPartialKeyVerification
]
"""Verifications of other guardian's backups"""
# pylint: disable=too-many-instance-attributes
class Guardian:
"""
Guardian of election responsible for safeguarding information and decrypting results.
The first half of the guardian involves the key exchange known as the key ceremony.
The second half relates to the decryption process.
"""
id: str
sequence_order: int # Cannot be zero
ceremony_details: CeremonyDetails
_auxiliary_keys: AuxiliaryKeyPair
_election_keys: ElectionKeyPair
_backups_to_share: Dict[GUARDIAN_ID, ElectionPartialKeyBackup]
"""
The collection of this guardian's partial key backups that will be shared to other guardians
"""
# From Other Guardians
_guardian_auxiliary_public_keys: Dict[GUARDIAN_ID, AuxiliaryPublicKey]
"""
The collection of other guardians' auxiliary public keys that are shared with this guardian
"""
_guardian_election_public_keys: Dict[GUARDIAN_ID, ElectionPublicKey]
"""
The collection of other guardians' election public keys that are shared with this guardian
"""
_guardian_election_partial_key_backups: Dict[GUARDIAN_ID, ElectionPartialKeyBackup]
"""
The collection of other guardians' partial key backups that are shared with this guardian
"""
_guardian_election_partial_key_verifications: Dict[
GUARDIAN_ID, ElectionPartialKeyVerification
]
"""
The collection of other guardians' verifications that they shared their backups correctly
"""
def __init__(
self,
id: str,
sequence_order: int,
number_of_guardians: int,
quorum: int,
nonce_seed: Optional[ElementModQ] = None,
) -> None:
"""
Initialize a guardian with the specified arguments.
:param id: the unique identifier for the guardian
:param sequence_order: a unique number in [1, 256) that identifies this guardian
:param number_of_guardians: the total number of guardians that will participate in the election
:param quorum: the count of guardians necessary to decrypt
:param nonce_seed: an optional `ElementModQ` value that can be used to generate the `ElectionKeyPair`.
It is recommended to only use this field for testing.
"""
self.id = id
self.sequence_order = sequence_order
self.set_ceremony_details(number_of_guardians, quorum)
self._backups_to_share = {}
self._guardian_auxiliary_public_keys = {}
self._guardian_election_public_keys = {}
self._guardian_election_partial_key_backups = {}
self._guardian_election_partial_key_verifications = {}
self.generate_auxiliary_key_pair()
        self.generate_election_key_pair(nonce_seed)
def reset(self, number_of_guardians: int, quorum: int) -> None:
"""
Reset guardian to initial state.
:param number_of_guardians: Number of guardians in election
:param quorum: Quorum of guardians required to decrypt
"""
self._backups_to_share.clear()
self._guardian_auxiliary_public_keys.clear()
self._guardian_election_public_keys.clear()
self._guardian_election_partial_key_backups.clear()
self._guardian_election_partial_key_verifications.clear()
self.set_ceremony_details(number_of_guardians, quorum)
self.generate_auxiliary_key_pair()
self.generate_election_key_pair()
def publish(self) -> GuardianRecord:
"""Publish record of guardian with all required information."""
return publish_guardian_record(self._election_keys.share())
def export_private_data(self) -> PrivateGuardianRecord:
"""Export private data of guardian. Warning cannot be published."""
return PrivateGuardianRecord(
self.id,
self._election_keys,
self._auxiliary_keys,
self._backups_to_share,
self._guardian_auxiliary_public_keys,
self._guardian_election_public_keys,
self._guardian_election_partial_key_backups,
self._guardian_election_partial_key_verifications,
)
def set_ceremony_details(self, number_of_guardians: int, quorum: int) -> None:
"""
Set ceremony details for election.
:param number_of_guardians: Number of guardians in election
:param quorum: Quorum of guardians required to decrypt
"""
self.ceremony_details = CeremonyDetails(number_of_guardians, quorum)
# Public Keys
def share_public_keys(self) -> PublicKeySet:
"""
Share public election and auxiliary keys for guardian.
:return: Public set of election and auxiliary keys
"""
return PublicKeySet(
self._election_keys.share(),
self._auxiliary_keys.share(),
)
def save_guardian_public_keys(self, public_key_set: PublicKeySet) -> None:
"""
Save public election and auxiliary keys for another guardian.
:param public_key_set: Public set of election and auxiliary keys
"""
self.save_auxiliary_public_key(public_key_set.auxiliary)
self.save_election_public_key(public_key_set.election)
def all_public_keys_received(self) -> bool:
"""
True if all election and auxiliary public keys have been received.
:return: All election and auxiliary public keys backups received
"""
return (
self.all_auxiliary_public_keys_received()
and self.all_election_public_keys_received()
)
def generate_auxiliary_key_pair(
self,
generate_auxiliary_key_pair: Callable[
[GUARDIAN_ID, int], AuxiliaryKeyPair
] = generate_rsa_auxiliary_key_pair,
) -> None:
"""
Generate auxiliary key pair.
:param generate_auxiliary_key_pair: Function to generate auxiliary key pair
"""
self._auxiliary_keys = generate_auxiliary_key_pair(self.id, self.sequence_order)
self.save_auxiliary_public_key(self.share_auxiliary_public_key())
def share_auxiliary_public_key(self) -> AuxiliaryPublicKey:
"""
Share auxiliary public key with another guardian.
:return: Auxiliary Public Key
"""
return self._auxiliary_keys.share()
def save_auxiliary_public_key(self, key: AuxiliaryPublicKey) -> None:
"""
Save a guardians auxiliary public key.
:param key: Auxiliary public key
"""
self._guardian_auxiliary_public_keys[key.owner_id] = key
def all_auxiliary_public_keys_received(self) -> bool:
"""
True if all auxiliary public keys have been received.
:return: All auxiliary public keys backups received
"""
return (
len(self._guardian_auxiliary_public_keys)
== self.ceremony_details.number_of_guardians
)
def generate_election_key_pair(self, nonce: ElementModQ = None) -> None:
"""Generate election key pair for encrypting/decrypting election."""
self._election_keys = generate_election_key_pair(
self.id, self.sequence_order, self.ceremony_details.quorum, nonce
)
self.save_election_public_key(self.share_election_public_key())
def share_election_public_key(self) -> ElectionPublicKey:
"""
Share election public key with another guardian.
:return: Election public key
"""
return self._election_keys.share()
def save_election_public_key(self, key: ElectionPublicKey) -> None:
"""
Save a guardians election public key.
:param key: Election public key
"""
self._guardian_election_public_keys[key.owner_id] = key
def all_election_public_keys_received(self) -> bool:
"""
True if all election public keys have been received.
:return: All election public keys backups received
"""
return (
len(self._guardian_election_public_keys)
== self.ceremony_details.number_of_guardians
)
def generate_election_partial_key_backups(
self, encrypt: AuxiliaryEncrypt = rsa_encrypt
) -> bool:
"""
Generate all election partial key backups based on existing public keys.
:param encrypt: Encryption function using auxiliary key
"""
if not self.all_auxiliary_public_keys_received():
log_warning(
f"guardian; {self.id} could not generate election partial key backups: missing auxiliary keys"
)
return False
for auxiliary_key in self._guardian_auxiliary_public_keys.values():
backup = generate_election_partial_key_backup(
self.id, self._election_keys.polynomial, auxiliary_key, encrypt
)
if backup is None:
log_warning(
f"guardian; {self.id} could not generate election partial key backups: failed to encrypt"
)
return False
self._backups_to_share[auxiliary_key.owner_id] = backup
return True
# Election Partial Key Backup
def share_election_partial_key_backup(
self, designated_id: GUARDIAN_ID
) -> Optional[ElectionPartialKeyBackup]:
"""
Share election partial key backup with another guardian.
:param designated_id: Designated guardian
:return: Election partial key backup or None
"""
return self._backups_to_share.get(designated_id)
def share_election_partial_key_backups(self) -> List[ElectionPartialKeyBackup]:
"""
Share all election partial key backups.
:return: Election partial key backup or None
"""
return list(self._backups_to_share.values())
def save_election_partial_key_backup(
self, backup: ElectionPartialKeyBackup
) -> None:
"""
Save election partial key backup from another guardian.
:param backup: Election partial key backup
"""
self._guardian_election_partial_key_backups[backup.owner_id] = backup
def all_election_partial_key_backups_received(self) -> bool:
"""
True if all election partial key backups have been received.
:return: All election partial key backups received
"""
return (
len(self._guardian_election_partial_key_backups)
== self.ceremony_details.number_of_guardians - 1
)
# Verification
def verify_election_partial_key_backup(
self,
guardian_id: GUARDIAN_ID,
decrypt: AuxiliaryDecrypt = rsa_decrypt,
) -> Optional[ElectionPartialKeyVerification]:
"""
Verify election partial key backup value is in polynomial.
:param guardian_id: Owner of backup to verify
        :param decrypt: Auxiliary decrypt method
:return: Election partial key verification or None
"""
backup = self._guardian_election_partial_key_backups.get(guardian_id)
public_key = self._guardian_election_public_keys.get(guardian_id)
if backup is None or public_key is None:
return None
return verify_election_partial_key_backup(
self.id, backup, public_key, self._auxiliary_keys, decrypt
)
def publish_election_backup_challenge(
self, guardian_id: GUARDIAN_ID
) -> Optional[ElectionPartialKeyChallenge]:
"""
Publish election backup challenge of election partial key verification.
:param guardian_id: Owner of election key
:return: Election partial key challenge or None
"""
backup_in_question = self._backups_to_share.get(guardian_id)
if backup_in_question is None:
return None
return generate_election_partial_key_challenge(
backup_in_question, self._election_keys.polynomial
)
def verify_election_partial_key_challenge(
self, challenge: ElectionPartialKeyChallenge
) -> ElectionPartialKeyVerification:
"""
Verify challenge of previous verification of election partial key.
:param challenge: Election partial key challenge
:return: Election partial key verification
"""
return verify_election_partial_key_challenge(self.id, challenge)
def save_election_partial_key_verification(
self, verification: ElectionPartialKeyVerification
) -> None:
"""
Save election partial key verification from another guardian.
:param verification: Election partial key verification
"""
self._guardian_election_partial_key_verifications[
verification.designated_id
] = verification
def all_election_partial_key_backups_verified(self) -> bool:
"""
True if all election partial key backups have been verified.
:return: All election partial key backups verified
"""
required = self.ceremony_details.number_of_guardians - 1
if len(self._guardian_election_partial_key_verifications) != required:
return False
for verification in self._guardian_election_partial_key_verifications.values():
if not verification.verified:
return False
return True
# Joint Key
def publish_joint_key(self) -> Optional[ELECTION_JOINT_PUBLIC_KEY]:
"""
Create the joint election key from the public keys of all guardians.
:return: Optional joint key for election
"""
if not self.all_election_public_keys_received():
return None
if not self.all_election_partial_key_backups_verified():
return None
public_keys = map(
lambda public_key: public_key.key,
self._guardian_election_public_keys.values(),
)
return elgamal_combine_public_keys(public_keys)
def share_other_guardian_key(self, guardian_id: GUARDIAN_ID) -> ElectionPublicKey:
"""Share other guardians keys shared during key ceremony."""
return get_optional(self._guardian_election_public_keys.get(guardian_id))
def compute_tally_share(
self, tally: CiphertextTally, context: CiphertextElectionContext
) -> Optional[DecryptionShare]:
"""
Compute the decryption share of tally.
:param tally: Ciphertext tally to get share of
:param context: Election context
:return: Decryption share of tally or None if failure
"""
return compute_decryption_share(
self._election_keys,
tally,
context,
)
def compute_ballot_shares(
self, ballots: List[SubmittedBallot], context: CiphertextElectionContext
) -> Dict[BALLOT_ID, Optional[DecryptionShare]]:
"""
Compute the decryption shares of ballots.
:param ballots: List of ciphertext ballots to get shares of
:param context: Election context
:return: Decryption shares of ballots or None if failure
"""
shares = {}
for ballot in ballots:
share = compute_decryption_share_for_ballot(
self._election_keys,
ballot,
context,
)
shares[ballot.object_id] = share
return shares
def compute_compensated_tally_share(
self,
missing_guardian_id: GUARDIAN_ID,
tally: CiphertextTally,
context: CiphertextElectionContext,
decrypt: AuxiliaryDecrypt = rsa_decrypt,
) -> Optional[CompensatedDecryptionShare]:
"""
Compute the compensated decryption share of a tally for a missing guardian.
        :param missing_guardian_id: Missing guardian's id
:param tally: Ciphertext tally to get share of
:param context: Election context
:param decrypt: Auxiliary decrypt method
:return: Compensated decryption share of tally or None if failure
"""
# Ensure missing guardian information available
missing_guardian_key = self._guardian_election_public_keys.get(
missing_guardian_id
)
missing_guardian_backup = self._guardian_election_partial_key_backups.get(
missing_guardian_id
)
if missing_guardian_key is None or missing_guardian_backup is None:
return None
return compute_compensated_decryption_share(
self.share_election_public_key(),
self._auxiliary_keys,
missing_guardian_key,
missing_guardian_backup,
tally,
context,
decrypt,
)
def compute_compensated_ballot_shares(
self,
missing_guardian_id: GUARDIAN_ID,
ballots: List[SubmittedBallot],
context: CiphertextElectionContext,
decrypt: AuxiliaryDecrypt = rsa_decrypt,
) -> Dict[BALLOT_ID, Optional[CompensatedDecryptionShare]]:
"""
        Compute the compensated decryption share of each ballot for a missing guardian.
        :param missing_guardian_id: Missing guardian's id
:param ballots: List of ciphertext ballots to get shares of
:param context: Election context
:param decrypt: Auxiliary decrypt method
:return: Compensated decryption shares of ballots or None if failure
"""
shares: Dict[BALLOT_ID, Optional[CompensatedDecryptionShare]] = {
ballot.object_id: None for ballot in ballots
}
# Ensure missing guardian information available
missing_guardian_key = self._guardian_election_public_keys.get(
missing_guardian_id
)
missing_guardian_backup = self._guardian_election_partial_key_backups.get(
missing_guardian_id
)
if missing_guardian_key is None or missing_guardian_backup is None:
return shares
for ballot in ballots:
share = compute_compensated_decryption_share_for_ballot(
self.share_election_public_key(),
self._auxiliary_keys,
missing_guardian_key,
missing_guardian_backup,
ballot,
context,
decrypt,
)
shares[ballot.object_id] = share
return shares
_SHARE = TypeVar("_SHARE")
def get_valid_ballot_shares(
ballot_shares: Dict[BALLOT_ID, Optional[_SHARE]]
) -> Dict[BALLOT_ID, _SHARE]:
"""Get valid ballot shares."""
filtered_shares = {}
for ballot_id, ballot_share in ballot_shares.items():
if ballot_share is not None:
filtered_shares[ballot_id] = ballot_share
return filtered_shares
| 35.740683 | 110 | 0.686319 |
1ee045dc2780b935c50ae6764f41fbf123b73d79 | 3,112 | py | Python | plugins/dbnd-databricks/src/dbnd_databricks/databricks_config.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | plugins/dbnd-databricks/src/dbnd_databricks/databricks_config.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | plugins/dbnd-databricks/src/dbnd_databricks/databricks_config.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | import logging
from typing import Dict, List
from dbnd import parameter
from dbnd._core.constants import SparkClusters
from dbnd._core.task.config import Config
from dbnd_spark.spark_config import SparkEngineConfig
logger = logging.getLogger(__name__)
class DatabricksCloud(object):
aws = "aws"
azure = "azure"
class DatabricksConfig(SparkEngineConfig):
"""Databricks cloud for Apache Spark """
_conf__task_family = "databricks"
cluster_type = SparkClusters.databricks
stop_session_on_finish = False
cluster_id = parameter(default=None).help("existing cluster id")[str]
cloud_type = parameter().help("cloud type: aws/azure")[str]
conn_id = parameter.value(default="databricks_default").help(
"databricks connection settings"
)[str]
connection_retry_limit = parameter.value(default=3).help(
"databricks connection - retry limit"
)[int]
connection_retry_delay = parameter.value(default=1).help(
"databricks connection - delay in between retries"
)[int]
status_polling_interval_seconds = parameter(default=10).help(
"seconds to sleep between polling databricks for job status."
)[int]
cluster_log_conf = parameter(default={}).help(
        'location for logs, like: {"s3": {"destination": "s3://<BUCKET>/<KEY>", "region": "us-east-1"}}'
)
# new cluster config
num_workers = parameter(default=0).help("number of workers as in databricks api.")[
int
]
init_scripts = parameter(default=[]).help(
"init script list, default:{ 's3': { 'destination' : 's3://init_script_bucket/prefix', 'region' : 'us-west-2' } }'"
)[List]
spark_version = parameter().help("spark version")[str]
spark_conf = parameter(default={}).help("spark config")[Dict]
node_type_id = parameter(default="").help("nodes for spark machines")[str]
spark_env_vars = parameter(default={}).help("spark env vars")[Dict]
def get_spark_ctrl(self, task_run):
from dbnd_databricks.databricks import DatabricksCtrl
return DatabricksCtrl(task_run=task_run)
def _validate(self):
super(DatabricksConfig, self)._validate()
if not self.cluster_id:
logger.warning(
"no databricks.cluster_id is set, will create a new databricks cluster - please remember"
" to configure your cluster parameters."
)
class DatabricksAzureConfig(Config):
_conf__task_family = "databricks_azure"
local_dbfs_mount = parameter(description="local mount for dbfs")[str]
class DatabricksAwsConfig(Config):
_conf__task_family = "databricks_aws"
# aws machine related.
ebs_count = parameter(default=1).help("nodes for spark machines")
aws_instance_profile_arn = parameter(default="<>").help(
"IAM profile for spark machines"
)[str]
aws_ebs_volume_type = parameter(default="GENERAL_PURPOSE_SSD").help("EBS type")[str]
aws_ebs_volume_count = parameter(default=1).help("num of EBS volumes")[int]
aws_ebs_volume_size = parameter(default=100).help("size of EBS volume")[int]
| 33.462366 | 123 | 0.693766 |
3bf6a814d90349f5af8b33aedb01ead415d3b1a2 | 669 | py | Python | ramos/exceptions.py | luizrabachini/ramos | 5c2c1d2d848bf481eed4e8a9e784a485581b1615 | [
"MIT"
] | 30 | 2016-07-28T18:43:37.000Z | 2022-02-25T11:49:48.000Z | ramos/exceptions.py | luizrabachini/ramos | 5c2c1d2d848bf481eed4e8a9e784a485581b1615 | [
"MIT"
] | 66 | 2016-07-28T14:48:50.000Z | 2022-03-29T18:09:45.000Z | ramos/exceptions.py | luizrabachini/ramos | 5c2c1d2d848bf481eed4e8a9e784a485581b1615 | [
"MIT"
] | 12 | 2018-10-03T02:31:21.000Z | 2021-06-30T20:26:45.000Z | class InvalidBackendError(Exception):
"""
InvalidBackendError is raised when instances of BackendPool
can't get a backend implementation
"""
def __init__(self, backend_type, backend_id, available_backends):
self.backend_type = backend_type
self.backend_id = backend_id
self.available_backends = available_backends
self.error_message = 'Invalid {} backend: {}'.format(
self.backend_type, self.backend_id
)
super(InvalidBackendError, self).__init__(
self.backend_type,
self.backend_id,
self.error_message,
self.available_backends,
)
| 31.857143 | 69 | 0.648729 |
0cf5e8148185f0800f104e9d37660bb3cbf5de2a | 7,909 | py | Python | namd_xtb.py | fhh2626/NAMD-xtb-QMMM-interface | 81aaac97791ffd6d7baaa162ab3d31684fe986bf | [
"MIT"
] | 5 | 2018-01-10T20:59:42.000Z | 2021-08-23T12:49:50.000Z | namd_xtb.py | fhh2626/NAMD-xtb-QMMM-interface | 81aaac97791ffd6d7baaa162ab3d31684fe986bf | [
"MIT"
] | null | null | null | namd_xtb.py | fhh2626/NAMD-xtb-QMMM-interface | 81aaac97791ffd6d7baaa162ab3d31684fe986bf | [
"MIT"
] | 4 | 2019-03-15T14:55:54.000Z | 2020-03-05T03:34:38.000Z | #!/usr/bin/python3
"""
NAMD-xtb interface v0.11 beta
by Haohao Fu
fhh2626_at_gmail.com
"""
"""
Readme:
this script enables NAMD-xtb QM/MM simulation and non-periodic
semi-emperial BOMD.
Electrostatic embedding was implemented. GFN-xtb hamiltonian
is used by default. If one wants to use other QM method, one can
change the source code accordingly.
Requirement:
Python 3 and numpy (I think Python 2.7+ is also possible)
Usage:
In NAMD config file:
qmSoftware "custom"
qmExecPath xxxxx/namd_xtb.py
QMElecEmbed, qmConfigLine, qmMult, qmCharge are useless.
Then set XTBDIR, QMCHARGE and GFNVER below.
"""
# the complete directory of xtb program
XTBDIR = r'/home/chinfo/software/chem/xtb/xtb'
# the charge of each independent QM part
QMCHARGE = [1]
# the version of gfn
GFNVER = 2
import os
import subprocess
import sys
import numpy as np
def read_namdinput(file):
''' read namd qmmm input file (file)
return:
qm atom -- element (python array)
atomic coordinates -- coor (n * 3D numpy array)
point charge + coor -- pcharge (n * 4D numpy array) '''
with open(file, 'r') as namdinput:
input_lines = namdinput.readlines()
line_count = 0
for line in input_lines:
line_count += 1
# the first line of namd input file contains the number
# of atoms and point charges
if line_count == 1:
atom_pcharge_num = line.strip().split()
# atom number
atom_num = int(atom_pcharge_num[0])
# point charge number
pcharge_num = int(atom_pcharge_num[1])
element = [None for i in range(atom_num)]
coor = np.zeros((atom_num,3))
pcharge = np.zeros((pcharge_num,4))
# the second part contains the atom information
if line_count > 1 and line_count <= atom_num + 1:
atom_data = line.strip().split()
# coordinates
for i in range(3):
coor[line_count - 2,i] = float(atom_data[i])
# elements
element[line_count - 2] = atom_data[3]
# the third part, point charge information
if line_count > 1 + atom_num and line_count <= 1 + atom_num + pcharge_num:
pcharge_data = line.strip().split()
# point charge
pcharge[line_count - 2 - atom_num, 0] = pcharge_data[3]
# coordinates
for i in range(3):
# xtb needs unit of Bohr in pcharge format
pcharge[line_count - 2 - atom_num, i + 1] = float(pcharge_data[i]) * 1.88973
return (element, coor, pcharge)
def write_xtbinput(xyzfile, pchargefile,element, coor, pcharge):
''' write xtb qmmm input file based on:
qm atom -- element (python array)
atomic coordinates -- coor (n * 3D numpy array)
point charge + coor -- pcharge (n * 4D numpy array)
need the directory of:
the file about atom information -- xyzfile
... about point charge information -- pchargefile'''
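    # The point-charge file written below looks like this (values are made up):
    #   1
    #   -0.834000 9.448650 0.000000 0.000000   <- charge, then x y z in Bohr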
# xtb needs a xyz as input file
with open(xyzfile, 'w') as xyz:
xyz.write(str(len(element)))
xyz.write('\n\n')
for i in range(len(element)):
xyz.write('{} {:.6f} {:.6f} {:.6f}\n'.format(element[i], coor[i,0], coor[i,1], coor[i,2]))
# pcharge file
with open(pchargefile, 'w') as pc:
pc.write(str(len(pcharge)))
pc.write('\n')
for i in range(len(pcharge)):
pc.write('{:.6f} {:.6f} {:.6f} {:.6f}\n'.format(pcharge[i,0],
pcharge[i,1],
pcharge[i,2],
pcharge[i,3]))
def convert_input(namdinput, xtbinput_xyz, xtbinput_pc):
''' convert namdinput to xtb input '''
element, coor, pcharge = read_namdinput(namdinput)
write_xtbinput(xtbinput_xyz, xtbinput_pc, element, coor, pcharge)
def read_xtboutput(charge_file, grad_file):
''' read xtb qmmm output file (charge_file, grad_file)
return:
energy (python float)
force + charge -- info (n * 4D numpy array) '''
# read charge file and get the number of atoms
atom_charge = []
with open(charge_file, 'r') as charge:
charge_lines = charge.readlines()
for line in charge_lines:
if line.strip():
atom_charge.append(float(line.strip()))
atom_num = len(atom_charge)
info = np.zeros((atom_num,4))
# read info
with open(grad_file, 'r') as grad:
grad_lines = grad.readlines()
count = 0
for line in grad_lines:
count += 1
# the first line is useless
if count == 1:
continue
# the second line contains the energy
if count == 2:
data = line.strip().split()
                # xtb reports the energy in hartrees; ~630 converts to kcal/mol (1 hartree ≈ 627.5 kcal/mol)
energy = float(data[6]) * 630
if count > atom_num + 2 and count <= atom_num + atom_num + 2:
data = line.strip().split()
# force
for i in range(3):
                    # gradients are in hartree/bohr; scale to kcal/mol/angstrom and negate to obtain forces
info[count - atom_num - 2 - 1, i] = float(data[i].replace('D','E')) * (-630) * 1.88973
info[count - atom_num - 2 - 1, 3] = atom_charge[count - atom_num - 2 - 1]
return energy, info
def write_namdoutput(file, energy, info):
''' write namd qmmm output file based on:
energy (python float)
force + charge -- info (n * 4D numpy array)
need the directory of:
the file read by namd -- file'''
with open(file, 'w') as namdoutput:
namdoutput.write('{:.6f}\n'.format(energy))
for i in range(len(info)):
namdoutput.write('{:.6f} {:.6f} {:.6f} {:.6f}\n'.format(
info[i,0], info[i,1], info[i,2], info[i,3]))
def convert_output(xtboutput_charge, xtboutput_grad, namdoutput):
''' convert xtb output to namd output '''
energy, info = read_xtboutput(xtboutput_charge, xtboutput_grad)
write_namdoutput(namdoutput, energy, info)
def run_qmmm(directory, outputRedirect):
''' the main function called by namd every step,
the complete directory of the input file -- directory
    a file handle used to absorb xtb's console output so the NAMD
    log file is not flooded -- outputRedirect'''
path = os.path.dirname(directory)
base = os.path.basename(directory)
    # NAMD handles each QM part independently
# one can set qm charge of each part
qmpart = int(path.split('/')[-1])
xtbxyz = path + r'/xtbxyz.xyz'
xtbpcfile = path + r'/pcharge'
xtbcharges = path + r'/charges'
xtbgrad = path + r'/gradient'
xtbrestart = path + r'/xtbrestart'
namdoutput = path + r'/' + base + '.result'
energy = path + r'/energy'
convert_input(directory, xtbxyz, xtbpcfile)
subprocess.call([XTBDIR, xtbxyz, '--grad', '--chrg', str(QMCHARGE[qmpart]), '--gfn', str(GFNVER)],
stdout = outputRedirect, stderr = outputRedirect)
convert_output(xtbcharges, xtbgrad, namdoutput)
# otherwise xtb will restart a run
os.remove(xtbxyz)
os.remove(xtbpcfile)
os.remove(xtbcharges)
os.remove(xtbgrad)
os.remove(xtbrestart)
if __name__ == '__main__':
useless = open(os.devnull, 'w')
run_qmmm(sys.argv[1], useless)
useless.close()
| 38.580488 | 107 | 0.56265 |
c42a95a88d3ba2cf966ae5a0d0c196328f040a8d | 9,044 | py | Python | sklearn_helpers/transformations.py | FlorisHoogenboom/sklearn-helpers | 7a8048130db1128b5f933029fceb1c216c6e9c5d | [
"MIT"
] | 1 | 2019-06-14T17:41:33.000Z | 2019-06-14T17:41:33.000Z | sklearn_helpers/transformations.py | FlorisHoogenboom/sklearn-helpers | 7a8048130db1128b5f933029fceb1c216c6e9c5d | [
"MIT"
] | null | null | null | sklearn_helpers/transformations.py | FlorisHoogenboom/sklearn-helpers | 7a8048130db1128b5f933029fceb1c216c6e9c5d | [
"MIT"
] | null | null | null | from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
import pandas as pd
import numpy as np
class Transformer(BaseEstimator, TransformerMixin):
"""A helper class that turns a Python function that
transforms some data into a Scikit-Learn transformer.
Examples
----------
Can be best used as a decorator on an existing function
```
@Transformer
def my_transforming_function(data):
pass
````
Parameters
----------
func : callable that contains the transformation.
Attributes
----------
_transformer : private attribute that contains the transformer
function used
"""
def __init__(self, func):
if not callable(func):
raise ValueError('Function should be callable')
self._transformer = func
self.skip_validation = True
    def fit(self, data, y=None):
"""Empty shell only to adhere to the scikit-learn api
Returns
----------
self : the instance of the transformer
"""
return self
def transform(self, data):
"""Transform using the transformer function
Parameters
----------
data : the dataset to transform
"""
if not self.skip_validation:
check_array(
data,
dtype=None,
copy=False,
force_all_finite=False
)
data = data.copy()
return self._transformer(data)
class PandasTransformer(Transformer):
"""A helper class that wraps a given transformation function
    and makes it adhere to the Scikit-Learn API.
"""
def __init__(self, func):
super(PandasTransformer, self).__init__(func)
self.skip_validation = True
@staticmethod
def check_is_pandas_dataframe(data):
if not isinstance(data, pd.DataFrame):
raise ValueError('Data should be a pandas dataframe')
return data
def transform(self, data):
"""Transform using the transformer function
Parameters
----------
data : the dataset to transform
"""
self.check_is_pandas_dataframe(data)
return super(PandasTransformer, self).transform(data)
class ColumnSelector(Transformer):
"""A helper class that selects a specific subset of columns for
further analysis.
Parameters
----------
columns : a list of integers that specifies which columns should be kept
Attributes
----------
    columns : attribute containing the columns being selected
    handle_unknown : whether to ignore non-existing columns,
        str: 'error' (default) or 'ignore'
    complement : if True, select every column except the ones given
"""
def __init__(self, columns=None, handle_unknown='error', complement=False):
self.columns = columns
self.handle_unknown = handle_unknown
self.complement = complement
# Call superclass with the desired transformer function
super(ColumnSelector, self).__init__(self.transform_func)
# TODO: docs....
def _columns_to_select(self, columns, data):
if not self.complement:
return columns
else:
if type(columns) is int:
columns = [columns]
            all_cols = list(range(data.shape[1]))
            return [col for col in all_cols if col not in columns]
# TODO: docs....
def _check_columns(self, data):
if type(self.columns) is int:
return self.columns
if self.handle_unknown == 'ignore':
return [col for col in self.columns if col < data.shape[1]]
elif len(self.columns) > 0 and max(self.columns) >= data.shape[1]:
raise IndexError('Could not select the desired columns')
else:
return self.columns
def transform_func(self, data):
"""Selects columns in self.columns of the given argument data.
Parameters
----------
data : A array like shape that support NumPy like indexing.
"""
if self.columns is None:
return data
columns = self._check_columns(data)
columns = self._columns_to_select(columns, data)
return data[:,columns]
@property
def columns(self):
return self.columns_
@columns.setter
def columns(self, columns):
if columns is None:
self.columns_ = None
return
if (type(columns) is not int and (
type(columns) is not list or
not all(map(lambda x: isinstance(x, int), columns)
))
):
raise ValueError(
'Columns should be a single integer or a list of integers'
)
self.columns_ = columns
class PandasColumnSelector(ColumnSelector, PandasTransformer):
# TODO: docs....
def _columns_to_select(self, columns, data):
if not self.complement:
return columns
elif not self.named_columns:
if type(columns) is int:
columns = [columns]
return super(PandasColumnSelector, self)._columns_to_select(
columns,
data
)
else:
if type(columns) is str:
columns = [columns]
return [col for col in data.columns if not col in columns]
# TODO: docs....
def _check_columns(self, data):
if type(self.columns) in (str, int):
return self.columns
if self.handle_unknown == 'ignore' and self.named_columns:
return [col for col in self.columns if col in data.columns]
elif self.handle_unknown == 'ignore' and not self.named_columns:
return [col for col in self.columns if col < data.shape[1]]
# If we do not handle unkowns
if (
self.named_columns and
not set(self.columns).issubset(set(data.columns))
):
raise IndexError(
'Columns {0} not contained in axis.'.format(
set(self.columns).difference(set(data.columns))
)
)
elif(
not self.named_columns and
len(self.columns) > 0 and
not max(self.columns) <= data.shape[1]
):
raise IndexError('Column indicecs out of bounds')
else:
return self.columns
def transform_func(self, data):
"""Selects columns in self.columns of the given Pandas DataFrame.
Parameters
----------
data : Pandas DataFrame on which column selection should be performed.
"""
if self.columns is None:
return data
columns = self._check_columns(data)
columns = self._columns_to_select(columns, data)
if self.named_columns:
return data[columns]
else:
return data.iloc[:, columns]
@property
def columns(self):
return self.columns_
@columns.setter
def columns(self, columns):
if columns is None:
self.columns_ = None
return
if (
type(columns) is list and
all(map(lambda x: isinstance(x, int), columns))
):
self.columns_ = columns
self.named_columns = False
elif (
type(columns) is list and
all(map(lambda x: isinstance(x, str), columns))
):
self.columns_ = columns
self.named_columns = True
elif (
type(columns) is int
):
self.columns_ = columns
self.named_columns = False
elif (
type(columns) is str
):
self.columns_ = columns
self.named_columns = True
else:
raise ValueError(
'Columns should be a singe instance or a list of strings or integers'
)
class PandasTypeSelector(ColumnSelector, PandasTransformer):
def __init__(self, types):
super(PandasTypeSelector, self).__init__()
self.types = types
def transform_func(self, data):
        if type(self.types) is str:
            types = [self.types]
        else:
            types = self.types
        return data.select_dtypes(include=types)
class PandasCatColumnSelector(PandasColumnSelector):
def __init__(self):
super(PandasCatColumnSelector, self).__init__()
def transform_func(self, data):
cat_cols = data.select_dtypes(exclude=[np.number]).columns
self.columns = [data.columns.get_loc(col) for col in cat_cols]
return super(PandasCatColumnSelector, self).transform_func(data)
class PandasNonCatColumnSelector(PandasColumnSelector):
def __init__(self):
super(PandasNonCatColumnSelector, self).__init__()
def transform_func(self, data):
cat_cols = data.select_dtypes(include=[np.number]).columns
self.columns = [data.columns.get_loc(col) for col in cat_cols]
return super(PandasNonCatColumnSelector, self).transform_func(data) | 29.848185 | 85 | 0.591995 |
51162ec7a66becea73723ec54b465e0f2033dfe5 | 41,833 | py | Python | frappe-bench/env/lib/python2.7/site-packages/cssutils/serialize.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/cssutils/serialize.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/cssutils/serialize.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""cssutils serializer"""
__all__ = ['CSSSerializer', 'Preferences']
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from cssutils.helper import normalize
import codecs
import cssutils
import helper
import re
import xml.dom
def _escapecss(e):
"""
Escapes characters not allowed in the current encoding the CSS way
with a backslash followed by a uppercase hex code point
    E.g. the German umlaut 'ä' is escaped as \E4
"""
s = e.object[e.start:e.end]
return u''.join([ur'\%s ' % str(hex(ord(x)))[2:] # remove 0x from hex
.upper() for x in s]), e.end
codecs.register_error('escapecss', _escapecss)
class Preferences(object):
"""Control output of CSSSerializer.
defaultAtKeyword = True
Should the literal @keyword from src CSS be used or the default
form, e.g. if ``True``: ``@import`` else: ``@i\mport``
defaultPropertyName = True
Should the normalized propertyname be used or the one given in
the src file, e.g. if ``True``: ``color`` else: ``c\olor``
Only used if ``keepAllProperties==False``.
defaultPropertyPriority = True
Should the normalized or literal priority be used, e.g. ``!important``
or ``!Im\portant``
importHrefFormat = None
Uses hreftype if ``None`` or format ``"URI"`` if ``'string'`` or
format ``url(URI)`` if ``'uri'``
indent = 4 * ' '
Indentation of e.g Properties inside a CSSStyleDeclaration
indentClosingBrace = True
Defines if closing brace of block is indented to match indentation
        of the block (default) or match indentation of selector.
indentSpecificities = False (**EXPERIMENTAL**)
Indent rules with subset of Selectors and higher Specitivity
keepAllProperties = True
If ``True`` all properties set in the original CSSStylesheet
are kept meaning even properties set twice with the exact same
same name are kept!
keepComments = True
If ``False`` removes all CSSComments
keepEmptyRules = False
defines if empty rules like e.g. ``a {}`` are kept in the resulting
serialized sheet
keepUnknownAtRules = True
defines if unknown @rules like e.g. ``@three-dee {}`` are kept in the
serialized sheet
keepUsedNamespaceRulesOnly = False
if True only namespace rules which are actually used are kept
lineNumbers = False
Only used if a complete CSSStyleSheet is serialized.
lineSeparator = u'\\n'
How to end a line. This may be set to e.g. u'' for serializing of
CSSStyleDeclarations usable in HTML style attribute.
listItemSpacer = u' '
string which is used in ``css.SelectorList``, ``css.CSSValue`` and
``stylesheets.MediaList`` after the comma
minimizeColorHash = True
defines if colorhash should be minimized from full size to shorthand
e.g minimize #FFFFFF to #FFF
normalizedVarNames = True
defines if variable names should be serialized normalized (they are
used as being normalized anyway)
omitLastSemicolon = True
If ``True`` omits ; after last property of CSSStyleDeclaration
omitLeadingZero = False
defines if values between -1 and 1 should omit the 0, like ``.5px``
paranthesisSpacer = u' '
        string which is used before an opening parenthesis like in a
``css.CSSMediaRule`` or ``css.CSSStyleRule``
propertyNameSpacer = u' '
string which is used after a Property name colon
resolveVariables = True
if ``True`` all variable references are tried to resolved and
all CSSVariablesRules are removed from the output.
Any variable reference not resolvable is simply kept untouched.
selectorCombinatorSpacer = u' '
string which is used before and after a Selector combinator like +, > or ~.
CSSOM defines a single space for this which is also the default in cssutils.
spacer = u' '
general spacer, used e.g. by CSSUnknownRule
validOnly = False
if True only valid (Properties) are output
A Property is valid if it is a known Property with a valid value.
"""
def __init__(self, **initials):
"""Always use named instead of positional parameters."""
self.useDefaults()
for key, value in initials.items():
if value:
self.__setattr__(key, value)
def __repr__(self):
return u"cssutils.css.%s(%s)" % (self.__class__.__name__,
u', '.join(['\n %s=%r' % (p, self.__getattribute__(p)) for p in self.__dict__]
))
def __str__(self):
return u"<cssutils.css.%s object %s at 0x%x" % (self.__class__.__name__,
u' '.join(['%s=%r' % (p, self.__getattribute__(p)) for p in self.__dict__]
),
id(self))
def useDefaults(self):
"Reset all preference options to their default value."
self.defaultAtKeyword = True
self.defaultPropertyName = True
self.defaultPropertyPriority = True
self.importHrefFormat = None
self.indent = 4 * u' '
self.indentClosingBrace = True
self.indentSpecificities = False
self.keepAllProperties = True
self.keepComments = True
self.keepEmptyRules = False
self.keepUnknownAtRules = True
self.keepUsedNamespaceRulesOnly = False
self.lineNumbers = False
self.lineSeparator = u'\n'
self.listItemSpacer = u' '
self.minimizeColorHash = True
self.normalizedVarNames = True
self.omitLastSemicolon = True
self.omitLeadingZero = False
self.paranthesisSpacer = u' '
self.propertyNameSpacer = u' '
self.resolveVariables = True
self.selectorCombinatorSpacer = u' '
self.spacer = u' '
self.validOnly = False # should not be changed currently!!!
def useMinified(self):
"""Set options resulting in a minified stylesheet.
You may want to set preferences with this convenience method
and override specific settings you want adjusted afterwards.
"""
self.importHrefFormat = 'string'
self.indent = u''
self.keepComments = False
self.keepEmptyRules = False
self.keepUnknownAtRules = False
self.keepUsedNamespaceRulesOnly = True
self.lineNumbers = False
self.lineSeparator = u''
self.listItemSpacer = u''
self.minimizeColorHash = True
self.omitLastSemicolon = True
self.omitLeadingZero = True
self.paranthesisSpacer = u''
self.propertyNameSpacer = u''
self.selectorCombinatorSpacer = u''
self.spacer = u''
self.validOnly = False
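# A short usage sketch of these preferences (illustrative only; the exact
# output of the minified serializer is approximate):
#
#   import cssutils
#   sheet = cssutils.parseString(u'a { color: #FFFFFF }')
#   cssutils.ser.prefs.useMinified()
#   sheet.cssText                       # -> 'a{color:#FFF}'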
class Out(object):
"""A simple class which makes appended items available as a combined string"""
def __init__(self, ser):
self.ser = ser
self.out = []
def _remove_last_if_S(self):
if self.out and not self.out[-1].strip():
# remove trailing S
del self.out[-1]
def append(self, val, type_=None, space=True, keepS=False, indent=False, alwaysS=False):
"""Appends val. Adds a single S after each token except as follows:
- typ COMMENT
uses cssText depending on self.ser.prefs.keepComments
- typ "Property", cssutils.css.CSSRule.UNKNOWN_RULE
uses cssText
- typ STRING
escapes helper.string
- typ S
ignored except ``keepS=True``
- typ URI
calls helper.uri
- val ``{``
adds LF after
- val ``;``, typ 'styletext'
removes S before and adds LF after
- val ``, :``
removes S before
- val ``+ > ~``
encloses in prefs.selectorCombinatorSpacer
- some other vals
add ``*spacer`` except ``space=False``
"""
prefspace = self.ser.prefs.spacer
if val or type_ in ('STRING', 'URI'):
# PRE
if 'COMMENT' == type_:
if self.ser.prefs.keepComments:
val = val.cssText
else:
return
elif 'S' == type_ and not keepS:
return
elif 'S' == type_ and keepS:
val = u' '
elif 'STRING' == type_:
# may be empty but MUST not be None
if val is None:
return
val = helper.string(val)
if not prefspace:
self._remove_last_if_S()
elif 'URI' == type_:
val = helper.uri(val)
elif 'HASH' == type_:
val = self.ser._hash(val)
elif hasattr(val, 'cssText'):
val = val.cssText
elif hasattr(val, 'mediaText'):
val = val.mediaText
elif val in u'+>~,:{;)]/=}' and not alwaysS:
self._remove_last_if_S()
# elif type_ in ('Property', cssutils.css.CSSRule.UNKNOWN_RULE):
# val = val.cssText
# elif type_ in ('NUMBER', 'DIMENSION', 'PERCENTAGE') and val == u'0':
# # remove sign + or - if value is zero
            #     # TODO: only for lengths!
# if self.out and self.out[-1] in u'+-':
# del self.out[-1]
# APPEND
if indent or (val == u'}' and self.ser.prefs.indentClosingBrace):
self.out.append(self.ser._indentblock(val, self.ser._level+1))
else:
if val.endswith(u' '):
self._remove_last_if_S()
self.out.append(val)
# POST
if alwaysS and val in u'-+*/': # calc, */ not really but do anyway
self.out.append(u' ')
elif val in u'+>~': # enclose selector combinator
self.out.insert(-1, self.ser.prefs.selectorCombinatorSpacer)
self.out.append(self.ser.prefs.selectorCombinatorSpacer)
elif u')' == val and not keepS: # CHAR funcend
# TODO: pref?
self.out.append(u' ')
elif u',' == val: # list
self.out.append(self.ser.prefs.listItemSpacer)
elif u':' == val: # prop
self.out.append(self.ser.prefs.propertyNameSpacer)
elif u'{' == val: # block start
self.out.insert(-1, self.ser.prefs.paranthesisSpacer)
self.out.append(self.ser.prefs.lineSeparator)
elif u';' == val or 'styletext' == type_: # end or prop or block
self.out.append(self.ser.prefs.lineSeparator)
elif val not in u'}[]()/=' and space and type_ != 'FUNCTION':
self.out.append(self.ser.prefs.spacer)
if type_ != 'STRING' and not self.ser.prefs.spacer and \
self.out and not self.out[-1].endswith(u' '):
self.out.append(u' ')
def value(self, delim=u'', end=None, keepS=False):
"returns all items joined by delim"
if not keepS:
self._remove_last_if_S()
if end:
self.out.append(end)
return delim.join(self.out)
class CSSSerializer(object):
"""Serialize a CSSStylesheet and its parts.
To use your own serializing method the easiest is to subclass CSS
Serializer and overwrite the methods you like to customize.
"""
def __init__(self, prefs=None):
"""
:param prefs:
instance of Preferences
"""
if not prefs:
prefs = Preferences()
self.prefs = prefs
self._level = 0 # current nesting level
# TODO:
self._selectors = [] # holds SelectorList
self._selectorlevel = 0 # current specificity nesting level
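    # A minimal customization sketch (assuming the public cssutils API for
    # installing a serializer; ``MySerializer`` is hypothetical):
    #
    #   class MySerializer(CSSSerializer):
    #       def do_CSSComment(self, rule):
    #           return u''               # e.g. always drop comments
    #
    #   cssutils.setSerializer(MySerializer())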
def _atkeyword(self, rule):
"returns default or source atkeyword depending on prefs"
if self.prefs.defaultAtKeyword:
return rule.atkeyword # default
else:
return rule._keyword
def _indentblock(self, text, level):
"""
indent a block like a CSSStyleDeclaration to the given level
which may be higher than self._level (e.g. for CSSStyleDeclaration)
"""
if not self.prefs.lineSeparator:
return text
return self.prefs.lineSeparator.join(
[u'%s%s' % (level * self.prefs.indent, line)
for line in text.split(self.prefs.lineSeparator)]
)
def _propertyname(self, property, actual):
"""
        used by all style declarations to get the property name used,
        depending on the prefs settings defaultPropertyName and
        keepAllProperties
"""
if self.prefs.defaultPropertyName and not self.prefs.keepAllProperties:
return property.name
else:
return actual
def _linenumnbers(self, text):
if self.prefs.lineNumbers:
pad = len(str(text.count(self.prefs.lineSeparator)+1))
out = []
for i, line in enumerate(text.split(self.prefs.lineSeparator)):
out.append((u'%*i: %s') % (pad, i+1, line))
text = self.prefs.lineSeparator.join(out)
return text
def _hash(self, val, type_=None):
"""
Short form of hash, e.g. #123 instead of #112233
"""
if self.prefs.minimizeColorHash and\
len(val) == 7 and\
val[1] == val[2] and\
val[3] == val[4] and\
val[5] == val[6]:
return u'#%s%s%s' % (val[1], val[3], val[5])
return val
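    # For example, with ``minimizeColorHash`` enabled (illustrative):
    #
    #   self._hash(u'#112233')   # -> u'#123'
    #   self._hash(u'#112234')   # -> unchanged, not collapsible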
def _valid(self, x):
"checks items valid property and prefs.validOnly"
return not self.prefs.validOnly or (self.prefs.validOnly and
x.valid)
def do_CSSStyleSheet(self, stylesheet):
"""serializes a complete CSSStyleSheet"""
useduris = stylesheet._getUsedURIs()
out = []
for rule in stylesheet.cssRules:
if self.prefs.keepUsedNamespaceRulesOnly and\
rule.NAMESPACE_RULE == rule.type and\
rule.namespaceURI not in useduris and (
rule.prefix or None not in useduris):
continue
cssText = rule.cssText
if cssText:
out.append(cssText)
text = self._linenumnbers(self.prefs.lineSeparator.join(out))
# get encoding of sheet, defaults to UTF-8
try:
encoding = stylesheet.cssRules[0].encoding
except (IndexError, AttributeError):
encoding = u'UTF-8'
# TODO: py3 return b str but tests use unicode?
return text.encode(encoding, u'escapecss')
def do_CSSComment(self, rule):
"""
serializes CSSComment which consists only of commentText
"""
if rule._cssText and self.prefs.keepComments:
return rule._cssText
else:
return u''
def do_CSSCharsetRule(self, rule):
"""
serializes CSSCharsetRule
encoding: string
always @charset "encoding";
no comments or other things allowed!
"""
if rule.wellformed:
return u'@charset %s;' % helper.string(rule.encoding)
else:
return u''
def do_CSSVariablesRule(self, rule):
"""
serializes CSSVariablesRule
media
TODO
variables
CSSStyleDeclaration
+ CSSComments
"""
variablesText = rule.variables.cssText
if variablesText and rule.wellformed and not self.prefs.resolveVariables:
out = Out(self)
out.append(self._atkeyword(rule))
for item in rule.seq:
# assume comments {
out.append(item.value, item.type)
out.append(u'{')
out.append(u'%s%s}' % (variablesText, self.prefs.lineSeparator),
indent=1)
return out.value()
else:
return u''
def do_CSSFontFaceRule(self, rule):
"""
serializes CSSFontFaceRule
style
CSSStyleDeclaration
+ CSSComments
"""
styleText = self.do_css_CSSStyleDeclaration(rule.style)
if styleText and rule.wellformed:
out = Out(self)
out.append(self._atkeyword(rule))
for item in rule.seq:
# assume comments {
out.append(item.value, item.type)
out.append(u'{')
out.append(u'%s%s}' % (styleText, self.prefs.lineSeparator),
indent=1)
return out.value()
else:
return u''
def do_CSSImportRule(self, rule):
"""
serializes CSSImportRule
href
string
media
optional cssutils.stylesheets.medialist.MediaList
name
optional string
+ CSSComments
"""
if rule.wellformed:
out = Out(self)
out.append(self._atkeyword(rule))
for item in rule.seq:
type_, val = item.type, item.value
if 'href' == type_:
# "href" or url(href)
if self.prefs.importHrefFormat == 'string' or (
self.prefs.importHrefFormat != 'uri' and
rule.hreftype == 'string'):
out.append(val, 'STRING')
else:
out.append(val, 'URI')
elif 'media' == type_:
# media
mediaText = self.do_stylesheets_medialist(val)
if mediaText and mediaText != u'all':
out.append(mediaText)
elif 'name' == type_:
out.append(val, 'STRING')
else:
out.append(val, type_)
return out.value(end=u';')
else:
return u''
def do_CSSNamespaceRule(self, rule):
"""
serializes CSSNamespaceRule
uri
string
prefix
string
+ CSSComments
"""
if rule.wellformed:
out = Out(self)
out.append(self._atkeyword(rule))
for item in rule.seq:
type_, val = item.type, item.value
if 'namespaceURI' == type_:
out.append(val, 'STRING')
else:
out.append(val, type_)
return out.value(end=u';')
else:
return u''
def do_CSSMediaRule(self, rule):
"""
serializes CSSMediaRule
+ CSSComments
"""
# TODO: use Out()?
# mediaquery
if not rule.media.wellformed:
return u''
# @media
out = [self._atkeyword(rule)]
if not len(self.prefs.spacer):
# for now always with space as only webkit supports @mediaall?
out.append(u' ')
else:
out.append(self.prefs.spacer) # might be empty
out.append(self.do_stylesheets_medialist(rule.media))
# name, seq contains content after name only (Comments)
if rule.name:
out.append(self.prefs.spacer)
nameout = Out(self)
nameout.append(helper.string(rule.name))
for item in rule.seq:
nameout.append(item.value, item.type)
out.append(nameout.value())
# {
out.append(self.prefs.paranthesisSpacer)
out.append(u'{')
out.append(self.prefs.lineSeparator)
# rules
rulesout = []
for r in rule.cssRules:
rtext = r.cssText
if rtext:
# indent each line of cssText
rulesout.append(self._indentblock(rtext, self._level + 1))
rulesout.append(self.prefs.lineSeparator)
if not self.prefs.keepEmptyRules and not u''.join(rulesout).strip():
return u''
out.extend(rulesout)
# }
out.append(u'%s}' % ((self._level + int(self.prefs.indentClosingBrace))
* self.prefs.indent))
return u''.join(out)
def do_CSSPageRule(self, rule):
"""
serializes CSSPageRule
selectorText
string
style
CSSStyleDeclaration
cssRules
CSSRuleList of MarginRule objects
+ CSSComments
"""
# rules
rules = u''
rulesout = []
for r in rule.cssRules:
rtext = r.cssText
if rtext:
rulesout.append(rtext)
rulesout.append(self.prefs.lineSeparator)
rulesText = u''.join(rulesout)#.strip()
# omit semicolon only if no MarginRules
styleText = self.do_css_CSSStyleDeclaration(rule.style,
omit=not rulesText)
if (styleText or rulesText) and rule.wellformed:
out = Out(self)
out.append(self._atkeyword(rule))
out.append(rule.selectorText)
out.append(u'{')
if styleText:
if not rulesText:
out.append(u'%s%s' % (styleText,
self.prefs.lineSeparator
), indent=1)
else:
out.append(styleText, type_='styletext', indent=1, space=False)
if rulesText:
out.append(rulesText, indent=1)
#?
self._level -= 1
out.append(u'}')
self._level += 1
return out.value()
else:
return u''
def do_CSSPageRuleSelector(self, seq):
"Serialize selector of a CSSPageRule"
out = Out(self)
for item in seq:
if item.type == 'IDENT':
out.append(item.value, item.type, space=False)
else:
out.append(item.value, item.type)
return out.value()
def do_MarginRule(self, rule):
"""
serializes MarginRule
atkeyword
string
style
CSSStyleDeclaration
+ CSSComments
"""
# might not be set at all?!
if rule.atkeyword:
styleText = self.do_css_CSSStyleDeclaration(rule.style)
if styleText and rule.wellformed:
out = Out(self)
# # use seq but styledecl missing
# for item in rule.seq:
# if item.type == 'ATKEYWORD':
# # move logic to Out
# out.append(self._atkeyword(rule), type_=item.type)
# else:
# print type_, val
# out.append(item.value, item.type)
# return out.value()
# ok for now:
out.append(self._atkeyword(rule), type_='ATKEYWORD')
out.append(u'{')
out.append(u'%s%s' % (self._indentblock(styleText, self._level+1),
self.prefs.lineSeparator))
out.append(u'}')
return out.value()
return u''
def do_CSSUnknownRule(self, rule):
"""
serializes CSSUnknownRule
anything until ";" or "{...}"
+ CSSComments
"""
if rule.wellformed and self.prefs.keepUnknownAtRules:
out = Out(self)
out.append(rule.atkeyword)
stacks = []
for item in rule.seq:
type_, val = item.type, item.value
# PRE
if u'}' == val:
# close last open item on stack
stackblock = stacks.pop().value()
if stackblock:
val = self._indentblock(
stackblock + self.prefs.lineSeparator + val,
min(1, len(stacks)+1))
else:
val = self._indentblock(val, min(1, len(stacks)+1))
# APPEND
if stacks:
stacks[-1].append(val, type_)
else:
out.append(val, type_)
# POST
if u'{' == val:
# new stack level
stacks.append(Out(self))
return out.value()
else:
return u''
def do_CSSStyleRule(self, rule):
"""
serializes CSSStyleRule
selectorList
style
+ CSSComments
"""
# TODO: use Out()
# prepare for element nested rules
# TODO: sort selectors!
if self.prefs.indentSpecificities:
# subselectorlist?
elements = set([s.element for s in rule.selectorList])
specitivities = [s.specificity for s in rule.selectorList]
for selector in self._selectors:
lastelements = set([s.element for s in selector])
if elements.issubset(lastelements):
# higher specificity?
lastspecitivities = [s.specificity for s in selector]
if specitivities > lastspecitivities:
self._selectorlevel += 1
break
elif self._selectorlevel > 0:
self._selectorlevel -= 1
else:
# save new reference
self._selectors.append(rule.selectorList)
self._selectorlevel = 0
# TODO ^ RESOLVE!!!!
selectorText = self.do_css_SelectorList(rule.selectorList)
if not selectorText or not rule.wellformed:
return u''
self._level += 1
styleText = u''
try:
styleText = self.do_css_CSSStyleDeclaration(rule.style)
finally:
self._level -= 1
if not styleText:
if self.prefs.keepEmptyRules:
return u'%s%s{}' % (selectorText,
self.prefs.paranthesisSpacer)
else:
return self._indentblock(
u'%s%s{%s%s%s%s}' % (
selectorText,
self.prefs.paranthesisSpacer,
self.prefs.lineSeparator,
self._indentblock(styleText, self._level + 1),
self.prefs.lineSeparator,
(self._level + int(self.prefs.indentClosingBrace))
* self.prefs.indent),
self._selectorlevel)
def do_css_SelectorList(self, selectorlist):
"comma-separated list of Selectors"
# does not need Out() as it is too simple
if selectorlist.wellformed:
out = []
for part in selectorlist.seq:
if isinstance(part, cssutils.css.Selector):
out.append(part.selectorText)
else:
out.append(part) # should not happen
sep = u',%s' % self.prefs.listItemSpacer
return sep.join(out)
else:
return u''
def do_css_Selector(self, selector):
"""
a single Selector including comments
an element has syntax (namespaceURI, name) where namespaceURI may be:
- cssutils._ANYNS => ``*|name``
- None => ``name``
- u'' => ``|name``
- any other value: => ``prefix|name``
"""
if selector.wellformed:
out = Out(self)
DEFAULTURI = selector._namespaces.get('', None)
for item in selector.seq:
type_, val = item.type, item.value
if isinstance(val, tuple):
# namespaceURI|name (element or attribute)
namespaceURI, name = val
if DEFAULTURI == namespaceURI or (not DEFAULTURI and
namespaceURI is None):
out.append(name, type_, space=False)
else:
if namespaceURI == cssutils._ANYNS:
prefix = u'*'
else:
try:
prefix = selector._namespaces.prefixForNamespaceURI(
namespaceURI)
except IndexError:
prefix = u''
out.append(u'%s|%s' % (prefix, name), type_, space=False)
else:
out.append(val, type_, space=False, keepS=True)
return out.value()
else:
return u''
def do_css_CSSVariablesDeclaration(self, variables):
"""Variables of CSSVariableRule."""
if len(variables.seq) > 0:
out = Out(self)
lastitem = len(variables.seq) - 1
for i, item in enumerate(variables.seq):
type_, val = item.type, item.value
if u'var' == type_:
name, cssvalue = val
if self.prefs.normalizedVarNames:
name = normalize(name)
out.append(name)
out.append(u':')
out.append(cssvalue.cssText)
if i < lastitem or not self.prefs.omitLastSemicolon:
out.append(u';')
elif isinstance(val, cssutils.css.CSSComment):
# CSSComment
out.append(val, 'COMMENT')
out.append(self.prefs.lineSeparator)
else:
out.append(val.cssText, type_)
out.append(self.prefs.lineSeparator)
return out.value().strip()
else:
return u''
def do_css_CSSStyleDeclaration(self, style, separator=None, omit=True):
"""
Style declaration of CSSStyleRule
"""
# TODO: use Out()
# may be comments only
if len(style.seq) > 0:
if separator is None:
separator = self.prefs.lineSeparator
if self.prefs.keepAllProperties:
# all
seq = style.seq
else:
# only effective ones
_effective = style.getProperties()
seq = [item for item in style.seq
if (isinstance(item.value, cssutils.css.Property)
and item.value in _effective)
or not isinstance(item.value, cssutils.css.Property)]
out = []
omitLastSemicolon = omit and self.prefs.omitLastSemicolon
for i, item in enumerate(seq):
type_, val = item.type, item.value
if isinstance(val, cssutils.css.CSSComment):
# CSSComment
if self.prefs.keepComments:
out.append(val.cssText)
out.append(separator)
elif isinstance(val, cssutils.css.Property):
# PropertySimilarNameList
if val.cssText:
out.append(val.cssText)
if not (omitLastSemicolon and i==len(seq)-1):
out.append(u';')
out.append(separator)
elif isinstance(val, cssutils.css.CSSUnknownRule):
# @rule
out.append(val.cssText)
out.append(separator)
else:
# ?
out.append(val)
out.append(separator)
if out and out[-1] == separator:
del out[-1]
return u''.join(out)
else:
return u''
def do_Property(self, property):
"""
Style declaration of CSSStyleRule
Property has a seqs attribute which contains seq lists for
name, a CSSvalue and a seq list for priority
"""
# TODO: use Out()
out = []
if property.seqs[0] and property.wellformed and self._valid(property):
nameseq, value, priorityseq = property.seqs
#name
for part in nameseq:
if hasattr(part, 'cssText'):
out.append(part.cssText)
elif property.literalname == part:
out.append(self._propertyname(property, part))
else:
out.append(part)
if out and (not property._mediaQuery or
property._mediaQuery and value.cssText):
# MediaQuery may consist of name only
out.append(u':')
out.append(self.prefs.propertyNameSpacer)
# value
out.append(value.cssText)
# priority
if out and priorityseq:
out.append(u' ')
for part in priorityseq:
if hasattr(part, 'cssText'): # comments
out.append(part.cssText)
else:
if part == property.literalpriority and\
self.prefs.defaultPropertyPriority:
out.append(property.priority)
else:
out.append(part)
return u''.join(out)
def do_Property_priority(self, priorityseq):
"""
        a Property's priority "!" S* "important"
"""
# TODO: use Out()
out = []
for part in priorityseq:
if hasattr(part, 'cssText'): # comments
out.append(u' ')
out.append(part.cssText)
out.append(u' ')
else:
out.append(part)
return u''.join(out).strip()
def do_css_PropertyValue(self, value, valuesOnly=False):
"""Serializes a PropertyValue"""
if not value:
return u''
else:
out = Out(self)
for item in value.seq:
type_, val = item.type, item.value
if valuesOnly and type_ == cssutils.css.CSSComment:
continue
elif hasattr(val, 'cssText'):
# RGBColor or CSSValue if a CSSValueList
out.append(val.cssText, type_)
else:
if val and val[0] == val[-1] and val[0] in '\'"':
val = helper.string(val[1:-1])
# S must be kept! in between values but no extra space
out.append(val, type_)
return out.value()
def _strip_zeros(self, s):
i = s.index(u'.') + 2
a, b = s[0:i], s[i:len(s)]
b = b.rstrip('0')
return a + b
def do_css_Value(self, value, valuesOnly=None):
"""Serializes a Value, valuesOnly is ignored"""
if not value:
return u''
else:
out = Out(self)
if value.type in (u'DIMENSION', u'NUMBER', u'PERCENTAGE'):
dim = value.dimension or u''
if value.value == 0:
val = u'0'
if value.dimension in ('cm', 'mm', 'in', 'px', 'pc', 'pt',
'em', 'ex'):
dim = u''
elif value.value == int(value.value):
# cut off after . which is zero anyway
val = unicode(int(value.value))
elif self.prefs.omitLeadingZero and -1 < value.value < 1:
v = self._strip_zeros(u'%f' % value.value) # issue #27
val = v
if value._sign == u'-':
val = v[0] + v[2:]
else:
val = v[1:]
else:
val = self._strip_zeros(u'%f' % value.value) # issue #27
# keep '+' if given
if value.value != 0 and value._sign == u'+':
sign = u'+'
else:
sign = u''
out.append(sign + val + dim, value.type)
else:
# e.g. URI
out.append(value.value, value.type)
return out.value()
def do_css_ColorValue(self, value, valuesOnly=False):
"""Serialize a ColorValue, a HASH simple value or FUNCTION"""
try:
return {'FUNCTION': self.do_css_CSSFunction,
'HASH': self.do_css_Value,
'IDENT': self.do_css_Value
}[value.colorType](value,
valuesOnly=valuesOnly)
        except KeyError:
return u''
def do_css_CSSFunction(self, cssvalue, valuesOnly=False):
"""Serialize a CSS function value"""
if not cssvalue:
return u''
else:
out = Out(self)
for item in cssvalue.seq:
type_, val = item.type, item.value
if valuesOnly and type_ == cssutils.css.CSSComment:
continue
out.append(val, type_)
return out.value()
def do_css_CSSCalc(self, cssvalue, valuesOnly=False):
"""Serialize a CSS calc value"""
if not cssvalue:
return u''
else:
out = Out(self)
for item in cssvalue.seq:
type_, val = item.type, item.value
if valuesOnly and type_ == cssutils.css.CSSComment:
continue
elif hasattr(val, 'cssText'):
# RGBColor or CSSValue if a CSSValueList
out.append(val.cssText, type_)
elif type_ == 'CHAR' and val in u'-+*/':
out.append(val, type_, alwaysS=True)
else:
out.append(val, type_)
return out.value()
def do_css_MSValue(self, cssvalue, valuesOnly=False):
"""Serialize an ExpressionValue (IE only),
should at least keep the original syntax"""
if not cssvalue:
return u''
else:
out = Out(self)
for item in cssvalue.seq:
type_, val = item.type, item.value
#val = self._possiblezero(cssvalue, type_, val)
# do no send type_ so no special cases!
out.append(val, None, space=False)
return out.value()
def do_css_CSSVariable(self, variable, IGNORED=False):
"""Serializes a CSSVariable"""
if not variable or not variable.name:
return u''
else:
out = Out(self)
v = variable.value
if self.prefs.resolveVariables and v:
# resolve variable
out.append(v)
else:
# keep var(NAME)
out.append(u'var(', 'FUNCTION')
out.append(variable.name, 'IDENT')
if variable.fallback:
out.append(u',', 'COMMA')
out.append(variable.fallback.cssText)
out.append(u')')
return out.value()
def do_stylesheets_medialist(self, medialist):
"""
comma-separated list of media, default is 'all'
If "all" is in the list, every other media *except* "handheld" will
        be stripped. This is because of how Opera handles CSS for PDAs.
"""
if len(medialist) == 0:
return u'all'
else:
seq = medialist.seq
out = Out(self)
firstdone = False
for item in seq:
type_, val = item.type, item.value
if type_ == 'MediaQuery':
if firstdone:
out.append(u',', 'CHAR')
else:
firstdone = True
out.append(item.value, item.type)
return out.value()
def do_stylesheets_mediaquery(self, mediaquery):
"""
a single media used in medialist
"""
if not mediaquery.wellformed:
return u''
else:
withsemi = []
nextmq = False
for item in mediaquery.seq:
type_, val = item.type, item.value
if type_ == 'MediaQuery' and nextmq:
withsemi.append(('CHAR', u','))
nextmq = False
else:
nextmq = True
withsemi.append((type_, val))
out = Out(self)
for t, v, in withsemi:
out.append(v, t)
#for item in mediaquery.seq:
# type_, val = item.type, item.value
# out.append(val, type_)#, space=False)
return out.value()
#if mediaquery.wellformed:
# out = []
# for part in mediaquery.seq:
# if isinstance(part, cssutils.css.Property): # Property
# out.append(u'(%s)' % part.cssText)
# elif hasattr(part, 'cssText'): # comments
# out.append(part.cssText)
# else:
# # TODO: media queries!
# out.append(part)
# return u' '.join(out)
#else:
# return u''
| 34.601323 | 93 | 0.511032 |
cb338b1f4b5e92e2b423cc4fb29af7f994dbc9d1 | 26,126 | py | Python | src/clld/web/app.py | pepe-localhost/clld | 2d698b75d2e6019619be6b01d09f0c77cb865af4 | [
"MIT"
] | 1 | 2021-03-13T15:53:44.000Z | 2021-03-13T15:53:44.000Z | src/clld/web/app.py | pepe-localhost/clld | 2d698b75d2e6019619be6b01d09f0c77cb865af4 | [
"MIT"
] | null | null | null | src/clld/web/app.py | pepe-localhost/clld | 2d698b75d2e6019619be6b01d09f0c77cb865af4 | [
"MIT"
] | null | null | null | """Common functionality of clld Apps is cobbled together here."""
import re
import uuid
import pathlib
import datetime
import functools
import importlib
import collections
from sqlalchemy import engine_from_config
from sqlalchemy.orm import joinedload, undefer
from sqlalchemy.orm.exc import NoResultFound
from webob.request import Request as WebobRequest
from zope.interface import implementer, implementedBy
from pyramid.httpexceptions import HTTPNotFound, HTTPMovedPermanently, HTTPGone
from pyramid import events
from pyramid.request import Request, reify
from pyramid.interfaces import IRoutesMapper
from pyramid.asset import abspath_from_asset_spec
from pyramid.renderers import JSON, JSONP
from pyramid.settings import asbool
from purl import URL
from clldutils.path import md5, git_describe
import clld
from clld.config import get_config
from clld.db.meta import DBSession, Base
from clld.db.models import common
from clld import Resource, RESOURCES
from clld import interfaces
from clld.web.adapters import get_adapters
from clld.web.adapters import geojson, register_resource_adapters
from clld.web.adapters.base import adapter_factory
from clld.web.adapters.cldf import CldfDownload, CldfConfig
from clld.web.views import (
index_view, resource_view, _raise, _ping, js, unapi, xpartial, redirect, gone,
select_combination,
)
from clld.web.views.olac import olac, OlacConfig
from clld.web.views.sitemap import robots, sitemapindex, sitemap, resourcemap
from clld.web.subscribers import add_renderer_globals, add_localizer, init_map
from clld.web.datatables.base import DataTable
from clld.web import datatables
from clld.web.maps import Map, ParameterMap, LanguageMap, CombinationMap
from clld.web.icon import ICONS, ORDERED_ICONS, MapMarker
from clld.web import assets
assert clld
assert assets
class ClldRequest(Request):
"""Custom Request class."""
@reify
def purl(self):
"""Access the current request's URL.
For more convenient URL manipulations, we provide the current request's URL
as `purl.URL <http://purl.readthedocs.org/en/latest/#purl.URL>`_ instance.
"""
return URL(self.url)
@reify
def admin(self):
return '__admin__' in self.params
@reify
def query_params(self):
"""Convenient access to the query parameters of the current request.
:return: dict of the query parameters of the request URL.
"""
return {k: v[0] for k, v in self.purl.query_params().items()}
@property
def db(self):
"""Convenient access to the db session.
We make the db session available as request attribute, so we do not have to
import it in templates.
"""
return DBSession
@property
def blog(self):
return self.registry.queryUtility(interfaces.IBlog)
@reify
def dataset(self):
"""Convenient access to the Dataset object.
Properties of the :py:class:`clld.db.models.common.Dataset` object an
application serves are used in various places, so we want to have a reference to
it.
"""
return self.db.query(common.Dataset).options(undefer('updated')).first()
@property
def contact_email_address(self):
if 'clld.contact' in self.registry.settings:
return self.registry.settings['clld.contact']
return self.dataset.contact # pragma: no cover
def get_datatable(self, name, model, **kw):
"""Convenient lookup and retrieval of initialized DataTable object.
:param name: Name under which the datatable class was registered.
:param model: model class to pass as initialization parameter to the datatable.
:param kw:
Keyword parameters are passed through to the initialization of the datatable.
:return:
:py:class:`clld.web.datatables.base.DataTable` instance, if a datatable was
registered for ``name``.
"""
dt = self.registry.queryUtility(interfaces.IDataTable, name=name)
if dt:
return dt(self, model, **kw)
def get_map(self, name=None, **kw):
"""Convenient lookup and retrieval of initialized Map object.
:param name: Name under which the map was registered.
:return:
:py:class:`clld.web.maps.Map` instance, if a map was registered else ``None``.
"""
if name is None and self.matched_route:
name = self.matched_route.name
if name:
map_ = self.registry.queryUtility(interfaces.IMap, name=name)
if map_:
return map_(self.context, self, **kw)
def _route(self, obj, rsc, **kw):
"""Determine the name of the canonical route for a resource instance.
The resource may be specified as object or as mapper class and id.
:return:
pair (route_name, kw) suitable as arguments for the Request.route_url method.
"""
if rsc is None:
for _rsc in RESOURCES:
if _rsc.interface.providedBy(obj):
rsc = _rsc
break
assert rsc
route = rsc.name
if 'ext' in kw:
route += '_alt'
        # if rsc is passed explicitly, we allow the object id to be passed in as obj,
# to make it possible to create resource URLs without having the "real" object.
kw.setdefault('id', getattr(obj, 'id', obj))
return route, kw
def ctx_for_url(self, url):
"""Method to reverse URL generation for resources.
I.e. given a URL, tries to determine the associated resource.
:return: model instance or ``None``.
"""
mapper = self.registry.getUtility(IRoutesMapper)
_path = URL(url).path()
info = mapper(WebobRequest({'PATH_INFO': _path}))
if not info['route']:
# FIXME: hack to cater to deployments under a path prefix
info = mapper(WebobRequest({'PATH_INFO': re.sub(r'^\/[a-z]+', '', _path)}))
if info['route']:
for rsc in RESOURCES:
if rsc.name == info['route'].name:
if rsc.name == 'dataset':
return self.dataset
if info['match']:
return rsc.model.get(info['match']['id'], default=None)
def resource_url(self, obj, rsc=None, **kw):
"""Get the absolute URL for a resource.
:param obj:
A resource or the id of a resource; in the latter case ``rsc`` must be passed.
:param rsc: A registered :py:class:`clld.Resource`.
:param kw:
Keyword parameters are passed through to
`pyramid.request.Request.route_url <http://docs.pylonsproject.org/projects/\
pyramid/en/1.0-branch/api/request.html#pyramid.request.Request.route_url>`_
:return: URL
"""
route, kw = self._route(obj, rsc, **kw)
return self.route_url(route, **kw)
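    # Illustrative usage (a sketch; ``language`` is a hypothetical resource
    # instance, not defined in this module):
    #
    #   req.resource_url(language)                  # -> .../languages/<id>
    #   req.resource_url(language, ext='geojson')   # -> .../languages/<id>.geojson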
def route_url(self, route, *args, **kw):
"""Facade for Request.route_url, hacking in support for admin routes."""
if self.admin:
if '_query' not in kw:
kw['_query'] = {}
kw['_query']['__admin__'] = '1'
if '__locale__' in self.params:
if '_query' not in kw:
kw['_query'] = {}
kw['_query']['__locale__'] = self.params['__locale__']
return Request.route_url(self, route, *args, **kw)
def resource_path(self, obj, rsc=None, **kw):
"""Determine the path component of a Resource's URL."""
route, kw = self._route(obj, rsc, **kw)
return self.route_path(route, **kw)
def file_ospath(self, file_):
if 'clld.files' in self.registry.settings:
return str(self.registry.settings['clld.files'].joinpath(file_.relpath))
def file_url(self, file_):
if 'url' in file_.jsondata:
# just a preparation for full support of non-local files
return file_.jsondata['url'] # pragma: no cover
if 'clld.files' in self.registry.settings:
return self.static_url(self.file_ospath(file_))
def menu_item(route_name, ctx, req, label=None):
"""Factory function for a menu item specified by route name.
:return: A pair (URL, label) to create a menu item.
"""
return req.route_url(route_name), label or req.translate(route_name.capitalize())
@implementer(interfaces.ICtxFactoryQuery)
class CtxFactoryQuery(object):
"""Implements reasonable default queries to be used in context factories.
By reasonable we mean providing good performance for typical data sizes. Applications
with a-typical numbers of any resource class may have to implement a custom class
for the ICtxFactoryQuery interface. Usually this will be a class derived from
CtxFactoryQuery.
"""
def refined_query(self, query, model, req):
"""To be overridden.
Derived classes may override this method to add model-specific query
refinements of their own.
"""
return query
def __call__(self, model, req):
query = req.db.query(model).filter(model.id == req.matchdict['id'])
custom_query = self.refined_query(query, model, req)
if query == custom_query:
# no customizations done, apply the defaults
f = getattr(model, 'refine_factory_query', None)
if f:
query = f(query)
else:
if model == common.Contribution:
query = query.options(
joinedload(
common.Contribution.valuesets
).joinedload(
common.ValueSet.parameter
),
joinedload(
common.Contribution.valuesets
).joinedload(
common.ValueSet.values
).joinedload(
common.Value.domainelement
),
joinedload(
common.Contribution.references
).joinedload(
common.ContributionReference.source
),
joinedload(
common.Contribution.data
)
)
else:
query = custom_query # pragma: no cover
return query.one()
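# An app can refine the factory query by registering its own subclass
# (sketch; ``MyCtxFactoryQuery`` is hypothetical):
#
#   class MyCtxFactoryQuery(CtxFactoryQuery):
#       def refined_query(self, query, model, req):
#           if model == common.Language:
#               query = query.options(joinedload(common.Language.valuesets))
#           return query
#
#   config.registry.registerUtility(MyCtxFactoryQuery(),
#                                   interfaces.ICtxFactoryQuery)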
def ctx_factory(model, type_, req):
"""Factory function for request contexts.
The context of a request is either a single model instance or an instance of
DataTable incorporating all information to retrieve an appropriately filtered list
of model instances.
"""
def replacement(id_):
raise HTTPMovedPermanently(
location=req.route_url(model.__name__.lower(), id=id_))
if type_ == 'index':
datatable = req.registry.getUtility(
interfaces.IDataTable, name=req.matched_route.name)
return datatable(req, model)
try:
if model == common.Dataset:
ctx = req.db.query(model).one()
elif model == common.Combination:
ctx = common.Combination.get(req.matchdict['id'])
else:
ctx = req.registry.getUtility(interfaces.ICtxFactoryQuery)(model, req)
if ctx.replacement_id:
return replacement(ctx.replacement_id)
ctx.metadata = get_adapters(interfaces.IMetadata, ctx, req)
return ctx
except NoResultFound:
if req.matchdict.get('id'):
replacement_id = common.Config.get_replacement_id(model, req.matchdict['id'])
if replacement_id:
if replacement_id == common.Config.gone:
raise HTTPGone()
return replacement(replacement_id)
raise HTTPNotFound()
def maybe_import(name, pkg_dir=None):
exists = False
if pkg_dir:
rel_path = name.split('.')[1:] if '.' in name else []
rel_path.append('__init__.py')
exists = pkg_dir.joinpath(*rel_path).exists()
if not exists:
rel_path.pop()
if rel_path:
rel_path[-1] += '.py'
exists = pkg_dir.joinpath(*rel_path).exists()
try:
return importlib.import_module(name)
except ImportError:
if pkg_dir and exists:
print('failed to import existing module {0}'.format(name))
raise
return None
#
# configurator directives:
#
def register_utility(config, cls, interface, name='', overwrite=True):
if overwrite or not config.registry.queryUtility(interface, name=name):
config.registry.registerUtility(cls, provided=interface, name=name)
def register_cls(interface, config, route, cls, overwrite=True):
register_utility(config, cls, interface, name=route, overwrite=overwrite)
if not route.endswith('_alt'):
register_utility(config, cls, interface, name=route + '_alt', overwrite=overwrite)
def register_adapter(config, cls, from_, to_=None, name=None):
if isinstance(cls, dict):
cls = adapter_factory(**cls)
to_ = to_ or list(implementedBy(cls))[0]
name = name or cls.mimetype
config.registry.registerAdapter(cls, (from_,), to_, name=name)
def register_adapters(config, specs):
for interface, base, mimetype, extension, template, extra in specs:
extra.update(base=base, mimetype=mimetype, extension=extension, template=template)
config.register_adapter(extra, interface, name=mimetype)
def register_menu(config, *items):
"""Register an item for the main menu.
:param items: An item may be a (name, factory) pair, where factory is a callable that\
accepts the two parameters (ctx, req) and returns a pair (url, label) to use for the\
menu link; or a route name, or a pair (route name, dict), where dict is used as\
keyword arguments for menu_item.
"""
menuitems = collections.OrderedDict()
for item in items:
if isinstance(item, str):
item = (item, {})
name, factory = item
if isinstance(factory, dict):
factory = functools.partial(menu_item, name, **factory)
menuitems[name] = factory
config.registry.registerUtility(menuitems, interfaces.IMenuItems)
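# A sketch of how an app might call this directive (the route names below are
# only examples):
#
#   config.register_menu(
#       'contributions',
#       ('languages', dict(label='Varieties')),
#       ('blog', lambda ctx, req: (req.route_url('blog'), u'Blog')))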
def add_route_and_view(config, route_name, route_pattern, view, **kw):
"""Add a route and a corresponding view and appropriate default routes and views.
.. note:: To allow custom route patterns we look them up in a dict in settings.
"""
route_patterns = config.registry.settings.get('route_patterns', {})
route_pattern = route_patterns.get(route_name, route_pattern)
alt_route_pattern = kw.pop('alt_route_pattern', route_pattern + '.{ext}')
route_kw = {}
factory = kw.pop('factory', None)
if factory:
route_kw['factory'] = factory
config.add_route(route_name, route_pattern, **route_kw)
config.add_view(view, route_name=route_name, **kw)
config.add_route(route_name + '_alt', alt_route_pattern, **route_kw)
config.add_view(view, route_name=route_name + '_alt', **kw)
def register_resource_routes_and_views(config, rsc):
kw = dict(factory=functools.partial(ctx_factory, rsc.model, 'rsc'))
if rsc.model == common.Dataset:
pattern = '/'
kw['alt_route_pattern'] = '/void.{ext}'
else:
pattern = r'/%s/{id:[^/\.]+}' % rsc.plural
config.add_route_and_view(rsc.name, pattern, resource_view, **kw)
if rsc.with_index:
config.add_route_and_view(
rsc.plural,
'/%s' % rsc.plural,
index_view,
factory=functools.partial(ctx_factory, rsc.model, 'index'))
def register_resource(config, name, model, interface, with_index=False, **kw):
"""Directive to register custom resources.
.. note::
The directive accepts arbitrary keyword arguments for backwards compatibility.
"""
# in case of tests, this method may be called multiple times!
if [rsc for rsc in RESOURCES if rsc.name == name]:
return
rsc = Resource(name, model, interface, with_index=with_index)
RESOURCES.append(rsc)
config.register_resource_routes_and_views(rsc)
if not config.registry.queryUtility(interfaces.IDataTable, name=rsc.plural):
config.register_datatable(
rsc.plural, getattr(datatables, rsc.plural.capitalize(), DataTable))
register_resource_adapters(config, rsc)
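# Example of registering a custom resource from an app's ``main`` (the model
# and interface names below are hypothetical):
#
#   config.register_resource('wordlist', Wordlist, IWordlist, with_index=True)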
def register_download(config, download):
config.registry.registerUtility(download, interfaces.IDownload, name=download.name)
StaticResource = collections.namedtuple('StaticResource', 'type asset_spec')
def register_staticresource(config, type, asset_spec):
config.registry.registerUtility(
StaticResource(type, asset_spec), interfaces.IStaticResource, name=asset_spec)
def add_settings_from_file(config, file_):
if file_.exists():
cfg = get_config(file_)
if 'mako.directories_list' in cfg:
cfg['mako.directories'] = cfg['mako.directories_list'] # pragma: no cover
config.add_settings(cfg)
def _route_and_view(config, pattern, view, name=None):
name = name or str(uuid.uuid4())
config.add_route(name, pattern)
config.add_view(view, route_name=name)
def add_301(config, pattern, location, name=None):
_route_and_view(
config, pattern, xpartial(redirect, HTTPMovedPermanently, location), name=name)
def add_410(config, pattern, name=None):
_route_and_view(config, pattern, gone, name=name)
def add_page(config, name, pattern=None, view=None, template=None, views=None):
views = views or maybe_import('%s.views' % config.root_package.__name__)
config.add_route_and_view(
name,
pattern or '/' + name,
view or getattr(views, name, lambda r: {}),
renderer=template or name + '.mako')
def includeme(config):
"""Upgrading:
- register utilities "by hand", after config.include('clld.web.app')
- add routes by hand (and remove these from the **kw passed to Configurator)
:param config:
:return:
"""
#
# now we exploit the default package layout as created via the CLLD scaffold:
#
# note: the following exploits the import time side effect of modifying the webassets
# environment!
root_package = config.root_package.__name__
pkg_dir = pathlib.Path(config.root_package.__file__).parent.resolve()
maybe_import('%s.assets' % root_package, pkg_dir=pkg_dir)
json_renderer = JSON()
json_renderer.add_adapter(datetime.datetime, lambda obj, req: obj.isoformat())
json_renderer.add_adapter(datetime.date, lambda obj, req: obj.isoformat())
config.add_renderer('json', json_renderer)
jsonp_renderer = JSONP(param_name='callback')
jsonp_renderer.add_adapter(datetime.datetime, lambda obj, req: obj.isoformat())
jsonp_renderer.add_adapter(datetime.date, lambda obj, req: obj.isoformat())
config.add_renderer('jsonp', jsonp_renderer)
config.set_request_factory(ClldRequest)
config.registry.registerUtility(CtxFactoryQuery(), interfaces.ICtxFactoryQuery)
config.registry.registerUtility(OlacConfig(), interfaces.IOlacConfig)
config.registry.registerUtility(CldfConfig(), interfaces.ICldfConfig)
# initialize the db connection
engine = engine_from_config(config.registry.settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
try:
git_tag = git_describe(pathlib.Path(pkg_dir).parent)
except ValueError: # pragma: no cover
git_tag = None
config.add_settings({
'pyramid.default_locale_name': 'en',
'clld.pkg': root_package,
'clld.git_tag': git_tag,
'clld.parameters': {}})
if 'clld.files' in config.registry.settings:
# deployment-specific location of static data files
abspath = pathlib.Path(config.registry.settings['clld.files']).resolve()
config.add_settings({'clld.files': abspath})
config.add_static_view('files', str(abspath))
# event subscribers:
config.add_subscriber(add_localizer, events.NewRequest)
config.add_subscriber(init_map, events.ContextFound)
config.add_subscriber(
functools.partial(
add_renderer_globals,
maybe_import('%s.util' % root_package, pkg_dir=pkg_dir)),
events.BeforeRender)
#
# make it easy to register custom functionality
#
for name, func in {
'register_utility': register_utility,
'register_datatable': functools.partial(register_cls, interfaces.IDataTable),
'register_map': functools.partial(register_cls, interfaces.IMap),
'register_menu': register_menu,
'register_resource': register_resource,
'register_adapter': register_adapter,
'register_adapters': register_adapters,
'register_download': register_download,
'register_staticresource': register_staticresource,
'add_route_and_view': add_route_and_view,
'add_settings_from_file': add_settings_from_file,
'add_301': add_301,
'add_410': add_410,
'add_page': add_page,
'register_resource_routes_and_views': register_resource_routes_and_views,
}.items():
config.add_directive(name, func)
#
# routes and views
#
config.add_static_view('clld-static', 'clld:web/static')
config.add_static_view('static', '%s:static' % root_package)
config.add_route_and_view('_js', '/_js', js, http_cache=3600)
# add some maintenance hatches
config.add_route_and_view('_raise', '/_raise', _raise)
config.add_route_and_view('_ping', '/_ping', _ping, renderer='json')
# sitemap support:
config.add_route_and_view('robots', '/robots.txt', robots)
config.add_route_and_view('sitemapindex', '/sitemap.xml', sitemapindex)
config.add_route_and_view('sitemap', '/sitemap.{rsc}.{n}.xml', sitemap)
config.add_route('resourcemap', '/resourcemap.json')
config.add_view(resourcemap, route_name='resourcemap', renderer='jsonp')
config.add_route_and_view(
'select_combination', '/_select_combination', select_combination)
config.add_route_and_view('unapi', '/unapi', unapi)
config.add_route_and_view('olac', '/olac', olac)
config.add_settings_from_file(pkg_dir.joinpath('appconf.ini'))
if not config.registry.settings.get('mako.directories'):
config.add_settings({'mako.directories': ['clld:web/templates']})
for rsc in RESOURCES:
config.register_resource_routes_and_views(rsc)
config.register_datatable(
rsc.plural, getattr(datatables, rsc.plural.capitalize(), DataTable))
register_resource_adapters(config, rsc)
# maps
config.register_map('languages', Map)
config.register_map('language', LanguageMap)
config.register_map('parameter', ParameterMap)
config.register_map('combination', CombinationMap)
config.include('clld.web.adapters')
for icon in ICONS:
config.registry.registerUtility(icon, interfaces.IIcon, name=icon.name)
config.registry.registerUtility(ORDERED_ICONS, interfaces.IIconList)
config.registry.registerUtility(MapMarker(), interfaces.IMapMarker)
#
# inspect default locations for views and templates:
#
home_comp = collections.OrderedDict()
for name, template in [
('introduction', False),
('about', False),
('terms', False),
('glossary', False),
('history', False),
('changes', False),
('credits', False),
('legal', True),
('download', True),
('contact', True),
('help', False),
]:
home_comp[name] = template
if pkg_dir.joinpath('templates').exists():
for p in pkg_dir.joinpath('templates').iterdir():
if p.stem in home_comp and p.suffix == '.mako':
home_comp[p.stem] = True
for name, template in home_comp.items():
if template:
config.add_page(name)
config.add_settings({'home_comp': [k for k in home_comp.keys() if home_comp[k]]})
if 'clld.favicon' not in config.registry.settings:
favicon = {'clld.favicon': 'clld:web/static/images/favicon.ico'}
# hard to test (in particular on travis) and without too much consequence
# (and the consequences faced are easy to spot).
if pkg_dir.joinpath('static', 'favicon.ico').exists(): # pragma: no cover
favicon['clld.favicon'] = root_package + ':static/favicon.ico'
config.add_settings(favicon)
config.add_settings({
'clld.favicon_hash': md5(abspath_from_asset_spec(
config.registry.settings['clld.favicon']))})
translation_dirs = ['clld:locale']
if pkg_dir.joinpath('locale').exists():
translation_dirs.append('%s:locale' % root_package) # pragma: no cover
config.add_translation_dirs(*translation_dirs)
if pkg_dir.joinpath('static/publisher_logo.png').exists(): # pragma: no cover
config.add_settings(
{'clld.publisher_logo': '%s:static/publisher_logo.png' % root_package})
if asbool(config.registry.settings.get('clld.pacific_centered_maps')):
geojson.pacific_centered()
v = maybe_import('%s.views' % root_package, pkg_dir=pkg_dir)
if v:
config.scan(v) # pragma: no cover
menuitems = config.registry.settings.get(
'clld.menuitems_list',
['contributions', 'parameters', 'languages', 'contributors'])
config.register_menu(
('dataset', lambda ctx, req: (req.resource_url(req.dataset), req.translate('Home'))),
*menuitems)
config.include('pyramid_mako')
for name in ['adapters', 'datatables', 'maps']:
mod = maybe_import('%s.%s' % (root_package, name), pkg_dir=pkg_dir)
if mod and hasattr(mod, 'includeme'):
config.include(mod)
config.register_download(CldfDownload(common.Dataset, root_package))
| 37.216524 | 93 | 0.653181 |
cf31b8e357da352e6be0d4edeb612efcf9adc9c8 | 2,246 | py | Python | src/snc/simulation/store_data/numpy_encoder.py | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
] | 5 | 2021-03-24T16:23:10.000Z | 2021-11-17T12:44:51.000Z | src/snc/simulation/store_data/numpy_encoder.py | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
] | 3 | 2021-03-26T01:16:08.000Z | 2021-05-08T22:06:47.000Z | src/snc/simulation/store_data/numpy_encoder.py | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
] | 2 | 2021-03-24T17:20:06.000Z | 2021-04-19T09:01:12.000Z | from copy import deepcopy
import cvxpy as cvx
import json
import numpy as np
import re
from typing import Any, Dict
class NumpyEncoder(json.JSONEncoder):
"""
A JSON encoder that can handle numpy objects and convert to json.
"""
# Example taken from python.org docs
def default(self, o): # pylint: disable=E0202
if hasattr(o, 'to_serializable'):
return o.to_serializable()
elif isinstance(o, set):
return list(o)
elif isinstance(o, np.integer):
return int(o)
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.random.RandomState):
return str(o)
return json.JSONEncoder.default(self, o)
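# Illustrative usage (a sketch, not executed on import):
#
#   json.dumps({'a': np.arange(3)}, cls=NumpyEncoder)   # -> '{"a": [0, 1, 2]}'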
def format_json_with_np(string: str) -> str:
"""
    json.dumps puts every item on its own line. These regex substitutions
    reformat an indented JSON string so that the innermost arrays stay on a
    single line.
    :param string: a string of JSON.
    :return: s: string of indented JSON.
"""
s = re.sub(r'([-0-9],)\n\s*(?=[-0-9])', r'\1 ', string)
s = re.sub(r'\[\n\s*([-0-9])', r'[\1', s)
s = re.sub(r'([-0-9])\n\s*\]', r'\1]', s)
return s
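# A typical call site might combine both helpers (sketch; ``report`` is a
# hypothetical dict containing numpy values):
#
#   s = json.dumps(report, cls=NumpyEncoder, indent=2)
#   s = format_json_with_np(s)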
def clean_to_serializable(self: Any) -> Dict:
d = deepcopy(self.__dict__)
return iterate_dict(d)
def is_not_valid_json_value(value) -> bool:
invalid_instances = (
cvx.Parameter,
cvx.Problem,
cvx.Variable,
cvx.constraints.Inequality,
cvx.constraints.Equality,
type
)
return isinstance(value, invalid_instances) or callable(value)
def iterate_dict(o):
d = deepcopy(o)
for key, value in o.items():
if is_not_valid_json_value(value):
d.pop(key)
elif isinstance(value, dict):
d[key] = iterate_dict(value)
elif isinstance(value, list):
d[key] = iterate_list(value)
return d
def iterate_list(o):
clr = []
for i, value in enumerate(o):
if is_not_valid_json_value(value):
clr.append(i)
elif isinstance(value, list):
o[i] = iterate_list(value)
elif isinstance(value, dict):
o[i] = iterate_dict(value)
clr.sort(reverse=True)
for i in clr:
o.pop(i)
return o
| 26.738095 | 95 | 0.596171 |
1138d719b0fac41d5b0ebf0f7c11363212419baf | 6,984 | py | Python | build/lib/XMC/loaders/loader_libsvm.py | Stomach-ache/GLaS | 253092cce1922711e7d9c9df601f117f3ec56e0c | [
"MIT"
] | 2 | 2020-07-13T05:13:50.000Z | 2021-04-26T12:09:32.000Z | build/lib/XMC/loaders/loader_libsvm.py | Stomach-ache/GLaS | 253092cce1922711e7d9c9df601f117f3ec56e0c | [
"MIT"
] | 4 | 2020-01-10T10:54:52.000Z | 2020-01-15T10:14:15.000Z | build/lib/XMC/loaders/loader_libsvm.py | pyschedelicsid/GlasXC | eb3077d7c1aa9ca25798b4c5d5dd95ce36fe5a4e | [
"MIT"
] | 2 | 2021-01-26T07:27:16.000Z | 2021-04-28T13:38:03.000Z | import os
import torch
import itertools
import numpy as np
import torch.utils.data
from scipy.sparse import csr_matrix
def _parse_data_point(data_point):
"""
Function to parse a row representing one input-output(s) pair in the LIBSVM format
Args:
data_point : row string
Returns:
        (class_list, cols, vals) : class_list is the set of outputs for the input,
cols are the indices of values
vals are the values taken at the indices
"""
class_list = np.empty(0).astype(np.int32)
elems = data_point.split(' ')
num_nonzero_features = len(elems) - 1
if elems[0] != '':
class_list = np.array(list(map(int, elems[0].split(','))))
elems = elems[1:]
cols = np.empty(num_nonzero_features).astype(np.int32)
vals = np.empty(num_nonzero_features).astype(np.float32)
for i in range(num_nonzero_features):
idx, val = elems[i].split(':')
cols[i] = int(idx)
vals[i] = float(val)
return class_list, cols, vals
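# Example of the expected row format (illustrative):
#
#   _parse_data_point('3,7 0:0.5 4:1.2')
#   # -> (array([3, 7]), array([0, 4]), array([0.5, 1.2]))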
class LibSVMLoader(torch.utils.data.Dataset):
"""
Class for a dataset in the LibSVM format.
"""
def __init__(self, file_path=None, dataset_info=None, feature_matrix=None, class_matrix=None):
"""
Initializes the loader. Either file_path and dataset_info, or feature_matrix and
class_matrix, must be provided. If both are provided, data is loaded from the file.
Args:
            file_path : Path to the file containing the dataset. The file should consist only of
                        rows of data in the LibSVM format.
dataset_info : Dictionary consisting of three fields `num_data_points`, `input_dims`
and `output_dims`.
feature_matrix : Precomputed feature_matrix.
class_matrix : Precomputed class_matrix.
"""
assert (file_path is not None and dataset_info is not None) or (
feature_matrix is not None and class_matrix is not None), \
"Either file path, or feature and class matrices must be specified"
if file_path is not None:
assert os.path.isfile(file_path), file_path + " does not exist!"
self.num_data_points = dataset_info['num_data_points']
self.input_dims = dataset_info['input_dims']
self.output_dims = dataset_info['output_dims']
with open(file_path, 'r') as f:
data = f.readlines()
assert self.num_data_points == len(data), "Mismatch in number of data points"
class_matrix = []
data_rows_matrix = []
class_rows_matrix = []
cols_matrix = []
data_matrix = []
for i in range(self.num_data_points):
class_list, cols, vals = _parse_data_point(data[i])
class_matrix.append(class_list)
cols_matrix.append(cols)
data_matrix.append(vals)
data_rows_matrix.append(np.full(len(cols_matrix[i]), i))
class_rows_matrix.append(np.full(len(class_matrix[i]), i))
class_matrix = list(itertools.chain.from_iterable(class_matrix))
data_rows_matrix = list(itertools.chain.from_iterable(data_rows_matrix))
class_rows_matrix = list(itertools.chain.from_iterable(class_rows_matrix))
cols_matrix = list(itertools.chain.from_iterable(cols_matrix))
data_matrix = list(itertools.chain.from_iterable(data_matrix))
assert len(data_matrix) == len(data_rows_matrix) and len(
data_matrix) == len(cols_matrix)
assert len(class_rows_matrix) == len(class_matrix)
self.features = csr_matrix((data_matrix, (data_rows_matrix, cols_matrix)),
shape=(self.num_data_points, self.input_dims),
dtype=np.float32)
self.classes = csr_matrix((np.ones(len(class_rows_matrix)),
(class_rows_matrix, class_matrix)),
shape=(self.num_data_points, self.output_dims))
else:
assert feature_matrix.get_shape()[0] == class_matrix.get_shape()[0],\
"Mismatch in number of features and classes"
self.num_data_points = feature_matrix.get_shape()[0]
self.input_dims = feature_matrix.get_shape()[1]
            self.output_dims = class_matrix.get_shape()[1]
self.features = feature_matrix
self.classes = class_matrix
def __len__(self):
return self.num_data_points
def __getitem__(self, idx):
return (torch.from_numpy(self.features[idx].todense()).view(-1),
torch.from_numpy(self.classes[idx].todense()).view(-1))
def __repr__(self):
fmt_str = 'Sparse dataset of size \
({0} x {1}), ({0} x {2})'.format(len(self), self.input_dims, self.output_dims)
fmt_str += ' in LIBSVM format'
return fmt_str
def train_test_split(self, test_fraction=0.2, random_seed=42):
"""
Function to split the data randomly into train and test splits in a specified ratio.
Args:
test_fraction : Fraction of elements to keep in the test set.
random_seed : Random seed for shuffling data before splitting.
Returns:
(train_loader, test_loader) : Loaders containing the train and test splits respectively
"""
assert test_fraction >= 0.0 and test_fraction <= 1.0,\
"Test set fraction must lie in [0, 1]"
np.random.seed(random_seed)
permutation = np.random.permutation(np.arange(self.num_data_points))
permuted_features = self.features[permutation]
permuted_classes = self.classes[permutation]
split_index = int(self.num_data_points * (1 - test_fraction))
train_loader = LibSVMLoader(feature_matrix=permuted_features[:split_index],
class_matrix=permuted_classes[:split_index])
test_loader = LibSVMLoader(feature_matrix=permuted_features[split_index:],
class_matrix=permuted_classes[split_index:])
return train_loader, test_loader
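    # Illustrative usage (the file path and dimensions are hypothetical):
    #
    #   loader = LibSVMLoader('train.txt', {'num_data_points': 100,
    #                                       'input_dims': 5000,
    #                                       'output_dims': 3786})
    #   train, test = loader.train_test_split(test_fraction=0.2)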
def get_data(self):
"""
Function to get the entire dataset
"""
return (self.features, self.classes)
def get_features(self):
"""
Function to get the entire set of features
"""
return self.features
def get_classes(self):
"""
Function to get the entire set of classes
"""
return self.classes
def num_classes(self):
"""
Function to get number of classes in the dataset
"""
return self.output_dims
| 40.842105 | 99 | 0.601518 |
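The loader above assembles its sparse feature and label matrices from (value, (row, col)) triplets. A minimal standalone sketch of that construction on toy data, assuming only numpy and scipy (the values and shapes below are illustrative, not taken from any real dataset):
import numpy as np
from scipy.sparse import csr_matrix
# Two data points with 4 input dims: point 0 has features in columns 1 and 3,
# point 1 has a single feature in column 0.
vals, rows, cols = [0.5, 1.5, 2.0], [0, 0, 1], [1, 3, 0]
features = csr_matrix((vals, (rows, cols)), shape=(2, 4), dtype=np.float32)
# Multi-label targets are assembled the same way, with an all-ones data vector.
label_rows, label_cols = [0, 1, 1], [2, 0, 5]
classes = csr_matrix((np.ones(len(label_rows)), (label_rows, label_cols)), shape=(2, 6))
print(features.todense())
print(classes.todense())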
366a71db6569a4e5bb7d2a39506b9253fd14e63a | 3,236 | py | Python | testAppium/unit/testcase/result_activity/result_activity.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | testAppium/unit/testcase/result_activity/result_activity.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | testAppium/unit/testcase/result_activity/result_activity.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author:wanghui
# @Time:
# @File:result_activity.py
import os
from time import sleep
from testAppium.unit.common.webdriverUnit import WebdriverUnit
from testAppium.unit.common import toolUnits
from testAppium.unit.testcase.result_activity import result_units
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
class ResultActivity(WebdriverUnit):
"""结果页测试用例"""
def setUp(self):
self.start_driver()
        print('Starting execution -- results page test suite')
self.find_element_id_and_click_wait(self.ele.SY_HomepageNavTab)
result_units.input_result(self)
def test_03_109_030017_enter_lodge_unit_detail(self):
"""进入房源详情页"""
self.find_element_id_and_click_wait(self.ele.SY_search_widget_btn_search)
toolUnits.wait(self.driver, self.ele.JGY_tv_search_list_item_title, 5)
house_name = self.get_text(self.ele.JGY_tv_search_list_item_title)
self.find_element_id_and_click_wait(self.ele.JGY_iv_loop_image_item)
        self.assertEqual(house_name, self.get_text(self.ele.XQY_tv_lodge_detail_unit_name), 'Failed to enter the detail page')
self.backlast(2)
def test_03_256_InputChinese_SearchInList(self):
"""结果页添加地标位置推荐正常"""
self.find_element_id_and_click_wait(self.ele.SY_search_widget_btn_search)
toolUnits.wait(self.driver, self.ele.JGY_tv_search_list_item_title, 5)
self.find_element_id_and_click_wait(self.ele.JGY_tv_result_page_input_box)
localName = "天通苑"
self.find_element_id_and_send_keys(self.ele.SS_et_search_input, localName)
self.find_element_id_and_click_wait(self.ele.SS_pop_fxs_title)
# self.find_element_text_and_click_wait('推荐排序')
self.find_element_id_and_click_wait(self.ele.JGY_rl_indicator_check_time, -1)
self.find_element_id_and_click_wait(self.ele.JGY_search_filter_orderBy_distance_tv)
toolUnits.wait(self.driver, self.ele.JGY_tv_search_list_item_title, timeouts=3)
houseName= self.get_text(self.ele.JGY_tv_search_list_item_title)
# self.assertIn(localName, houseName, '输入地标位置搜索结果失败')
self.find_element_id_and_click_wait(self.ele.JGY_iv_result_page_clear_input)
self.assertIsNot(houseName, self.get_text(self.ele.JGY_tv_search_list_item_title))
self.backlast(1)
def test_03_272_030174_SearchInMap(self):
"""切换至地图页面操作"""
self.find_element_id_and_click_wait(self.ele.SY_search_widget_btn_search)
toolUnits.wait(self.driver, self.ele.JGY_iv_result_page_mod_switch, 5)
self.find_element_id_and_click_wait(self.ele.JGY_iv_result_page_mod_switch)
self.find_element_id_and_click_wait(self.ele.JGY_tv_result_page_input_box)
localName = "天通苑"
self.find_element_id_and_send_keys(self.ele.SS_et_search_input, localName)
self.find_element_id_and_click_wait(self.ele.SS_pop_fxs_title, timeout=3)
print(self.get_text(self.ele.JGY_map_address_background))
        self.assertEqual(localName, self.get_text(self.ele.JGY_map_address_background), 'Landmark search on the map page failed')
self.backlast(1)
def tearDown(self):
        print('Finished execution -- results page test suite')
self.driver.quit()
if __name__ == '__main__':
pass
| 42.025974 | 102 | 0.751545 |
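The class above only defines the test cases; a hedged sketch of running them directly with unittest, assuming WebdriverUnit ultimately derives from unittest.TestCase (that base class is not shown in this file):
import unittest
suite = unittest.TestLoader().loadTestsFromTestCase(ResultActivity)
unittest.TextTestRunner(verbosity=2).run(suite)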
459fc0a9659b9cc3d80dd4a2bb0aad136ef9c603 | 3,183 | py | Python | DPGAnalysis/Skims/python/cosmicTPSkim_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DPGAnalysis/Skims/python/cosmicTPSkim_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DPGAnalysis/Skims/python/cosmicTPSkim_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAny_cfi import *
cosmictrackfinderP5TkCntFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag('cosmictrackfinderP5'),
minNumber = cms.uint32(1)
)
ctfWithMaterialTracksP5TkCntFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag('ctfWithMaterialTracksP5'),
minNumber = cms.uint32(1)
)
rsWithMaterialTracksP5TkCntFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag('rsWithMaterialTracksP5'),
minNumber = cms.uint32(1)
)
cosmicMuonsBarrelOnlyTkFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("cosmicMuons"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(90.0),
maxZ = cms.double(130.0),
)
cosmicMuonsEndCapsOnlyTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("cosmicMuonsEndCapsOnly"))
cosmicMuonsTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("cosmicMuons"))
cosmicMuons1LegTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("cosmicMuons1Leg"))
globalCosmicMuonsBarrelOnlyTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("globalCosmicMuons"))
globalCosmicMuonsEndCapsOnlyTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("globalCosmicMuons"))
globalCosmicMuonsTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("globalCosmicMuons"))
globalCosmicMuons1LegTkFilter = cosmicMuonsBarrelOnlyTkFilter.clone(SALabel = cms.InputTag("globalCosmicMuons1Leg"))
cosmicMuonsBarrelOnlyTkSequence = cms.Sequence(cosmicMuonsBarrelOnlyTkFilter)
cosmicMuonsEndCapsOnlyTkSequence = cms.Sequence(cosmicMuonsEndCapsOnlyTkFilter)
cosmicMuonsTkSequence = cms.Sequence(cosmicMuonsTkFilter)
cosmicMuons1LegTkSequence = cms.Sequence(cosmicMuons1LegTkFilter)
globalCosmicMuonsBarrelOnlyTkSequence = cms.Sequence(globalCosmicMuonsBarrelOnlyTkFilter)
globalCosmicMuonsEndCapsOnlyTkSequence = cms.Sequence(globalCosmicMuonsEndCapsOnlyTkFilter)
globalCosmicMuonsTkSequence = cms.Sequence(globalCosmicMuonsTkFilter)
globalCosmicMuons1LegTkSequence = cms.Sequence(globalCosmicMuons1LegTkFilter)
cosmictrackfinderP5TkCntSequence = cms.Sequence(cosmictrackfinderP5TkCntFilter)
ctfWithMaterialTracksP5TkCntSequence = cms.Sequence(ctfWithMaterialTracksP5TkCntFilter)
rsWithMaterialTracksP5TkCntSequence = cms.Sequence(rsWithMaterialTracksP5TkCntFilter)
| 64.959184 | 124 | 0.659441 |
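The fragment above only declares filters and sequences; they take effect once scheduled in a top-level configuration. A hedged sketch of that wiring (the process name and path name are illustrative assumptions, not part of this file):
import FWCore.ParameterSet.Config as cms
process = cms.Process("SKIM")
process.load("DPGAnalysis.Skims.cosmicTPSkim_cff")
# Schedule one of the skim sequences defined above in a path.
process.cosmicMuonsBarrelOnlyTkPath = cms.Path(process.cosmicMuonsBarrelOnlyTkSequence)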
ec3f3c4a5b72492d3c8e2c98dc0b52818b70ce63 | 1,775 | py | Python | angr/procedures/definitions/win32_xaudio2_8.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_xaudio2_8.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_xaudio2_8.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("xaudio2_8.dll")
prototypes = \
{
#
'CreateFX': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Guid"), offset=0), SimTypePointer(SimTypeBottom(label="IUnknown"), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["clsid", "pEffect", "pInitDat", "InitDataByteSize"]),
#
'XAudio2CreateWithVersionInfo': SimTypeFunction([SimTypePointer(SimTypeBottom(label="IXAudio2"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["ppXAudio2", "Flags", "XAudio2Processor", "ntddiVersion"]),
#
'CreateAudioVolumeMeter': SimTypeFunction([SimTypePointer(SimTypeBottom(label="IUnknown"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ppApo"]),
#
'CreateAudioReverb': SimTypeFunction([SimTypePointer(SimTypeBottom(label="IUnknown"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["ppApo"]),
}
lib.set_prototypes(prototypes)
| 59.166667 | 353 | 0.734085 |
a971d5bdfc5268f654f8507ea432f17dbddd56b4 | 2,603 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/RemoveTagsRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/RemoveTagsRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/RemoveTagsRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class RemoveTagsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'RemoveTags','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_ResourceId(self):
return self.get_query_params().get('ResourceId')
def set_ResourceId(self,ResourceId):
self.add_query_param('ResourceId',ResourceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ResourceType(self):
return self.get_query_params().get('ResourceType')
def set_ResourceType(self,ResourceType):
self.add_query_param('ResourceType',ResourceType) | 36.152778 | 89 | 0.749904 |
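A hedged usage sketch for the request class above: AcsClient and do_action_with_exception come from aliyunsdkcore (not shown here), and the credentials, region, and resource id are placeholders.
from aliyunsdkcore.client import AcsClient
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = RemoveTagsRequest()
request.set_ResourceType('instance')
request.set_ResourceId('i-xxxxxxxxxxxx')
request.set_Tags([{'Key': 'env', 'Value': 'dev'}])  # becomes Tag.1.Key / Tag.1.Value query params
print(client.do_action_with_exception(request))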
e9b68dc18850312aed7abc03e4d32aa9db2cb939 | 5,666 | py | Python | newrelic_plugin_agent/plugins/apache_httpd.py | hakonber/newrelic-plugin-agent | 38f7cc011f1b279a4afbf9c7e932fbc5f99475f9 | [
"BSD-3-Clause"
] | null | null | null | newrelic_plugin_agent/plugins/apache_httpd.py | hakonber/newrelic-plugin-agent | 38f7cc011f1b279a4afbf9c7e932fbc5f99475f9 | [
"BSD-3-Clause"
] | null | null | null | newrelic_plugin_agent/plugins/apache_httpd.py | hakonber/newrelic-plugin-agent | 38f7cc011f1b279a4afbf9c7e932fbc5f99475f9 | [
"BSD-3-Clause"
] | 1 | 2021-02-17T09:47:53.000Z | 2021-02-17T09:47:53.000Z | """
ApacheHTTPD Support
"""
import logging
import re
from newrelic_plugin_agent.plugins import base
LOGGER = logging.getLogger(__name__)
PATTERN = re.compile(r'^([\w\s{1}]+):\s([\d\.{1}]+)', re.M)
class ApacheHTTPD(base.HTTPStatsPlugin):
DEFAULT_QUERY = 'auto'
GUID = 'com.meetme.newrelic_apache_httpd_agent'
KEYS = {'Total Accesses': {'type': '',
'label': 'Totals/Requests',
'suffix': 'requests'},
'BusyWorkers': {'type': 'gauge',
'label': 'Workers/Busy',
'suffix': 'workers'},
'Total kBytes': {'type': '',
'label': 'Totals/Bytes Sent',
'suffix': 'kb'},
'BytesPerSec': {'type': 'gauge',
'label': 'Bytes/Per Second',
'suffix': 'bytes/sec'},
'BytesPerReq': {'type': 'gauge',
'label': 'Requests/Average Payload Size',
'suffix': 'bytes'},
'IdleWorkers': {'type': 'gauge', 'label': 'Workers/Idle',
'suffix': 'workers'},
'CPULoad': {'type': 'gauge', 'label': 'CPU Load',
'suffix': 'processes'},
'ReqPerSec': {'type': 'gauge', 'label': 'Requests/Velocity',
'suffix': 'requests/sec'},
'Uptime': {'type': 'gauge', 'label': 'Uptime', 'suffix': 'sec'},
'ConnsTotal': {'type': 'gauge', 'label': 'Connections/Total', 'suffix': 'conns'},
'ConnsAsyncWriting': {'type': 'gauge', 'label': 'Connections/AsyncWriting', 'suffix': 'conns'},
'ConnsAsyncKeepAlive': {'type': 'gauge', 'label': 'Connections/AsyncKeepAlive', 'suffix': 'conns'},
'ConnsAsyncClosing': {'type': 'gauge', 'label': 'Connections/AsyncClosing', 'suffix': 'conns'},
'_': {'type': 'gauge', 'label': 'Scoreboard/Waiting For Conn', 'suffix': 'slots'},
'S': {'type': 'gauge', 'label': 'Scoreboard/Starting Up', 'suffix': 'slots'},
'R': {'type': 'gauge', 'label': 'Scoreboard/Reading Request', 'suffix': 'slots'},
'W': {'type': 'gauge', 'label': 'Scoreboard/Sending Reply', 'suffix': 'slots'},
'K': {'type': 'gauge', 'label': 'Scoreboard/Keepalive Read', 'suffix': 'slots'},
'D': {'type': 'gauge', 'label': 'Scoreboard/DNS Lookup', 'suffix': 'slots'},
'C': {'type': 'gauge', 'label': 'Scoreboard/Closing Conn', 'suffix': 'slots'},
'L': {'type': 'gauge', 'label': 'Scoreboard/Logging', 'suffix': 'slots'},
'G': {'type': 'gauge', 'label': 'Scoreboard/Gracefully Finishing', 'suffix': 'slots'},
'I': {'type': 'gauge', 'label': 'Scoreboard/Idle Cleanup', 'suffix': 'slots'},
'.': {'type': 'gauge', 'label': 'Scoreboard/Open Slot', 'suffix': 'slots'}}
def error_message(self):
LOGGER.error('Could not match any of the stats, please make ensure '
'Apache HTTPd is configured correctly. If you report '
'this as a bug, please include the full output of the '
'status page from %s in your ticket', self.stats_url)
def get_scoreboard(self, data):
"""Fetch the scoreboard from the stats URL
:rtype: str
"""
keys = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.']
values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
score_out = dict(zip(keys, values))
for line in data.splitlines():
if line.find('Scoreboard') != -1:
scoreboard = line.replace('Scoreboard: ','')
for i in range(0, len(scoreboard)):
score_out[scoreboard[i]] += 1
return score_out
def add_datapoints(self, stats):
"""Add all of the data points for a node
:param str stats: The stats content from Apache as a string
"""
matches = PATTERN.findall(stats or '')
for key, value in matches:
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
value = 0
if key in self.KEYS:
if self.KEYS[key].get('type') == 'gauge':
self.add_gauge_value(self.KEYS[key]['label'],
self.KEYS[key].get('suffix', ''),
value)
else:
self.add_derive_value(self.KEYS[key]['label'],
self.KEYS[key].get('suffix', ''),
value)
else:
LOGGER.debug('Found unmapped key/value pair: %s = %s',
key, value)
score_data = self.get_scoreboard(stats)
for key, value in score_data.items():
if key in self.KEYS:
if self.KEYS[key].get('type') == 'gauge':
self.add_gauge_value(self.KEYS[key]['label'],
self.KEYS[key].get('suffix', ''),
value)
else:
self.add_derive_value(self.KEYS[key]['label'],
self.KEYS[key].get('suffix', ''),
value)
else:
LOGGER.debug('Found unmapped key/value pair: %s = %s',
key, value)
| 45.328 | 111 | 0.461525 |
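get_scoreboard() above tallies worker-slot states from the mod_status 'Scoreboard:' line. A standalone sketch of the same counting on a made-up status line:
sample = 'Scoreboard: W_____KK....R'  # made-up mod_status output
keys = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.']
counts = dict.fromkeys(keys, 0)
for slot in sample.replace('Scoreboard: ', ''):
    counts[slot] += 1
print(counts)  # {'_': 5, 'W': 1, 'K': 2, 'R': 1, '.': 4, ...}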
8434b438f2ac7ed062a3f545bad540039f044429 | 3,888 | py | Python | python/kerasSlidingForecast.py | Jacopo47/ssd-project | 51b1f50dda29e7b9f16a62af8d5364d18a7c2863 | [
"MIT"
] | null | null | null | python/kerasSlidingForecast.py | Jacopo47/ssd-project | 51b1f50dda29e7b9f16a62af8d5364d18a7c2863 | [
"MIT"
] | 1 | 2019-12-04T15:37:47.000Z | 2019-12-07T15:45:13.000Z | python/kerasSlidingForecast.py | Jacopo47/ssd-project | 51b1f50dda29e7b9f16a62af8d5364d18a7c2863 | [
"MIT"
] | null | null | null | # Sliding window MLP, Airline Passengers dataset (predicts t+1)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.layers import Dense
from keras.models import Sequential
import common
# from series of values to windows matrix
# this function builds the sliding windows
def compute_windows(nparray, npast=1):
dataX, dataY = [], [] # window and value
for i in range(len(nparray) - npast - 1):
a = nparray[i:(i + npast), 0]
dataX.append(a)
dataY.append(nparray[i + npast, 0])
return np.array(dataX), np.array(dataY)
def sliding_window_forecasting(path_to_sqlite, customer):
np.random.seed(550)
# for reproducibility
df = common.load_orders(path_to_sqlite, "'" + customer + "'")
dataset = df.values
# time series values
dataset = dataset.astype('float32') # needed for MLP input
# train - test sets
cutpoint = 3
    # hold out the last cutpoint observations as the test set
train, test = dataset[:-cutpoint], dataset[-cutpoint:]
print("Len train={0}, len test={1}".format(len(train), len(test)))
# sliding window matrices (npast = window width); dim = n - npast - 1
npast = 3
# trainX, trainY = compute_windows(train, npast)
# testX, testY = compute_windows(test, npast) # should get also the last npred of train
windowX, windowY = compute_windows(dataset, npast)
trainX, trainY = windowX[:-cutpoint], windowY[:-cutpoint]
testX, testY = windowX[-cutpoint:], windowY[-cutpoint:]
# Multilayer Perceptron model
model = Sequential()
    n_hidden = 8  # hidden-layer neurons
    n_output = 1  # output neurons
    # Build the network from here.
    # Add a Dense layer on the input (Dense means every neuron is connected to every input)
model.add(Dense(n_hidden, input_dim=npast, activation='relu')) # hidden neurons, 1 layer
    # Add the output layer to the network (Dense means every neuron in this layer
    # is connected to all neurons of the previous one)
model.add(Dense(n_output)) # output neurons
    # Compile the model: this actually instantiates the network in memory and tells it to
    # train by minimizing the error function. The chosen loss function is mean_squared_error;
    # the optimizer 'adam' is an optimized greedy optimizer
model.compile(loss='mean_squared_error', optimizer='adam')
    # model.fit is the call that actually runs backpropagation on the network; learning starts
    # here. epochs is the number of training iterations; batch_size partitions the series and
    # learns on a group of records at a time (in short, useful for parallelization)
    model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=2)  # batch_size: a divisor of the training-set size
# Model performance
    # Compute the error on the training set
trainScore = model.evaluate(trainX, trainY, verbose=0)
print('Score on train: MSE = {0:0.2f} '.format(trainScore))
    # Compute the error on the test set
testScore = model.evaluate(testX, testY, verbose=0)
print('Score on test: MSE = {0:0.2f} '.format(testScore))
trainPredict = model.predict(trainX) # predictions
    # Predicting on the test windows gives the forecast
testForecast = model.predict(testX) # forecast
plt.rcParams["figure.figsize"] = (10, 8) # redefines figure size
# plt.plot(train, label='Training set', color='blue') # blu
# plt.plot(test, label='Test set', color='green')
plt.plot(dataset, label='Dataset')
plt.plot(np.concatenate((np.full(1, np.nan), trainPredict[:, 0])), label='Train predict', color='orange') # orange
plt.plot(np.concatenate((np.full(len(train) + 1, np.nan), testForecast[:, 0])), label='Forecast',
color='red') # green
plt.legend()
plt.show()
sliding_window_forecasting('../ordiniMI2018.sqlite', 'cust40')
| 41.361702 | 119 | 0.692901 |
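compute_windows() above is the core transform of this script. A toy illustration of what it produces (values are illustrative):
import numpy as np
series = np.array([[10.0], [20.0], [30.0], [40.0], [50.0], [60.0]])
X, y = compute_windows(series, npast=3)
# Each row of X holds the npast previous values, y the value that follows that window:
# X == [[10, 20, 30], [20, 30, 40]],  y == [40, 50]
# (the loop runs to len - npast - 1, so the last possible window is dropped)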
04dababa24602f942a294228f8f050ec35fcb2ec | 9,472 | py | Python | airflow/utils/context.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/utils/context.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/utils/context.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Jinja2 template rendering context helper."""
import contextlib
import copy
import functools
import warnings
from typing import (
AbstractSet,
Any,
Container,
Dict,
ItemsView,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
ValuesView,
)
import lazy_object_proxy
from airflow.utils.types import NOTSET
# NOTE: Please keep this in sync with Context in airflow/utils/context.pyi.
KNOWN_CONTEXT_KEYS = {
"conf",
"conn",
"dag",
"dag_run",
"data_interval_end",
"data_interval_start",
"ds",
"ds_nodash",
"execution_date",
"exception",
"inlets",
"logical_date",
"macros",
"next_ds",
"next_ds_nodash",
"next_execution_date",
"outlets",
"params",
"prev_data_interval_start_success",
"prev_data_interval_end_success",
"prev_ds",
"prev_ds_nodash",
"prev_execution_date",
"prev_execution_date_success",
"prev_start_date_success",
"run_id",
"task",
"task_instance",
"task_instance_key_str",
"test_mode",
"templates_dict",
"ti",
"tomorrow_ds",
"tomorrow_ds_nodash",
"ts",
"ts_nodash",
"ts_nodash_with_tz",
"try_number",
"var",
"yesterday_ds",
"yesterday_ds_nodash",
}
class VariableAccessor:
"""Wrapper to access Variable values in template."""
def __init__(self, *, deserialize_json: bool) -> None:
self._deserialize_json = deserialize_json
self.var: Any = None
def __getattr__(self, key: str) -> Any:
from airflow.models.variable import Variable
self.var = Variable.get(key, deserialize_json=self._deserialize_json)
return self.var
def __repr__(self) -> str:
return str(self.var)
def get(self, key, default: Any = NOTSET) -> Any:
from airflow.models.variable import Variable
if default is NOTSET:
return Variable.get(key, deserialize_json=self._deserialize_json)
return Variable.get(key, default, deserialize_json=self._deserialize_json)
class ConnectionAccessor:
"""Wrapper to access Connection entries in template."""
def __init__(self) -> None:
self.var: Any = None
def __getattr__(self, key: str) -> Any:
from airflow.models.connection import Connection
self.var = Connection.get_connection_from_secrets(key)
return self.var
def __repr__(self) -> str:
return str(self.var)
def get(self, key: str, default_conn: Any = None) -> Any:
from airflow.exceptions import AirflowNotFoundException
from airflow.models.connection import Connection
try:
return Connection.get_connection_from_secrets(key)
except AirflowNotFoundException:
return default_conn
class AirflowContextDeprecationWarning(DeprecationWarning):
"""Warn for usage of deprecated context variables in a task."""
def _create_deprecation_warning(key: str, replacements: List[str]) -> DeprecationWarning:
message = f"Accessing {key!r} from the template is deprecated and will be removed in a future version."
if not replacements:
return AirflowContextDeprecationWarning(message)
display_except_last = ", ".join(repr(r) for r in replacements[:-1])
if display_except_last:
message += f" Please use {display_except_last} or {replacements[-1]!r} instead."
else:
message += f" Please use {replacements[-1]!r} instead."
return AirflowContextDeprecationWarning(message)
class Context(MutableMapping[str, Any]):
"""Jinja2 template context for task rendering.
This is a mapping (dict-like) class that can lazily emit warnings when
(and only when) deprecated context keys are accessed.
"""
_DEPRECATION_REPLACEMENTS: Dict[str, List[str]] = {
"execution_date": ["data_interval_start", "logical_date"],
"next_ds": ["{{ data_interval_end | ds }}"],
"next_ds_nodash": ["{{ data_interval_end | ds_nodash }}"],
"next_execution_date": ["data_interval_end"],
"prev_ds": [],
"prev_ds_nodash": [],
"prev_execution_date": [],
"prev_execution_date_success": ["prev_data_interval_start_success"],
"tomorrow_ds": [],
"tomorrow_ds_nodash": [],
"yesterday_ds": [],
"yesterday_ds_nodash": [],
}
def __init__(self, context: Optional[MutableMapping[str, Any]] = None, **kwargs: Any) -> None:
self._context = context or {}
if kwargs:
self._context.update(kwargs)
self._deprecation_replacements = self._DEPRECATION_REPLACEMENTS.copy()
def __repr__(self) -> str:
return repr(self._context)
def __reduce_ex__(self, protocol: int) -> Tuple[Any, ...]:
"""Pickle the context as a dict.
We are intentionally going through ``__getitem__`` in this function,
instead of using ``items()``, to trigger deprecation warnings.
"""
items = [(key, self[key]) for key in self._context]
return dict, (items,)
def __copy__(self) -> "Context":
new = type(self)(copy.copy(self._context))
new._deprecation_replacements = self._deprecation_replacements.copy()
return new
def __getitem__(self, key: str) -> Any:
with contextlib.suppress(KeyError):
warnings.warn(_create_deprecation_warning(key, self._deprecation_replacements[key]))
with contextlib.suppress(KeyError):
return self._context[key]
raise KeyError(key)
def __setitem__(self, key: str, value: Any) -> None:
self._deprecation_replacements.pop(key, None)
self._context[key] = value
def __delitem__(self, key: str) -> None:
self._deprecation_replacements.pop(key, None)
del self._context[key]
def __contains__(self, key: object) -> bool:
return key in self._context
def __iter__(self) -> Iterator[str]:
return iter(self._context)
def __len__(self) -> int:
return len(self._context)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Context):
return NotImplemented
return self._context == other._context
def __ne__(self, other: Any) -> bool:
if not isinstance(other, Context):
return NotImplemented
return self._context != other._context
def keys(self) -> AbstractSet[str]:
return self._context.keys()
def items(self):
return ItemsView(self._context)
def values(self):
return ValuesView(self._context)
def context_merge(context: "Context", *args: Any, **kwargs: Any) -> None:
"""Merge parameters into an existing context.
Like ``dict.update()`` , this take the same parameters, and updates
``context`` in-place.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
context.update(*args, **kwargs)
def context_copy_partial(source: "Context", keys: Container[str]) -> "Context":
"""Create a context by copying items under selected keys in ``source``.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
new = Context({k: v for k, v in source._context.items() if k in keys})
new._deprecation_replacements = source._deprecation_replacements.copy()
return new
def lazy_mapping_from_context(source: Context) -> Mapping[str, Any]:
"""Create a mapping that wraps deprecated entries in a lazy object proxy.
This further delays deprecation warning to until when the entry is actually
used, instead of when it's accessed in the context. The result is useful for
passing into a callable with ``**kwargs``, which would unpack the mapping
too eagerly otherwise.
This is implemented as a free function because the ``Context`` type is
"faked" as a ``TypedDict`` in ``context.pyi``, which cannot have custom
functions.
:meta private:
"""
def _deprecated_proxy_factory(k: str, v: Any) -> Any:
replacements = source._deprecation_replacements[k]
warnings.warn(_create_deprecation_warning(k, replacements))
return v
def _create_value(k: str, v: Any) -> Any:
if k not in source._deprecation_replacements:
return v
factory = functools.partial(_deprecated_proxy_factory, k, v)
return lazy_object_proxy.Proxy(factory)
return {k: _create_value(k, v) for k, v in source._context.items()}
| 31.573333 | 107 | 0.669236 |
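lazy_mapping_from_context() above wraps deprecated entries in a proxy so the warning only fires when a value is actually used. A minimal standalone sketch of that pattern with lazy_object_proxy (not Airflow-specific; the key names are illustrative):
import warnings
import lazy_object_proxy
def deprecated_value(value):
    def factory():
        warnings.warn("this key is deprecated", DeprecationWarning)
        return value
    return lazy_object_proxy.Proxy(factory)
mapping = {"ok": 1, "old": deprecated_value(2)}
mapping["ok"]        # no warning
int(mapping["old"])  # the warning fires only when the proxy is resolved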
66fe04cfae2be053fecd93d3d4070c9ce7e6bc04 | 868 | py | Python | pennylane/templates/__init__.py | tobeyOguney/pennylane | 6f7449ce6529c4a35a6a07915bdbd7088f51b1d9 | [
"Apache-2.0"
] | 1 | 2021-06-05T23:46:32.000Z | 2021-06-05T23:46:32.000Z | pennylane/templates/__init__.py | israelferrazaraujo/pennylane | 0aaa797e17f3a010b564c3ffbb2fb3f8ad5d3de8 | [
"Apache-2.0"
] | null | null | null | pennylane/templates/__init__.py | israelferrazaraujo/pennylane | 0aaa797e17f3a010b564c3ffbb2fb3f8ad5d3de8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a growing library of templates of common variational
circuit architectures that can be used to easily build,
evaluate, and train quantum nodes.
.. currentmodule:: pennylane.templates
.. autosummary::
:toctree: api
embeddings
layers
"""
| 33.384615 | 74 | 0.762673 |
f9848c1a7dc600ab6c86b87e9820a4fd0c5f3c90 | 45 | py | Python | py/builtin/__init__.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | py/builtin/__init__.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | py/builtin/__init__.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | """ backports and additions of builtins """
| 15 | 43 | 0.688889 |
9750c0ec906a25f4863090e990c5db8de77eb227 | 15,820 | py | Python | src/python/pants/reporting/reporting_server.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | src/python/pants/reporting/reporting_server.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | src/python/pants/reporting/reporting_server.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import BaseHTTPServer
import itertools
import json
import logging
import mimetypes
import os
import pkgutil
import re
import urllib
import urlparse
from collections import namedtuple
from datetime import date, datetime
import pystache
from six.moves import range
from pants.base.build_environment import get_buildroot
from pants.base.mustache import MustacheRenderer
from pants.base.run_info import RunInfo
from pants.pantsd.process_manager import ProcessManager
logger = logging.getLogger(__name__)
# Google Prettyprint plugin files.
PPP_RE = re.compile("""^lang-.*\.js$""")
class PantsHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler that demultiplexes various pants reporting URLs."""
def __init__(self, settings, renderer, request, client_address, server):
self._settings = settings # An instance of ReportingServer.Settings.
self._root = self._settings.root
self._renderer = renderer
self._client_address = client_address
# The underlying handlers for specific URL prefixes.
self._GET_handlers = [
('/runs/', self._handle_runs), # Show list of known pants runs.
('/run/', self._handle_run), # Show a report for a single pants run.
('/browse/', self._handle_browse), # Browse filesystem under build root.
('/content/', self._handle_content), # Show content of file.
('/assets/', self._handle_assets), # Statically serve assets (css, js etc.)
('/poll', self._handle_poll), # Handle poll requests for raw file content.
('/latestrunid', self._handle_latest_runid) # Return id of latest pants run.
]
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
"""GET method implementation for BaseHTTPRequestHandler."""
if not self._client_allowed():
return
try:
(_, _, path, query, _) = urlparse.urlsplit(self.path)
params = urlparse.parse_qs(query)
# Give each handler a chance to respond.
for prefix, handler in self._GET_handlers:
if self._maybe_handle(prefix, handler, path, params):
return
# If no path specified, default to showing the list of all runs.
if path == '/':
self._handle_runs('', {})
return
self._send_content('Invalid GET request {}'.format(self.path), 'text/html')
except (IOError, ValueError):
pass # Printing these errors gets annoying, and there's nothing to do about them anyway.
#sys.stderr.write('Invalid GET request {}'.format(self.path))
def _handle_runs(self, relpath, params):
"""Show a listing of all pants runs since the last clean-all."""
runs_by_day = self._partition_runs_by_day()
args = self._default_template_args('run_list')
args['runs_by_day'] = runs_by_day
self._send_content(self._renderer.render_name('base', args), 'text/html')
def _handle_run(self, relpath, params):
"""Show the report for a single pants run."""
args = self._default_template_args('run')
run_id = relpath
run_info = self._get_run_info_dict(run_id)
if run_info is None:
args['no_such_run'] = relpath
if run_id == 'latest':
args['is_latest'] = 'none'
else:
report_abspath = run_info['default_report']
report_relpath = os.path.relpath(report_abspath, self._root)
report_dir = os.path.dirname(report_relpath)
self_timings_path = os.path.join(report_dir, 'self_timings')
cumulative_timings_path = os.path.join(report_dir, 'cumulative_timings')
artifact_cache_stats_path = os.path.join(report_dir, 'artifact_cache_stats')
run_info['timestamp_text'] = \
datetime.fromtimestamp(float(run_info['timestamp'])).strftime('%H:%M:%S on %A, %B %d %Y')
args.update({'run_info': run_info,
'report_path': report_relpath,
'self_timings_path': self_timings_path,
'cumulative_timings_path': cumulative_timings_path,
'artifact_cache_stats_path': artifact_cache_stats_path})
if run_id == 'latest':
args['is_latest'] = run_info['id']
args.update({
'collapsible': lambda x: self._renderer.render_callable('collapsible', x, args)
})
self._send_content(self._renderer.render_name('base', args), 'text/html')
def _handle_browse(self, relpath, params):
"""Handle requests to browse the filesystem under the build root."""
abspath = os.path.normpath(os.path.join(self._root, relpath))
if not abspath.startswith(self._root):
raise ValueError # Prevent using .. to get files from anywhere other than root.
if os.path.isdir(abspath):
self._serve_dir(abspath, params)
elif os.path.isfile(abspath):
self._serve_file(abspath, params)
def _handle_content(self, relpath, params):
"""Render file content for pretty display."""
abspath = os.path.normpath(os.path.join(self._root, relpath))
if os.path.isfile(abspath):
with open(abspath, 'r') as infile:
content = infile.read()
else:
content = 'No file found at {}'.format(abspath)
content_type = mimetypes.guess_type(abspath)[0] or 'text/plain'
if not content_type.startswith('text/') and not content_type == 'application/xml':
# Binary file. Display it as hex, split into lines.
n = 120 # Display lines of this max size.
content = repr(content)[1:-1] # Will escape non-printables etc, dropping surrounding quotes.
content = '\n'.join([content[i:i + n] for i in range(0, len(content), n)])
prettify = False
prettify_extra_langs = []
else:
prettify = True
if self._settings.assets_dir:
prettify_extra_dir = os.path.join(self._settings.assets_dir, 'js', 'prettify_extra_langs')
prettify_extra_langs = [{'name': x} for x in os.listdir(prettify_extra_dir)]
else:
# TODO: Find these from our package, somehow.
prettify_extra_langs = []
linenums = True
args = {'prettify_extra_langs': prettify_extra_langs, 'content': content,
'prettify': prettify, 'linenums': linenums}
self._send_content(self._renderer.render_name('file_content', args), 'text/html')
def _handle_assets(self, relpath, params):
"""Statically serve assets: js, css etc."""
if self._settings.assets_dir:
abspath = os.path.normpath(os.path.join(self._settings.assets_dir, relpath))
with open(abspath, 'r') as infile:
content = infile.read()
else:
content = pkgutil.get_data(__name__, os.path.join('assets', relpath))
content_type = mimetypes.guess_type(relpath)[0] or 'text/plain'
self._send_content(content, content_type)
def _handle_poll(self, relpath, params):
"""Handle poll requests for raw file contents."""
request = json.loads(params.get('q')[0])
ret = {}
# request is a polling request for multiple files. For each file:
# - id is some identifier assigned by the client, used to differentiate the results.
# - path is the file to poll.
# - pos is the last byte position in that file seen by the client.
for poll in request:
_id = poll.get('id', None)
path = poll.get('path', None)
pos = poll.get('pos', 0)
if path:
abspath = os.path.normpath(os.path.join(self._root, path))
if os.path.isfile(abspath):
with open(abspath, 'r') as infile:
if pos:
infile.seek(pos)
content = infile.read()
ret[_id] = content
self._send_content(json.dumps(ret), 'application/json')
def _handle_latest_runid(self, relpath, params):
"""Handle request for the latest run id.
Used by client-side javascript to detect when there's a new run to display.
"""
latest_runinfo = self._get_run_info_dict('latest')
if latest_runinfo is None:
self._send_content('none', 'text/plain')
else:
self._send_content(latest_runinfo['id'], 'text/plain')
def _partition_runs_by_day(self):
"""Split the runs by day, so we can display them grouped that way."""
run_infos = self._get_all_run_infos()
for x in run_infos:
ts = float(x['timestamp'])
x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S')
def date_text(dt):
delta_days = (date.today() - dt).days
if delta_days == 0:
return 'Today'
elif delta_days == 1:
return 'Yesterday'
elif delta_days < 7:
return dt.strftime('%A') # Weekday name.
else:
d = dt.day % 10
suffix = 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th'
return dt.strftime('%B %d') + suffix # E.g., October 30th.
keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]}
for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())]
def _get_run_info_dict(self, run_id):
"""Get the RunInfo for a run, as a dict."""
run_info_path = os.path.join(self._settings.info_dir, run_id, 'info')
if os.path.exists(run_info_path):
# We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.
return RunInfo(run_info_path).get_as_dict()
else:
return None
def _get_all_run_infos(self):
"""Find the RunInfos for all runs since the last clean-all."""
info_dir = self._settings.info_dir
if not os.path.isdir(info_dir):
return []
paths = [os.path.join(info_dir, x) for x in os.listdir(info_dir)]
# We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.
# We filter only those that have a timestamp, to avoid a race condition with writing
# that field.
return filter(lambda d: 'timestamp' in d, [RunInfo(os.path.join(p, 'info')).get_as_dict()
for p in paths if os.path.isdir(p) and not os.path.islink(p)])
def _serve_dir(self, abspath, params):
"""Show a directory listing."""
relpath = os.path.relpath(abspath, self._root)
breadcrumbs = self._create_breadcrumbs(relpath)
entries = [{'link_path': os.path.join(relpath, e), 'name': e} for e in os.listdir(abspath)]
args = self._default_template_args('dir')
args.update({'root_parent': os.path.dirname(self._root),
'breadcrumbs': breadcrumbs,
'entries': entries,
'params': params})
self._send_content(self._renderer.render_name('base', args), 'text/html')
def _serve_file(self, abspath, params):
"""Show a file.
The actual content of the file is rendered by _handle_content."""
relpath = os.path.relpath(abspath, self._root)
breadcrumbs = self._create_breadcrumbs(relpath)
link_path = urlparse.urlunparse([None, None, relpath, None, urllib.urlencode(params), None])
args = self._default_template_args('file')
args.update({'root_parent': os.path.dirname(self._root),
'breadcrumbs': breadcrumbs,
'link_path': link_path})
self._send_content(self._renderer.render_name('base', args), 'text/html')
def _send_content(self, content, content_type, code=200):
"""Send content to client."""
self.send_response(code)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', str(len(content)))
self.end_headers()
self.wfile.write(content)
def _client_allowed(self):
"""Check if client is allowed to connect to this server."""
client_ip = self._client_address[0]
if not client_ip in self._settings.allowed_clients and \
not 'ALL' in self._settings.allowed_clients:
self._send_content('Access from host {} forbidden.'.format(client_ip), 'text/html')
return False
return True
def _maybe_handle(self, prefix, handler, path, params, data=None):
"""Apply the handler if the prefix matches."""
if path.startswith(prefix):
relpath = path[len(prefix):]
if data:
handler(relpath, params, data)
else:
handler(relpath, params)
return True
else:
return False
def _create_breadcrumbs(self, relpath):
"""Create filesystem browsing breadcrumb navigation.
That is, make each path segment into a clickable element that takes you to that dir.
"""
if relpath == '.':
breadcrumbs = []
else:
path_parts = [os.path.basename(self._root)] + relpath.split(os.path.sep)
path_links = ['/'.join(path_parts[1:i + 1]) for i, name in enumerate(path_parts)]
breadcrumbs = [{'link_path': link_path, 'name': name}
for link_path, name in zip(path_links, path_parts)]
return breadcrumbs
def _default_template_args(self, content_template):
"""Initialize template args."""
def include(text, args):
template_name = pystache.render(text, args)
return self._renderer.render_name(template_name, args)
# Our base template calls include on the content_template.
ret = {'content_template': content_template}
ret['include'] = lambda text: include(text, ret)
return ret
def log_message(self, fmt, *args):
"""Silence BaseHTTPRequestHandler's logging."""
class ReportingServer(object):
"""Reporting Server HTTP server."""
class Settings(namedtuple('Settings', ['info_dir', 'template_dir', 'assets_dir', 'root',
'allowed_clients'])):
"""Reporting server settings.
info_dir: path to dir containing RunInfo files.
template_dir: location of mustache template files. If None, the templates
embedded in our package are used.
assets_dir: location of assets (js, css etc.) If None, the assets
embedded in our package are used.
root: build root.
allowed_clients: list of ips or ['ALL'].
"""
def __init__(self, port, settings):
renderer = MustacheRenderer(settings.template_dir, __name__)
class MyHandler(PantsHandler):
def __init__(self, request, client_address, server):
PantsHandler.__init__(self, settings, renderer, request, client_address, server)
self._httpd = BaseHTTPServer.HTTPServer(('', port), MyHandler)
self._httpd.timeout = 0.1 # Not the network timeout, but how often handle_request yields.
def server_port(self):
return self._httpd.server_port
def start(self):
self._httpd.serve_forever()
class ReportingServerManager(ProcessManager):
def __init__(self, context=None, options=None):
ProcessManager.__init__(self, name='reporting_server')
self.context = context
self.options = options
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemonize()."""
# The server finds run-specific info dirs by looking at the subdirectories of info_dir,
# which is conveniently and obviously the parent dir of the current run's info dir.
info_dir = os.path.dirname(self.context.run_tracker.run_info_dir)
settings = ReportingServer.Settings(info_dir=info_dir,
root=get_buildroot(),
template_dir=self.options.template_dir,
assets_dir=self.options.assets_dir,
allowed_clients=self.options.allowed_clients)
server = ReportingServer(self.options.port, settings)
self.write_socket(server.server_port())
# Block forever.
server.start()
| 40.984456 | 99 | 0.669659 |
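_partition_runs_by_day() above relies on sorting before itertools.groupby, because groupby only merges adjacent items. A standalone sketch of that pattern with made-up timestamps:
import itertools
from datetime import datetime
run_infos = [{'timestamp': '1418000000.0'}, {'timestamp': '1418086400.0'},
             {'timestamp': '1418086401.0'}]
keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
by_day = [(day, list(infos))
          for day, infos in itertools.groupby(sorted(run_infos, key=keyfunc, reverse=True),
                                              lambda x: keyfunc(x).date())]
# Without the sort, runs from the same day could land in separate groups.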
d0ac88e0aa5718ecf16db6c614f8cabeba46fdb6 | 1,155 | py | Python | exact-4-regular-u/main_exact_u_d2_25_d.py | mablem8/dandelion-simulations-py | c16893bac90207bf5fc0c80005189cab8c02238c | [
"CC0-1.0"
] | null | null | null | exact-4-regular-u/main_exact_u_d2_25_d.py | mablem8/dandelion-simulations-py | c16893bac90207bf5fc0c80005189cab8c02238c | [
"CC0-1.0"
] | null | null | null | exact-4-regular-u/main_exact_u_d2_25_d.py | mablem8/dandelion-simulations-py | c16893bac90207bf5fc0c80005189cab8c02238c | [
"CC0-1.0"
] | null | null | null | exec(open('./generate_graph.py').read())
exec(open('./regularize_graph.py').read())
exec(open('./generate_one_to_one_maps.py').read())
exec(open('./generate_adversary_observations.py').read())
exec(open('./generate_max_weight_matching.py').read())
exec(open('./calculate_precision.py').read())
n=100
d=2
ps=[0.25]
num_trials=8
header=['Fraction of Spies']
for trial in range(num_trials):
header.append('Trial '+str(trial)+' Precision')
import csv
with open('data-exact-u-d2-25-d.csv', 'w', newline='') as csvfile:
datawriter = csv.writer(csvfile, delimiter=',')
datawriter.writerow(header)
results=[]
for p in ps:
results.append([str(p)])
for trial in range(num_trials):
graph = generate_graph(n,p,d)
graph = regularize_graph(graph,d)
o2omp = generate_one_to_one_maps(graph)
adobs = generate_adversary_observations(graph, o2omp)
mwmat = generate_max_weight_matching(graph, d, adobs)
prcsn = calculate_precision(adobs, mwmat)
results[-1].append(str(prcsn))
with open('data-exact-u-d2-25-d.csv', 'a', newline='') as csvfile:
datawriter = csv.writer(csvfile, delimiter=',')
datawriter.writerow(results[-1])
| 33 | 68 | 0.709091 |
3b0da50a2eb4ce92a402cd7546ab66758e6e6206 | 135 | py | Python | hola.py | financieras/coder | aecf02d6828d3a0c4d1c2b7cd935ed42545031b8 | [
"MIT"
] | null | null | null | hola.py | financieras/coder | aecf02d6828d3a0c4d1c2b7cd935ed42545031b8 | [
"MIT"
] | null | null | null | hola.py | financieras/coder | aecf02d6828d3a0c4d1c2b7cd935ed42545031b8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
nombre=sys.argv[-1]
print("Hola "+nombre)
# run it from the terminal by typing:
# python3 hola.py Luis | 22.5 | 41 | 0.740741 |
231a1a10d94f29c435ad2aa74a76d906e1d8f506 | 154,697 | py | Python | pandas/core/index.py | ischwabacher/pandas | d17af6e41dd207620aa5f96d8d7fd2ffe4f5bac5 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/index.py | ischwabacher/pandas | d17af6e41dd207620aa5f96d8d7fd2ffe4f5bac5 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/core/index.py | ischwabacher/pandas | d17af6e41dd207620aa5f96d8d7fd2ffe4f5bac5 | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # pylint: disable=E1101,E1103,W0232
import datetime
import warnings
import operator
from functools import partial
from pandas.compat import range, zip, lrange, lzip, u, reduce
from pandas import compat
import numpy as np
import pandas.tslib as tslib
import pandas.lib as lib
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs
from pandas.util.decorators import Appender, cache_readonly, deprecate
from pandas.core.common import isnull, array_equivalent
import pandas.core.common as com
from pandas.core.common import (_values_from_object, is_float, is_integer,
ABCSeries, _ensure_object)
from pandas.core.config import get_option
# simplify
default_pprint = lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='')
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _indexOp(opname):
"""
Wrapper function for index comparison operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = getattr(self._data.view(np.ndarray), opname)
result = func(np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if com.is_bool_dtype(result):
return result
try:
return Index(result)
except: # pragma: no cover
return result
return wrapper
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't have arguments
and breaks __new__ """
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_groupby = _algos.groupby_object
_arrmap = _algos.arrmap_object
_left_indexer_unique = _algos.left_join_indexer_unique_object
_left_indexer = _algos.left_join_indexer_object
_inner_indexer = _algos.inner_join_indexer_object
_outer_indexer = _algos.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
tupleize_cols=True, **kwargs):
# no class inference!
if fastpath:
return cls._simple_new(data, name)
from pandas.tseries.period import PeriodIndex
if isinstance(data, (np.ndarray, Index, ABCSeries)):
if issubclass(data.dtype.type, np.datetime64):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif issubclass(data.dtype.type, np.timedelta64):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
data = np.array(data, dtype=dtype, copy=copy)
except TypeError:
pass
elif isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if issubclass(data.dtype.type, np.integer):
return Int64Index(data, copy=copy, dtype=dtype, name=name)
if issubclass(data.dtype.type, np.floating):
return Float64Index(data, copy=copy, dtype=dtype, name=name)
if com.is_bool_dtype(data):
subarr = data
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or np.isscalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and isinstance(data, list) and data:
try:
sorted(data)
has_mixed_types = False
except (TypeError, UnicodeDecodeError):
has_mixed_types = True # python3 only
if isinstance(data[0], tuple) and not has_mixed_types:
try:
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
pass # python2 - MultiIndex fails on mixed types
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
return Int64Index(subarr.astype('i8'), copy=copy, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'boolean':
# don't support boolean explicity ATM
pass
elif inferred != 'string':
if (inferred.startswith('datetime') or
tslib.is_timestamp_array(subarr)):
from pandas.tseries.index import DatetimeIndex
return DatetimeIndex(subarr, copy=copy, name=name, **kwargs)
elif (inferred.startswith('timedelta') or
lib.is_timedelta_array(subarr)):
from pandas.tseries.tdi import TimedeltaIndex
return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs)
elif inferred == 'period':
return PeriodIndex(subarr, name=name, **kwargs)
return cls._simple_new(subarr, name)
@classmethod
def _simple_new(cls, values, name=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result,k,v)
result._reset_identity()
return result
def _update_inplace(self, result):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(other, '_id', Ellipsis)
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._shallow_copy(result)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
def _array_values(self):
return self._data
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
def repeat(self, n):
"""
return a new Index of the values repeated n times
See also
--------
numpy.ndarray.repeat
"""
return self._shallow_copy(self.values.repeat(n))
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self.values.ravel(order=order)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
raise TypeError(
'{0}(...) must be called with a collection of some kind, {1} was '
'passed'.format(cls.__name__, repr(data))
)
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays."""
if not isinstance(data, (np.ndarray, Index)):
if data is None or np.isscalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([ (k,getattr(self,k,None)) for k in self._attributes])
def view(self, cls=None):
if cls is not None and not issubclass(cls, Index):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _shallow_copy(self, values=None, **kwargs):
""" create a new Index, don't copy the data, use the same object attributes
with passed in attributes taking precedence """
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self.__class__._simple_new(values,**attributes)
def copy(self, names=None, name=None, dtype=None, deep=False):
"""
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if deep:
from copy import deepcopy
new_index = self._shallow_copy(self._data.copy())
name = name or deepcopy(self.name)
else:
new_index = self._shallow_copy()
name = self.name
if name is not None:
names = [name]
if names:
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
return Series(self._to_embed(), index=self, name=self.name)
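    # Illustrative sketch (hypothetical values): the resulting Series has the
    # index values as both its index and its data.
    #
    #   >>> s = Index(['a', 'b', 'c']).to_series()
    #   >>> list(s.index) == list(s.values)
    #   True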
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
return self.values
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
dtype=dtype)
def to_datetime(self, dayfirst=False):
"""
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
return True
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name,))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d'
% len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int or level name, or sequence of int / level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels)
Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not com.is_list_like(level) and com.is_list_like(names):
raise TypeError("Names must be a string")
if not com.is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not com.is_list_like(names):
names = [names]
if level is not None and not com.is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if hasattr(head, 'format') and\
not isinstance(head, compat.string_types):
head = head.format()
tail = self[-1]
if hasattr(tail, 'format') and\
not isinstance(tail, compat.string_types):
tail = tail.format()
index_summary = ', %s to %s' % (com.pprint_thing(head),
com.pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
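    # Illustrative sketch (hypothetical values), following the format string
    # above ('%s: %s entries%s'):
    #
    #   >>> Index(['a', 'b', 'c']).summary()
    #   'Index: 3 entries, a to c'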
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
@property
def is_monotonic(self):
""" return if the index has monotonic (only equaly or increasing) values """
return self._engine.is_monotonic
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_mixed(self):
return 'mixed' in self.inferred_type
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
def _convert_scalar_indexer(self, key, typ=None):
""" convert a scalar indexer, right now we are converting
floats -> ints if the index supports it
"""
def to_int():
ikey = int(key)
if ikey != key:
return self._convert_indexer_error(key, 'label')
return ikey
if typ == 'iloc':
if is_integer(key):
return key
elif is_float(key):
key = to_int()
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return key
return self._convert_indexer_error(key, 'label')
if is_float(key):
if not self.is_floating():
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return to_int()
return key
def _validate_slicer(self, key, f):
""" validate and raise if needed on a slice indexers according to the
passed in function """
for c in ['start','stop','step']:
if not f(getattr(key,c)):
self._convert_indexer_error(key.start, 'slice {0} value'.format(c))
def _convert_slice_indexer_getitem(self, key, is_index_slice=False):
""" called from the getitem slicers, determine how to treat the key
whether positional or not """
if self.is_integer() or is_index_slice:
return key
return self._convert_slice_indexer(key)
def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer. disallow floats in the start/stop/step """
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if typ == 'iloc':
# need to coerce to_int if needed
def f(c):
v = getattr(key,c)
if v is None or is_integer(v):
return v
# warn if its a convertible float
if v == int(v):
warnings.warn("slice indexers when using iloc should be integers "
"and not floating point",FutureWarning)
return int(v)
self._convert_indexer_error(v, 'slice {0} value'.format(c))
return slice(*[ f(c) for c in ['start','stop','step']])
# validate slicers
def validate(v):
if v is None or is_integer(v):
return True
            # disallow floats
elif is_float(v):
return False
return True
self._validate_slicer(key, validate)
# figure out if this is a positional indexer
start, stop, step = key.start, key.stop, key.step
def is_int(v):
return v is None or is_integer(v)
is_null_slice = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if typ == 'getitem':
return self._convert_slice_indexer_getitem(
key, is_index_slice=is_index_slice)
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
if start is not None:
i = self.get_loc(start)
if stop is not None:
j = self.get_loc(stop)
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slice:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_list_indexer(self, key, typ=None):
""" convert a list indexer. these should be locations """
return key
def _convert_list_indexer_for_mixed(self, keyarr, typ=None):
""" passed a key that is tuplesafe that is integer based
and we have a mixed index (e.g. number/labels). figure out
the indexer. return None if we can't help
"""
if (typ is None or typ in ['iloc','ix']) and (com.is_integer_dtype(keyarr) and not self.is_floating()):
if self.inferred_type != 'integer':
keyarr = np.where(keyarr < 0,
len(self) + keyarr, keyarr)
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
from pandas.core.indexing import _maybe_convert_indices
return _maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
return keyarr
return None
def _convert_indexer_error(self, key, msg=None):
if msg is None:
msg = 'label'
raise TypeError("the {0} [{1}] is not a proper indexer for this index "
"type ({2})".format(msg, key, self.__class__.__name__))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
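    # Illustrative sketch (hypothetical values): values occurring more than
    # once are returned, sorted.
    #
    #   >>> Index([1, 2, 2, 3, 3, 3]).get_duplicates()
    #   [2, 3]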
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self.values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level,))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)'
% (level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, typ):
return typ == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __deepcopy__(self, memo={}):
return self.copy(deep=True)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Indexes does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
        (ndarrays only support ndarray indexers with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if np.isscalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com._is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not np.isscalar(result):
return promote(result)
else:
return result
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(np.concatenate(to_concat), name=name)
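    # Illustrative sketch (hypothetical values): the name is kept only when
    # all appended indexes share it.
    #
    #   >>> Index([1, 2], name='x').append(Index([3, 4], name='x'))
    #   Int64Index([1, 2, 3, 4], dtype='int64')
    #   >>> Index([1, 2], name='x').append(Index([3, 4], name='y')).name is None
    #   True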
@staticmethod
def _ensure_compat_concat(indexes):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
is_ts = [isinstance(idx, klasses) for idx in indexes]
if any(is_ts) and not all(is_ts):
return [_maybe_box(idx) for idx in indexes]
return indexes
def take(self, indexer, axis=0):
"""
return a new Index of the values selected by the indexer
See also
--------
numpy.ndarray.take
"""
indexer = com._ensure_platform_int(indexer)
taken = np.array(self).take(indexer)
        # by definition cannot propagate freq
return self._shallow_copy(taken, freq=None)
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
np.putmask(values, mask, value)
return self._shallow_copy(values)
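    # Illustrative sketch (hypothetical values): positions where the mask is
    # True are replaced by ``value``.
    #
    #   >>> Index([1, 2, 3, 4]).putmask([True, False, True, False], 0)
    #   Int64Index([0, 2, 0, 4], dtype='int64')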
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(com.pprint_thing(self.name,
escape_chars=('\t', '\r', '\n'))
if self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.core.format import format_array
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values, safe=1)
if values.dtype == np.object_:
result = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
""" slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', **kwargs):
""" actually format my specific types """
mask = isnull(self)
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values.tolist()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if type(other) != Index:
return other.equals(self)
return array_equivalent(self, other)
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found
"""
if isinstance(label, (Index, ABCSeries, np.ndarray)):
raise TypeError('%s' % type(label))
if not isinstance(label, Timestamp):
label = Timestamp(label)
if label not in self:
loc = self.searchsorted(label, side='left')
if loc > 0:
return self[loc - 1]
else:
return np.nan
return label
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError('Cannot sort an %r object' % self.__class__.__name__)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
if periods == 0:
# OK because immutable
return self
offset = periods * freq
return Index([idx + offset for idx in self], name=self.name)
def argsort(self, *args, **kwargs):
"""
return an ndarray indexer of the underlying data
See also
--------
numpy.ndarray.argsort
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
if isinstance(other, Index):
warnings.warn("using '+' to provide set union with Indexes is deprecated, "
"use '|' or .union()",FutureWarning)
return self.union(other)
return Index(np.array(self) + other)
__iadd__ = __add__
__eq__ = _indexOp('__eq__')
__ne__ = _indexOp('__ne__')
__lt__ = _indexOp('__lt__')
__gt__ = _indexOp('__gt__')
__le__ = _indexOp('__le__')
__ge__ = _indexOp('__ge__')
def __sub__(self, other):
if isinstance(other, Index):
warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
"use .difference()",FutureWarning)
return self.difference(other)
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.sym_diff(other)
def union(self, other):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable.')
if len(other) == 0 or self.equals(other):
return self
if len(self) == 0:
return _ensure_index(other)
self._assert_can_do_setop(other)
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self.values, other.values)[0]
except TypeError:
# incomparable objects
result = list(self.values)
# worth making this faster? a very unusual case
value_set = set(self.values)
result.extend([x for x in other.values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = com.take_nd(other.values, indexer,
allow_fill=False)
result = com._concat_compat((self.values, other_diff))
try:
self.values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self.values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning)
# for subclasses
return self._wrap_union_result(other, result)
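    # Illustrative sketch (hypothetical values): the union is sorted when the
    # values are comparable.
    #
    #   >>> Index([3, 1, 2]).union(Index([2, 4]))
    #   Int64Index([1, 2, 3, 4], dtype='int64')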
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(data=result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects. Sortedness of the result is
not guaranteed
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self.values, other.values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = self.get_indexer(other.values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = self.get_indexer_non_unique(other.values)[0].unique()
taken = self.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Compute sorted set difference of two Index objects
Parameters
----------
other : Index or array-like
Returns
-------
diff : Index
Notes
-----
        The result is the sorted set difference, for example:
>>> index.difference(index2)
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
if self.equals(other):
return Index([], name=self.name)
if not isinstance(other, Index):
other = np.asarray(other)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
        the_diff = sorted(set(self) - set(other))
        return Index(the_diff, name=result_name)
diff = deprecate('diff',difference)
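    # Illustrative sketch (hypothetical values): elements of the first index
    # that are not present in the second, sorted.
    #
    #   >>> Index([1, 2, 3, 4]).difference(Index([3, 4, 5]))
    #   Int64Index([1, 2], dtype='int64')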
def sym_diff(self, other, result_name=None):
"""
Compute the sorted symmetric difference of two Index objects.
Parameters
----------
other : array-like
result_name : str
Returns
-------
sym_diff : Index
Notes
-----
``sym_diff`` contains elements that appear in either ``idx1`` or
``idx2`` but not both. Equivalent to the Index created by
``(idx1 - idx2) + (idx2 - idx1)`` with duplicates dropped.
The sorting of a result containing ``NaN`` values is not guaranteed
across Python versions. See GitHub issue #6444.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.sym_diff(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
if not isinstance(other, Index):
other = Index(other)
result_name = result_name or self.name
the_diff = sorted(set((self.difference(other)).union(other.difference(self))))
return Index(the_diff, name=result_name)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
return self._engine.get_loc(_values_from_object(key))
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
k = _values_from_object(key)
# prevent integer truncation bug in indexing
if is_float(k) and not self.is_floating():
raise KeyError
try:
return self._engine.get_value(s, k)
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer','boolean']:
raise
try:
return tslib.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if com.is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if np.isscalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(
_values_from_object(arr), _values_from_object(key), value)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : ndarray
"""
# checks that level number is actually just 1
self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index. The mask determines whether labels are
found or not in the current index
Parameters
----------
target : Index
method : {'pad', 'ffill', 'backfill', 'bfill'}
pad / ffill: propagate LAST valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Notes
-----
        This is a low-level method and should be used at your own risk
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray
"""
method = self._get_method(method)
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit)
if self.dtype != target.dtype:
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad':
if not self.is_monotonic or not target.is_monotonic:
raise ValueError('Must be monotonic for forward fill')
indexer = self._engine.get_pad_indexer(target.values, limit)
elif method == 'backfill':
if not self.is_monotonic or not target.is_monotonic:
raise ValueError('Must be monotonic for backward fill')
indexer = self._engine.get_backfill_indexer(target.values, limit)
elif method is None:
indexer = self._engine.get_indexer(target.values)
else:
raise ValueError('unrecognized method: %s' % method)
return com._ensure_platform_int(indexer)
def get_indexer_non_unique(self, target, **kwargs):
""" return an indexer suitable for taking from a non unique index
return the labels in the same order as the target, and
return a missing indexer into the target (missing are marked as -1
in the indexer); target must be an iterable """
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target.values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
def get_indexer_for(self, target, **kwargs):
""" guaranteed return of an indexer even when non-unique """
if self.is_unique:
return self.get_indexer(target, **kwargs)
return self.get_indexer_non_unique(target, **kwargs)[0]
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if self.dtype != 'object':
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, to_groupby):
return self._groupby(self.values, _values_from_object(to_groupby))
def map(self, mapper):
return self._arrmap(self.values, mapper)
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values
Parameters
----------
values : set or sequence of values
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
"""
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember(self._array_values(), value_set)
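    # Illustrative sketch (hypothetical values): element-wise membership test.
    #
    #   >>> Index(['a', 'b', 'c']).isin(['b', 'd'])
    #   array([False,  True, False], dtype=bool)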
def _get_method(self, method):
if method:
method = method.lower()
aliases = {
'ffill': 'pad',
'bfill': 'backfill'
}
return aliases.get(method, method)
def reindex(self, target, method=None, level=None, limit=None,
copy_if_needed=False):
"""
For Index, simply returns the new index and the results of
get_indexer. Provided here to enable an interface that is amenable for
subclasses of Index whose internals are different (like MultiIndex)
Returns
-------
(new_index, indexer, mask) : tuple
"""
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
# to avoid aliasing an existing index
if (copy_if_needed and target.name != self.name and
self.name is not None):
if target.name is None:
target = self.copy()
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
return target, indexer
def join(self, other, how='left', level=None, return_indexers=False):
"""
Internal API method. Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
Returns
-------
join_index, (left_indexer, right_indexer)
"""
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if (level is None and (self_is_mi or other_is_mi)):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how, return_indexers=return_indexers)
# join on the level
if (level is not None and (self_is_mi or other_is_mi)):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how,
return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [ n for n in self.names if n is not None ]
other_names = [ n for n in other.names if n is not None ]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level overlap on a multi-index is not implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values], [other.values],
how=how, sort=True)
left_idx = com._ensure_platform_int(left_idx)
right_idx = com._ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other.values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. The order of the data indexed by
the MultiIndex will not be changed (currently)
"""
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is not None:
left_lev_indexer = com._ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = com.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
omit_mask = new_lev_labels != -1
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
if not omit_mask.all():
new_labels = [lab[omit_mask] for lab in new_labels]
new_levels = list(left.levels)
new_levels[level] = new_level
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
left_indexer = np.arange(len(left))[new_lev_labels != -1]
else:
join_index = left
left_indexer = None
if right_lev_indexer is not None:
right_indexer = com.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self.values
ov = other.values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(other, self)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def slice_indexer(self, start=None, end=None, step=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end)
# return a slice
if np.isscalar(start_slice) and np.isscalar(end_slice):
# degenerate cases
if start is None and end is None:
return slice(None, None, step)
return slice(start_slice, end_slice, step)
# loc indexers
return (Index(start_slice) & Index(end_slice)).values
def slice_locs(self, start=None, end=None):
"""
For an ordered Index, compute the slice locations for input labels
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
Returns
-------
(start, end) : (int, int)
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
is_unique = self.is_unique
def _get_slice(starting_value, offset, search_side, slice_property,
search_value):
if search_value is None:
return starting_value
try:
slc = self.get_loc(search_value)
if not is_unique:
# get_loc will return a boolean array for non_uniques
# if we are not monotonic
if isinstance(slc, (np.ndarray, Index)):
raise KeyError("cannot peform a slice operation "
"on a non-unique non-monotonic index")
if isinstance(slc, slice):
slc = getattr(slc, slice_property)
else:
slc += offset
except KeyError:
if self.is_monotonic:
                    # the index is monotonic but non-unique; if we already
                    # have an integer indexer then we are done,
                    # else search for it (GH 7523)
if not is_unique and is_integer(search_value):
slc = search_value
else:
slc = self.searchsorted(search_value,
side=search_side)
else:
raise
return slc
start_slice = _get_slice(0, offset=0, search_side='left',
slice_property='start', search_value=start)
end_slice = _get_slice(len(self), offset=1, search_side='right',
slice_property='stop', search_value=end)
return start_slice, end_slice
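    # Illustrative sketch (hypothetical values, unique monotonic index): the
    # returned locations are suitable for positional slicing and include the
    # ``end`` label.
    #
    #   >>> Index(['a', 'b', 'c', 'd']).slice_locs(start='b', end='c')
    #   (1, 3)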
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return Index(np.delete(self._data, loc), name=self.name)
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item_idx = Index([item], dtype=self.dtype).values
idx = np.concatenate(
(_self[:loc], item_idx, _self[loc:]))
return Index(idx, name=self.name)
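    # Illustrative sketch (hypothetical values): the new item is placed before
    # the position ``loc``.
    #
    #   >>> Index(['a', 'b', 'c']).insert(1, 'q').tolist()
    #   ['a', 'q', 'b', 'c']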
def drop(self, labels):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
raise ValueError('labels %s not contained in axis' % labels[mask])
return self.delete(indexer)
@Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, take_last=False):
result = super(Index, self).drop_duplicates(take_last=take_last)
return self._constructor(result)
@Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, take_last=False):
return super(Index, self).duplicated(take_last=take_last)
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable """
def _make_invalid_op(opstr):
def _invalid_op(self, other=None):
raise TypeError("cannot perform {opstr} with this index type: {typ}".format(opstr=opstr,
typ=type(self)))
return _invalid_op
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
@classmethod
def _add_numeric_methods(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr):
def _evaluate_numeric_binop(self, other):
import pandas.tseries.offsets as offsets
                # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} with type: {typ}".format(opstr=type(self),
typ=type(other)))
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f','i']:
raise TypeError("cannot evaluate a numeric op with a non-numeric dtype")
elif isinstance(other, (offsets.DateOffset, np.timedelta64, Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
else:
if not (com.is_float(other) or com.is_integer(other)):
raise TypeError("can only perform ops with scalar values")
return self._shallow_copy(op(self.values, other))
return _evaluate_numeric_binop
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
                # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
return self._shallow_copy(op(self.values))
return _evaluate_numeric_unary
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(operator.mul,'__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_evaluate_binop(operator.floordiv,'__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_evaluate_binop(operator.truediv,'__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_evaluate_binop(operator.div,'__div__')
cls.__neg__ = _make_evaluate_unary(lambda x: -x,'__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x,'__pos__')
cls.__abs__ = _make_evaluate_unary(lambda x: np.abs(x),'__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x,'__inv__')
Index._add_numeric_methods_disabled()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
class Int64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Int64Index is a special case
of `Index` with purely integer labels. This is the default index type used
by the DataFrame and Series ctors when no explicit index is provided by the
user.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: int64)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects
"""
_typ = 'int64index'
_groupby = _algos.groupby_int64
_arrmap = _algos.arrmap_int64
_left_indexer_unique = _algos.left_join_indexer_unique_int64
_left_indexer = _algos.left_join_indexer_int64
_inner_indexer = _algos.inner_join_indexer_int64
_outer_indexer = _algos.outer_join_indexer_int64
_engine_type = _index.Int64Engine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name)
# isscalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
elif issubclass(data.dtype.type, np.integer):
# don't force the upcast as we may be dealing
# with a platform int
if dtype is None or not issubclass(np.dtype(dtype).type,
np.integer):
dtype = np.int64
subarr = np.array(data, dtype=dtype, copy=copy)
else:
subarr = np.array(data, dtype=np.int64, copy=copy)
if len(data) > 0:
if (subarr != data).any():
raise TypeError('Unsafe NumPy casting to integer, you must'
' explicitly cast')
return cls._simple_new(subarr, name=name)
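    # Illustrative sketch (hypothetical values): lossless integer input is
    # accepted, while an unsafe cast raises.
    #
    #   >>> Int64Index([1, 2, 3])
    #   Int64Index([1, 2, 3], dtype='int64')
    #   >>> Int64Index([1.5])   # would raise TypeError('Unsafe NumPy casting ...')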
@property
def inferred_type(self):
return 'integer'
@cache_readonly
def hasnans(self):
# by definition
return False
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
# if not isinstance(other, Int64Index):
# return False
try:
return array_equivalent(self, other)
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
Int64Index._add_numeric_methods()
class Float64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Float64Index is a special case
of `Index` with purely floating point labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
    A Float64Index instance can **only** contain hashable objects
"""
_typ = 'float64index'
_engine_type = _index.Float64Engine
_groupby = _algos.groupby_float64
_arrmap = _algos.arrmap_float64
_left_indexer_unique = _algos.left_join_indexer_unique_float64
_left_indexer = _algos.left_join_indexer_float64
_inner_indexer = _algos.inner_join_indexer_float64
_outer_indexer = _algos.outer_join_indexer_float64
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name)
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if dtype is None:
dtype = np.float64
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except:
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
# coerce to float64 for storage
if subarr.dtype != np.float64:
subarr = subarr.astype(np.float64)
return cls._simple_new(subarr, name)
@property
def inferred_type(self):
return 'floating'
def astype(self, dtype):
if np.dtype(dtype) not in (np.object, np.float64):
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
return Index(self.values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, typ=None):
if typ == 'iloc':
return super(Float64Index, self)._convert_scalar_indexer(key,
typ=typ)
return key
def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer, by definition these are labels
unless we are iloc """
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if typ == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
typ=typ)
# allow floats here
validator = lambda v: v is None or is_integer(v) or is_float(v)
self._validate_slicer(key, validator)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step)
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not np.isscalar(key):
raise InvalidIndexError
from pandas.core.indexing import _maybe_droplevels
from pandas.core.series import Series
k = _values_from_object(key)
loc = self.get_loc(k)
new_values = _values_from_object(series)[loc]
if np.isscalar(new_values) or new_values is None:
return new_values
new_index = self[loc]
new_index = _maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (self._isnan & other._isnan)).all()
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and _try_get_item(other) in self
except TypeError:
return False
except:
return False
def get_loc(self, key):
try:
if np.all(np.isnan(key)):
try:
return self._nan_idxs.item()
except ValueError:
return self._nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key)
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
@cache_readonly
def _nan_idxs(self):
w, = self._isnan.nonzero()
return w
@cache_readonly
def _isnan(self):
return np.isnan(self.values)
@cache_readonly
def hasnans(self):
return self._isnan.any()
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember_nans(self._array_values(), value_set,
isnull(list(value_set)).any())
Float64Index._add_numeric_methods()
class MultiIndex(Index):
"""
Implements multi-level, a.k.a. hierarchical, index object for pandas
objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels.
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
copy=False, verify_integrity=True, _set_identity=True, **kwargs):
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
if len(levels) == 1:
if names:
name = names[0]
else:
name = None
return Index(levels[0], name=name, copy=True).take(labels[0])
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self):
"""Raises ValueError if length of levels and labels don't match or any
label would exceed level bounds"""
        # NOTE: Currently does not check, among other things, that the cached
        # nlevels matches, nor that sortorder actually reflects the sort order.
labels, levels = self.labels, self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" % (
[len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
def _get_levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
        # This is NOT part of the levels property because setting levels
        # externally should not be allowed. User beware if you change
        # _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
if verify_integrity:
self._verify_integrity()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int or level name, or sequence of int / level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not com.is_list_like(level):
if not com.is_list_like(levels):
raise TypeError("Levels must be list-like")
if com.is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or com.is_list_like(level):
if not com.is_list_like(levels) or not com.is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to read only property
__set_levels = deprecate("setting `levels` directly",
partial(set_levels, inplace=True,
verify_integrity=True),
alt_name="set_levels")
levels = property(fget=_get_levels, fset=__set_levels)
def _get_labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(_ensure_frozen(v, copy=copy)._shallow_copy()
for v in labels)
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for l, v in zip(level, labels):
new_labels[l] = _ensure_frozen(v, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
if verify_integrity:
self._verify_integrity()
def set_labels(self, labels, level=None, inplace=False, verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int or level name, or sequence of int / level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not com.is_list_like(level):
if not com.is_list_like(labels):
raise TypeError("Labels must be list-like")
if com.is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or com.is_list_like(level):
if not com.is_list_like(labels) or not com.is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to readonly property
__set_labels = deprecate("setting labels directly",
partial(set_labels, inplace=True,
verify_integrity=True),
alt_name="set_labels")
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
if deep:
from copy import deepcopy
levels = levels if levels is not None else deepcopy(self.levels)
labels = labels if labels is not None else deepcopy(self.labels)
names = names if names is not None else deepcopy(self.names)
else:
levels = self.levels
labels = self.labels
names = self.names
return MultiIndex(levels=levels,
labels=labels,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, result=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
_shallow_copy = view
def _array_values(self):
# hack for various methods
return self.values
@cache_readonly
def dtype(self):
return np.dtype('O')
def __repr__(self):
encoding = get_option('display.encoding')
attrs = [('levels', default_pprint(self.levels)),
('labels', default_pprint(self.labels))]
if not all(name is None for name in self.names):
attrs.append(('names', default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', default_pprint(self.sortorder)))
space = ' ' * (len(self.__class__.__name__) + 1)
prepr = (u(",\n%s") % space).join([u("%s=%s") % (k, v)
for k, v in attrs])
res = u("%s(%s)") % (self.__class__.__name__, prepr)
if not compat.PY3:
# needs to be str in Python 2
res = res.encode(encoding)
return res
def __unicode__(self):
"""
Return a string representation for a particular Index
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
rows = self.format(names=True)
max_rows = get_option('display.max_rows')
if len(rows) > max_rows:
spaces = (len(rows[0]) - 3) // 2
centered = ' ' * spaces
half = max_rows // 2
rows = rows[:half] + [centered + '...' + centered] + rows[-half:]
return "\n".join(rows)
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError(
'Length of names must match number of levels in MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
self.levels[l].rename(name, inplace=True)
names = property(
fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex")
def _reference_duplicate_name(self, name):
"""
Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return np.sum(name == np.asarray(self.names)) > 1
def _format_native_types(self, **kwargs):
return self.tolist()
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
return MultiIndex(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
'Too many levels: Index has only %d levels, '
'%d is not a valid level number' % (self.nlevels, orig_level)
)
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for lev, lab in zip(self.levels, self.labels):
taken = com.take_1d(lev.values, lab)
# Need to box timestamps, etc.
if hasattr(lev, '_box_values'):
taken = lev._box_values(taken)
values.append(taken)
self._tuples = lib.fast_zip(values)
return self._tuples
# legacy version-compatibility flags
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@property
def has_duplicates(self):
"""
Return True if the index has duplicate entries (i.e. not every label combination is unique)
"""
# has duplicates
shape = [len(lev) for lev in self.levels]
group_index = np.zeros(len(self), dtype='i8')
for i in range(len(shape)):
stride = np.prod([x for x in shape[i + 1:]], dtype='i8')
group_index += self.labels[i] * stride
if len(np.unique(group_index)) < len(group_index):
return True
return False
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import _maybe_droplevels
from pandas.core.series import Series
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series.values[loc]
new_index = self[loc]
new_index = _maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return _index.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if com.is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if isinstance(key, (datetime.datetime, np.datetime64)) or (
compat.PY3 and isinstance(key, compat.string_types)):
try:
return _try_mi(key)
except (KeyError):
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int or level name
Returns
-------
values : ndarray
"""
num = self._get_level_number(level)
unique = self.levels[num] # .values
labels = self.labels[num]
filled = com.take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._simple_new(filled, self.names[num],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
return values
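# --- Added illustrative sketch (not part of the original source; kept as comments so
# the module stays importable). It shows get_level_values expanding a level by its
# labels; the reprs below are assumed rather than verified output.
#
#   >>> mi = MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']],
#   ...                             names=['num', 'ltr'])
#   >>> mi.get_level_values('ltr')
#   Index([u'a', u'b', u'a'], dtype='object')
#   >>> mi.get_level_values(0)
#   Int64Index([1, 1, 2], dtype='int64')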
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [com.pprint_thing(na if isnull(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in com.take_1d(lev.values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(com.pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels,
start=int(names),
sentinel=sentinel)
if adjoin:
return com.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
greater than one. The size of each label must divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(1) for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
def is_lexsorted_for_tuple(self, tup):
"""
Return True if we are correctly lexsorted given the passed tuple
"""
return len(tup) <= self.lexsort_depth
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [com._ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if lib.is_lexsorted(int64_labels[:k]):
return k
return 0
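# --- Added worked sketch (not part of the original source): lexsort_depth is the
# number of leading levels whose integer labels are lexicographically sorted.
# For labels [[0, 0, 1, 1], [1, 0, 1, 0]] the first level is sorted on its own,
# but the pairs (0, 1), (0, 0), ... are not, so the depth would be 1; calling
# sortlevel() first is needed before slicing on deeper levels.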
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
from pandas.core.categorical import Categorical
if len(arrays) == 1:
name = None if names is None else names[0]
return Index(arrays[0], name=name)
cats = [Categorical.from_array(arr) for arr in arrays]
levels = [c.levels for c in cats]
labels = [c.codes for c in cats]
if names is None:
names = [c.name for c in cats]
return MultiIndex(levels=levels, labels=labels,
sortorder=sortorder, names=names,
verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(tuples) == 0:
# an empty sequence gives no way to infer the number of levels
raise TypeError('Cannot infer number of levels from empty list')
if isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples.values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder,
names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import Categorical
from pandas.tools.util import cartesian_product
categoricals = [Categorical.from_array(it) for it in iterables]
labels = cartesian_product([c.codes for c in categoricals])
return MultiIndex(levels=[c.levels for c in categoricals],
labels=labels, sortorder=sortorder, names=names)
@property
def nlevels(self):
return len(self.levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
self.get_loc(key)
return True
except KeyError:
return False
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels = [lev.view(np.ndarray) for lev in self.levels],
labels = [label.view(np.ndarray) for label in self.labels],
sortorder = self.sortorder,
names = list(self.names))
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
def __getitem__(self, key):
if np.isscalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels,
labels=new_labels,
names=self.names,
sortorder=sortorder,
verify_integrity=False)
def take(self, indexer, axis=None):
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other):
arrays = []
for i in range(self.nlevels):
label = self.get_level_values(i)
appended = [o.get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k.values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort()
def repeat(self, n):
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(n) for label in self.labels],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False)
def drop(self, labels, level=None):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
raise ValueError('labels %s not contained in axis'
% labels[mask])
return self.delete(indexer)
except Exception:
pass
inds = []
for label in labels:
loc = self.get_loc(label)
if isinstance(loc, int):
inds.append(loc)
else:
inds.extend(lrange(loc.start, loc.stop))
return self.delete(inds)
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~lib.ismember(self.labels[i], set(values))
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
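# --- Added illustrative sketch (not part of the original source); the output shown
# is assumed, not verified:
#
#   >>> mi = MultiIndex.from_tuples([(1, 'a'), (2, 'b')], names=['x', 'y'])
#   >>> mi.droplevel('x')
#   Index([u'a', u'b'], dtype='object')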
def swaplevel(self, i, j):
"""
Swap level i with level j. Do not change the ordering of anything
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : MultiIndex
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
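# --- Added illustrative sketch (not part of the original source); the output shown
# is assumed, not verified:
#
#   >>> mi = MultiIndex.from_tuples([(1, 'a'), (2, 'b')], names=['x', 'y'])
#   >>> mi.swaplevel('x', 'y').names
#   FrozenList([u'y', u'x'])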
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
The new level order; levels may be referenced by position or by name.
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(('Length of order must be same as '
'number of levels (%d), got %d')
% (self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
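# --- Added illustrative note (not part of the original source): for a two-level
# index, reorder_levels(['y', 'x']) has the same effect as swaplevel('x', 'y');
# passing fewer or more level references than nlevels raises an AssertionError.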
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
sort_remaining : sort by the remaining levels after level.
Returns
-------
sorted_index : MultiIndex
"""
from pandas.core.groupby import _indexer_from_factorized
labels = list(self.labels)
shape = list(self.levshape)
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
sortorder = None
else:
sortorder = level[0]
indexer = _indexer_from_factorized(primary,
primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
def get_indexer(self, target, method=None, limit=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index. The mask determines whether labels are
found or not in the current index
Parameters
----------
target : MultiIndex or Index (of tuples)
method : {'pad', 'ffill', 'backfill', 'bfill'}
pad / ffill: propagate LAST valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Notes
-----
This is a low-level method; use it at your own risk
Examples
--------
>>> indexer, mask = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
>>> new_values[-mask] = np.nan
Returns
-------
(indexer, mask) : (ndarray, ndarray)
"""
method = self._get_method(method)
target = _ensure_index(target)
target_index = target
if isinstance(target, MultiIndex):
target_index = target._tuple_index
if target_index.dtype != object:
return np.ones(len(target_index)) * -1
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
self_index = self._tuple_index
if method == 'pad':
if not self.is_unique or not self.is_monotonic:
raise AssertionError(('Must be unique and monotonic to '
'use forward fill getting the indexer'))
indexer = self_index._engine.get_pad_indexer(target_index.values,
limit=limit)
elif method == 'backfill':
if not self.is_unique or not self.is_monotonic:
raise AssertionError(('Must be unique and monotonic to '
'use backward fill getting the indexer'))
indexer = self_index._engine.get_backfill_indexer(target_index.values,
limit=limit)
else:
indexer = self_index._engine.get_indexer(target_index.values)
return com._ensure_platform_int(indexer)
def reindex(self, target, method=None, level=None, limit=None,
copy_if_needed=False):
"""
Performs any necessary conversion on the input index and calls
get_indexer. This method is here so MultiIndex and an Index of
like-labeled tuples can play nice together
Returns
-------
(new_index, indexer, mask) : (MultiIndex, ndarray, ndarray)
"""
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit)
else:
raise Exception(
"cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# assume target is a sequence of tuples and coerce it
target = MultiIndex.from_tuples(target)
return target, indexer
@cache_readonly
def _tuple_index(self):
"""
Convert MultiIndex to an Index of tuples
Returns
-------
index : Index
"""
return Index(self.values)
def slice_locs(self, start=None, end=None, strict=False):
"""
For an ordered MultiIndex, compute the slice locations for input
labels. They can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
strict : boolean,
Returns
-------
(start, end) : (int, int)
Notes
-----
This function assumes that the data is sorted by the first level
"""
if start is None:
start_slice = 0
else:
if not isinstance(start, tuple):
start = start,
start_slice = self._partial_tup_index(start, side='left')
if end is None:
end_slice = len(self)
else:
if not isinstance(end, tuple):
end = end,
end_slice = self._partial_tup_index(end, side='right')
return start_slice, end_slice
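# --- Added illustrative sketch (not part of the original source); the result is
# assumed, not verified:
#
#   >>> mi = MultiIndex.from_arrays([[1, 1, 2, 2], ['a', 'b', 'a', 'b']])
#   >>> mi.slice_locs(start=1, end=1)   # partial key covers the whole level-0 block
#   (0, 2)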
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise KeyError('Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key):
"""
Get integer location slice for requested label or tuple
Parameters
----------
key : label or tuple
Returns
-------
loc : int or slice object
"""
if isinstance(key, tuple):
if len(key) == self.nlevels:
if self.is_unique:
return self._engine.get_loc(_values_from_object(key))
else:
return slice(*self.slice_locs(key, key))
else:
# partial selection
result = slice(*self.slice_locs(key, key))
if result.start == result.stop:
raise KeyError(key)
return result
else:
return self._get_level_indexer(key, level=0)
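# --- Added illustrative sketch (not part of the original source); results are
# assumed, not verified:
#
#   >>> mi = MultiIndex.from_arrays([[1, 1, 2, 2], ['a', 'b', 'a', 'b']])
#   >>> mi.get_loc((1, 'b'))   # complete key: integer position
#   1
#   >>> mi.get_loc(1)          # partial key: slice over the level-0 block
#   slice(0, 2, None)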
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get integer location slice for requested label or tuple
Parameters
----------
key : label or tuple
level : int/level name or list thereof
Returns
-------
loc : int or slice object
"""
def _maybe_drop_levels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
# kludge: try to drop the requested levels, falling back to the original index
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, _maybe_drop_levels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = _maybe_drop_levels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
def partial_selection(key):
indexer = slice(*self.slice_locs(key, key))
if indexer.start == indexer.stop:
raise KeyError(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, _maybe_drop_levels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels:
if self.is_unique:
# here we have a completely specified key, but are
# using some partial string matching here
# GH4758
can_index_exactly = any([
(l.is_all_dates and
not isinstance(k, compat.string_types))
for k, l in zip(key, self.levels)
])
if any([
l.is_all_dates for k, l in zip(key, self.levels)
]) and not can_index_exactly:
indexer = slice(*self.slice_locs(key, key))
# we have a multiple selection here
if not indexer.stop - indexer.start == 1:
return partial_selection(key)
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(_values_from_object(key)),
None)
else:
return partial_selection(key)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, _maybe_drop_levels(indexer, ilevels,
drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, _maybe_drop_levels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0):
# return a boolean indexer or a slice showing where the key is
# in the totality of values
level_index = self.levels[level]
labels = self.labels[level]
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index)-1
step = key.step
except (KeyError):
# we have a partial slice (like looking up a partial date string)
start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
step = start.step
if isinstance(start,slice) or isinstance(stop,slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
m = np.zeros(len(labels),dtype=bool)
m[np.in1d(labels,np.arange(start.start,stop.stop,step))] = True
return m
elif level > 0 or self.lexsort_depth == 0 or step is not None:
# need the same right-side search semantics here
# as when we are using a slice
# so include the stop+1 (so we include stop)
m = np.zeros(len(labels),dtype=bool)
m[np.in1d(labels,np.arange(start,stop+1,step))] = True
return m
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc,dtype=bool)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
def get_locs(self, tup):
"""
Given a tuple of slices/lists/labels/boolean indexers as a level-wise spec,
produce an indexer to extract those locations
Parameters
----------
tup : tuple of (slices/lists/labels/boolean indexers)
Returns
-------
locs : integer list of locations or boolean indexer suitable
for passing to iloc
"""
from pandas.core.indexing import _is_null_slice
# must be lexsorted to at least as many levels
if not self.is_lexsorted_for_tuple(tup):
raise KeyError('MultiIndex Slicing requires the index to be fully lexsorted'
' tuple len ({0}), lexsort depth ({1})'.format(len(tup), self.lexsort_depth))
def _convert_indexer(r):
if isinstance(r, slice):
m = np.zeros(len(self),dtype=bool)
m[r] = True
return m
return r
ranges = []
for i,k in enumerate(tup):
if com._is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
if len(k) != len(self):
raise ValueError("cannot index with a boolean indexer that is"
" not the same length as the index")
ranges.append(k)
elif com.is_list_like(k):
# a collection of labels to include from this level (these are or'd)
indexers = []
for x in k:
try:
indexers.append(_convert_indexer(self._get_level_indexer(x, level=i)))
except (KeyError):
# ignore labels that are not found
continue
ranges.append(reduce(np.logical_or,indexers))
elif _is_null_slice(k):
# empty slice
pass
elif isinstance(k,slice):
# a slice, include BOTH of the labels
ranges.append(self._get_level_indexer(k,level=i))
else:
# a single label
ranges.append(self.get_loc_level(k,level=i,drop_level=False)[0])
# identity
if len(ranges) == 0:
return slice(0,len(self))
elif len(ranges) == 1:
return ranges[0]
# construct a boolean indexer if we have a slice or boolean indexer
return reduce(np.logical_and,[ _convert_indexer(r) for r in ranges ])
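# --- Added illustrative sketch (not part of the original source), assuming the
# index is lexsorted deeply enough for the spec:
#
#   >>> mi = MultiIndex.from_product([[1, 2], ['a', 'b']])
#   >>> mi.get_locs((slice(None), ['a']))
#   # -> boolean indexer selecting positions 0 and 2 (assumed)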
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, MultiIndex):
return array_equivalent(self.values, _ensure_index(other))
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
svalues = com.take_nd(self.levels[i].values, self.labels[i],
allow_fill=False)
ovalues = com.take_nd(other.levels[i].values, other.labels[i],
allow_fill=False)
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
>>> index.union(index2)
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
result_names = self.names if self.names == other.names else None
uniq_tuples = lib.fast_unique_multiple([self.values, other.values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
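# --- Added illustrative sketch (not part of the original source); the result is
# assumed, not verified:
#
#   >>> mi1 = MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
#   >>> mi2 = MultiIndex.from_tuples([(2, 'b'), (3, 'c')])
#   >>> mi1.union(mi2)   # the three distinct tuples, sorted; names kept only if equal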
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self
result_names = self.names if self.names == other.names else None
self_tuples = self.values
other_tuples = other.values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, MultiIndex):
if len(other) == 0:
return self
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError('other must be a MultiIndex or a list of'
' tuples')
result_names = self.names
else:
result_names = self.names if self.names == other.names else None
if self.equals(other):
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self.values) - set(other.values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
def _assert_can_do_setop(self, other):
pass
def astype(self, dtype):
if np.dtype(dtype) != np.object_:
raise TypeError('Setting %s dtype to anything other than object '
'is not supported' % self.__class__)
return self._shallow_copy()
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item,) + ('',) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError(
'Item must have length equal to number of levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(labels, loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
return lib.ismember(self._array_values(), set(values))
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
MultiIndex._add_numeric_methods_disabled()
# For utility purposes
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
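# --- Added worked sketch (not part of the original source): _sparsify blanks out
# repeated prefixes when formatting for display, e.g.
#   _sparsify([['a', 'a', 'b'], ['x', 'y', 'x']])
#   -> [('a', '', 'b'), ('x', 'y', 'x')]
# so a repeated outer label is printed only on its first row.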
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
def _ensure_frozen(array_like, copy=False):
array_like = np.asanyarray(array_like, dtype=np.int_)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
# TODO: handle index names!
def _get_combined_index(indexes, intersect=False):
indexes = _get_distinct_indexes(indexes)
if len(indexes) == 0:
return Index([])
if len(indexes) == 1:
return indexes[0]
if intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
return index
union = _union_indexes(indexes)
return _ensure_index(union)
def _get_distinct_indexes(indexes):
return list(dict((id(x), x) for x in indexes).values())
def _union_indexes(indexes):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(lib.fast_unique_multiple_list([ conv(i) for i in inds ]))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _unique_indices(indexes)
return index
else:
return _unique_indices(indexes)
def _trim_front(strings):
"""
Trims leading spaces that are common to all of the strings
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _sanitize_and_check(indexes):
kinds = list(set([type(index) for index in indexes]))
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com._try_sort(x))
if not isinstance(x, Index) else x
for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = set([
tuple(i.names) for i in indexes if all(n is not None for n in i.names)
])
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
def _maybe_box(idx):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
if isinstance(idx, klasses):
return idx.asobject
return idx
def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
def _get_na_value(dtype):
return {np.datetime64: tslib.NaT, np.timedelta64: tslib.NaT}.get(dtype,
np.nan)
| 33.954565 | 120 | 0.550761 |
6633f6aeeece440a9f3667add45b39eb18d45fcc | 913 | py | Python | scripts/time_methods.py | Lila14/multimds | e54642e0ae47592321352f931f534881ca57d888 | ["MIT"] | 1 | 2019-10-29T12:33:57.000Z | 2019-10-29T12:33:57.000Z | scripts/time_methods.py | Lila14/multimds | e54642e0ae47592321352f931f534881ca57d888 | ["MIT"] | null | null | null | scripts/time_methods.py | Lila14/multimds | e54642e0ae47592321352f931f534881ca57d888 | ["MIT"] | null | null | null |
from matplotlib import pyplot as plt
import numpy as np
chrom_sizes = np.loadtxt("chrom_sizes.txt")
plt.subplot2grid((10,10), (0,0), 9, 10, frameon=False)
maxs = []
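# Added comment: plot each method's runtimes (the *_times.txt files are assumed to
# be in seconds, hence the division by 60) against chromosome sizes, and remember
# each method's maximum for scaling the axes below.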
for method in ("MultiMDS", "Independent_MDS"):
times = np.loadtxt("{}_times.txt".format(method))/60.
plt.scatter(chrom_sizes, times, label=" ".join(method.split("_")))
maxs.append(max(times))
xs = chrom_sizes
x_int_size = 500
y_int_size = 1
x_start = min(xs) - x_int_size/4.
x_end = max(xs) + x_int_size/5.
y_start = -y_int_size/5.
y_end = max(maxs) + y_int_size/5.
plt.xlabel("Number of bins", fontsize=14)
plt.ylabel("Computational time (minutes)", fontsize=14)
plt.axis([x_start, x_end, y_start, y_end], frameon=False)
plt.axvline(x=x_start, color="k", lw=4)
plt.axhline(y=y_start, color="k", lw=6)
plt.legend()
plt.tick_params(direction="out", top=False, right=False, length=12, width=3, pad=5, labelsize=10)
plt.savefig("time_methods")
| 31.482759 | 97 | 0.715225 |
93a8c0256ef43256b305fafd63f3551d13818200 | 9,163 | py | Python | train.py | dlordtemplar/keras-base-rest | f225d6496ed2e876570eb29d0ba155a0d7b548ac | ["MIT"] | null | null | null | train.py | dlordtemplar/keras-base-rest | f225d6496ed2e876570eb29d0ba155a0d7b548ac | ["MIT"] | 6 | 2020-01-28T22:09:54.000Z | 2022-02-09T23:29:23.000Z | train.py | dlordtemplar/keras-base-rest | f225d6496ed2e876570eb29d0ba155a0d7b548ac | ["MIT"] | null | null | null |
# SiameseLSTM_visualization_semeval.ipynb
import os
import notebook_util
print(notebook_util.list_available_gpus())
notebook_util.setup_one_gpu()
import tensorflow as tf
from keras.engine import Model
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf.gpu_options.per_process_gpu_memory_fraction = 0.1
from keras import backend as K
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import pickle
import keras
from sklearn.utils import class_weight
import gensim
from keras.layers import *
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
EMBEDDING_FOLDER = "resources/embeddings/"
EMBEDDING_DIM = 300
WORD2VEC_PATH = EMBEDDING_FOLDER + "GoogleNews-vectors-negative300.bin"
DATA_PATH = 'out/data/semeval/'
MODEL_PATH = 'out/data/semeval/models/'
if not os.path.isdir(MODEL_PATH):
os.makedirs(MODEL_PATH)
max_length = 200
answer_texts_train = pickle.load(open(DATA_PATH + "answer_texts_train_Ling.p", "rb"))
train = pickle.load(open(DATA_PATH + "train-expanded_Ling.p", "rb"))
answer_texts_dev = pickle.load(open(DATA_PATH + "answer_texts_dev_Ling.p", "rb"))
dev = pickle.load(open(DATA_PATH + "dev-Labels_Ling.p", "rb"))
answer_texts_train = answer_texts_train.set_index('answer_id')
correct_answer_ids = set(train['answer_id'].values)
incorrect_answer_ids = [x for x in answer_texts_train.index.values if x not in correct_answer_ids]
incorrect_answer_texts = answer_texts_train.loc[incorrect_answer_ids]
texts = list([x.text for x in train['question'].values]) + list([x.text for x in train['answer'].values]) + list(
[x.text for x in incorrect_answer_texts['answer'].values])
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
vocabulary = tokenizer.word_index
vocabulary_inv = {v: k for k, v in vocabulary.items()}
embeddings_index = gensim.models.KeyedVectors.load_word2vec_format(WORD2VEC_PATH, binary=True, unicode_errors='ignore')
embedding_matrix = np.zeros((len(vocabulary) + 2, EMBEDDING_DIM)) # oov + pad vector
oov_vector = np.zeros(EMBEDDING_DIM)
for word, i in vocabulary.items():
if word in embeddings_index.wv.vocab:
embedding_vector = embeddings_index[word]
embedding_matrix[i] = embedding_vector
elif word.lower() in embeddings_index.wv.vocab:
embedding_vector = embeddings_index[word.lower()]
embedding_matrix[i] = embedding_vector
elif word.capitalize() in embeddings_index.wv.vocab:
embedding_vector = embeddings_index[word.capitalize()]
embedding_matrix[i] = embedding_vector
elif word.lower().capitalize() in embeddings_index.wv.vocab:
embedding_vector = embeddings_index[word.lower().capitalize()]
embedding_matrix[i] = embedding_vector
else:
embedding_matrix[i] = oov_vector
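# Added comment: words absent from the word2vec vocabulary were tried above in
# several capitalization variants before falling back to the all-zero OOV vector.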
questions = []
wrong_answers = []
for idx, row in train.iterrows():
for y in row['pool']:
wrong_answers.append(answer_texts_train.loc[y]['answer'].text)
questions.append(row['question'].text)
correct_answers = []
for idx, row in train.iterrows():
correct_answers.append(row['answer'].text)
questions.append(row['question'].text)
data = [(x, 0) for x in wrong_answers] + [(x, 1) for x in correct_answers]
data_answers = [x[0] for x in data]
data_questions = questions
data_targets = [x[1] for x in data]
X_train_a_text, X_validation_a_text, X_train_q_text, X_validation_q_text, Y_train, Y_validation = train_test_split(
data_answers, data_questions, data_targets, test_size=0.2)
X_train_a = tokenizer.texts_to_sequences(X_train_a_text)
X_validation_a = tokenizer.texts_to_sequences(X_validation_a_text)
X_train_q = tokenizer.texts_to_sequences(X_train_q_text)
X_validation_q = tokenizer.texts_to_sequences(X_validation_q_text)
X_train_a = pad_sequences(X_train_a, maxlen=max_length, value=embedding_matrix.shape[0] - 1)
X_validation_a = pad_sequences(X_validation_a, maxlen=max_length, value=embedding_matrix.shape[0] - 1)
X_train_q = pad_sequences(X_train_q, maxlen=max_length, value=embedding_matrix.shape[0] - 1)
X_validation_q = pad_sequences(X_validation_q, maxlen=max_length, value=embedding_matrix.shape[0] - 1)
Y_train = np.array(Y_train)
Y_validation = np.array(Y_validation)
with open(DATA_PATH + 'tokenizer.p', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(DATA_PATH + 'embedding_matrix.p', 'wb') as handle:
pickle.dump(embedding_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
class_weights = class_weight.compute_class_weight('balanced', np.unique(Y_train), Y_train)
class_weights = {i: x for i, x in enumerate(list(class_weights))}
print(class_weights)
adam = keras.optimizers.Adam(clipnorm=1.)
SEED = 42
MAX_LENGTH = 200
M = 96
N = 64
gaussian_noise = 0
unidirectional = False
trainable_embeddings = False
mean_pooling = False
initializer = keras.initializers.he_normal(seed=SEED)
dropout = False
embedding_layer = Embedding(embedding_matrix.shape[0],
embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=MAX_LENGTH, trainable=trainable_embeddings)
a_input = Input(shape=(MAX_LENGTH,), dtype='int32')
q_input = Input(shape=(MAX_LENGTH,), dtype='int32')
embedded_a = embedding_layer(a_input)
embedded_q = embedding_layer(q_input)
if gaussian_noise != 0:
embedded_a = keras.layers.GaussianNoise(gaussian_noise)(embedded_a)
embedded_q = keras.layers.GaussianNoise(gaussian_noise)(embedded_q)
if unidirectional:
if dropout:
shared_lstm = keras.layers.LSTM(M, return_sequences=True, recurrent_dropout=0.2, dropout=0.5,
kernel_initializer=initializer, name='SharedLSTM1')
shared_lstm2 = keras.layers.LSTM(N, return_sequences=True, recurrent_dropout=0.2, dropout=0.5,
kernel_initializer=initializer, name='SharedLSTM2')
else:
shared_lstm = keras.layers.CuDNNLSTM(M, return_sequences=True, kernel_initializer=initializer,
name='SharedLSTM1')
shared_lstm2 = keras.layers.CuDNNLSTM(N, return_sequences=True, kernel_initializer=initializer,
name='SharedLSTM2')
N_output = N
else:
if dropout:
shared_lstm = Bidirectional(keras.layers.LSTM(M, return_sequences=True, recurrent_dropout=0.2, dropout=0.5,
kernel_initializer=initializer), name='SharedLSTM1')
shared_lstm2 = Bidirectional(keras.layers.LSTM(N, return_sequences=True, recurrent_dropout=0.2, dropout=0.5,
kernel_initializer=initializer), name='SharedLSTM2')
else:
shared_lstm = Bidirectional(keras.layers.CuDNNLSTM(M, return_sequences=True, kernel_initializer=initializer),
name='SharedLSTM1')
shared_lstm2 = Bidirectional(keras.layers.CuDNNLSTM(N, return_sequences=True, kernel_initializer=initializer),
name='SharedLSTM2')
N_output = 2 * N
a_lstm_intermediate = shared_lstm(embedded_a)
a_lstm_intermediate = keras.layers.BatchNormalization()(a_lstm_intermediate)
a_lstm_output = shared_lstm2(a_lstm_intermediate)
a_lstm_output_viz = Lambda(lambda x: x[:, -1, :], output_shape=(N_output,), name='a_lstm_output_viz')(
a_lstm_output) # only needed for visualization
q_lstm_intermediate = shared_lstm(embedded_q)
q_lstm_intermediate = keras.layers.BatchNormalization()(q_lstm_intermediate)
q_lstm_output = shared_lstm2(q_lstm_intermediate)
q_lstm_output_viz = Lambda(lambda x: x[:, -1, :], output_shape=(N_output,), name='q_lstm_output_viz')(
q_lstm_output) # only needed for visualization
O_q = GlobalMaxPooling1D(name='max_pool_q')(q_lstm_output)
q_vec = Dense(N_output, name='W_qm')(O_q)
q_vec = RepeatVector(200)(q_vec)
a_vec = TimeDistributed(Dense(N_output, name='W_am'))(a_lstm_output)
m = Add()([q_vec, a_vec])
m = Activation(activation='tanh')(m)
s = TimeDistributed(Dense(1, name='w_ms'))(m)
s = keras.layers.Softmax(axis=1, name='attention_scores')(s)
# name='attended_a'
h_hat_a = Multiply()([a_lstm_output, s])
O_a = GlobalMaxPooling1D(name='max_pool_attended_a')(h_hat_a)
# print("O_q[0:5]", O_q[0:5])
# print("O_a[0:5]", O_a[0:5])
x = Dot(axes=-1, normalize=True)([O_q, O_a])
# x = Flatten()(x)
model = Model([a_input, q_input], x)
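# Rough summary of the graph built above (for readability; the layer definitions
# remain the authoritative description): both inputs share one embedding and one
# stacked (Bi)LSTM encoder, i.e. a siamese setup. The question states are
# max-pooled into O_q, projected by the W_qm dense layer and repeated across the
# answer time steps; the answer states are projected per step by W_am; their sum
# is squashed with tanh, scored by w_ms and normalized with a softmax into
# per-step attention weights s. The answer states re-weighted by s are max-pooled
# into O_a, and the model output x is the cosine similarity (normalized dot
# product) of O_q and O_a, trained below as a binary correct/incorrect classifier.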
model.compile(loss='binary_crossentropy',
optimizer=adam,
metrics=['acc'])
model.summary()
model.fit([X_train_a, X_train_q], Y_train, validation_data=([X_validation_a, X_validation_q], Y_validation),
epochs=4, batch_size=20, class_weight=class_weights)
model_json = model.to_json()
with open(MODEL_PATH + "model_visualization_siamesedeeplstm.json", "w") as json_file:
json_file.write(model_json)
model.save_weights(MODEL_PATH + "model_visualization_siamesedeeplstm.h5")
print("Saved model to disk")
| 41.65 | 119 | 0.73142 |
6a05c3f9bd2c9f7e01d6fdb8a0bb478755787663 | 5,700 | py | Python | data_tracking/nifti_import.py | HBPMedical/data-tracking | f645a0d6426e6019c92d5aaf4be225cff2864417 | [
"Apache-2.0"
] | 5 | 2017-06-04T16:05:22.000Z | 2022-03-21T11:50:09.000Z | data_tracking/nifti_import.py | HBPMedical/data-tracking | f645a0d6426e6019c92d5aaf4be225cff2864417 | [
"Apache-2.0"
] | null | null | null | data_tracking/nifti_import.py | HBPMedical/data-tracking | f645a0d6426e6019c92d5aaf4be225cff2864417 | [
"Apache-2.0"
] | 2 | 2016-11-21T17:50:01.000Z | 2016-12-23T12:51:19.000Z | import logging
import re
from . import utils
#######################################################################################################################
# PUBLIC FUNCTIONS
#######################################################################################################################
def nifti2db(file_path, file_type, is_copy, step_id, db_conn, sid_by_patient=False, pid_in_vid=False):
"""Extract some meta-data from NIFTI files (actually mostly from their paths) and stores it in a DB.
Arguments:
:param file_path: File path.
:param file_type: File type.
:param is_copy: Indicate if this file is a copy.
:param step_id: Step ID.
:param db_conn: Database connection.
:param sid_by_patient: Rarely, a data set might use study IDs which are unique by patient
(not for the whole study).
E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID.
:param pid_in_vid: Rarely, a data set might mix patient IDs and visit IDs. E.g.: LREN data. In such a case, you
have to enable this flag. This will try to split PatientID into VisitID and PatientID.
:return:
"""
logging.info("Processing '%s'" % file_path)
df = db_conn.db_session.query(db_conn.DataFile).filter_by(path=file_path).one_or_none()
dataset = db_conn.get_dataset(step_id)
_extract_participant(db_conn, file_path, pid_in_vid, dataset)
visit_id = _extract_visit(db_conn, file_path, pid_in_vid, sid_by_patient, dataset)
session_id = _extract_session(db_conn, file_path, visit_id)
sequence_id = _extract_sequence(db_conn, file_path, session_id)
repetition_id = _extract_repetition(db_conn, file_path, sequence_id)
if not df:
df = db_conn.DataFile(
path=file_path,
type=file_type,
is_copy=is_copy,
processing_step_id=step_id,
repetition_id=repetition_id
)
db_conn.db_session.merge(df)
db_conn.db_session.commit()
else:
if file_type not in [None, '', df.type]:
df.type = file_type
db_conn.db_session.commit()
if is_copy not in [None, df.is_copy]:
df.is_copy = is_copy
db_conn.db_session.commit()
if step_id not in [None, df.processing_step_id]:
df.processing_step_id = step_id
db_conn.db_session.commit()
if repetition_id not in [None, df.repetition_id]:
df.repetition_id = repetition_id
db_conn.db_session.commit()
#######################################################################################################################
# PRIVATE FUNCTIONS
#######################################################################################################################
def _extract_participant(db_conn, file_path, pid_in_vid, dataset):
participant_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
if pid_in_vid:
try:
participant_name = utils.split_patient_id(participant_name)[1]
except TypeError:
pass
participant_id = db_conn.get_participant_id(participant_name, dataset)
# Sync participant table with participant_mapping table
participant = db_conn.db_session.query(db_conn.Participant).filter_by(id=participant_id).one_or_none()
if not participant:
participant = db_conn.Participant(
id=participant_id
)
db_conn.db_session.merge(participant)
return participant_id
def _extract_session(db_conn, file_path, visit_id):
try:
session = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
except AttributeError:
logging.debug("Field StudyID was not found")
session = None
return db_conn.get_session_id(session, visit_id)
def _extract_sequence(db_conn, file_path, session_id):
sequence_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?\.nii', file_path)[0])
return db_conn.get_sequence_id(sequence_name, session_id)
def _extract_visit(db_conn, file_path, pid_in_vid, by_patient, dataset):
participant_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
visit_name = None
if pid_in_vid: # If the patient ID and the visit ID are mixed into the PatientID field (e.g. LREN data)
try:
visit_name = utils.split_patient_id(participant_name)[0]
except TypeError:
visit_name = None
if not pid_in_vid or not visit_name: # Otherwise, we use the StudyID (also used as a session ID) (e.g. PPMI data)
try:
visit_name = str(re.findall('/([^/]+?)/[^/]+?/[^/]+?/[^/]+?\.nii', file_path)[0])
if by_patient: # If the Study ID is given at the patient level (e.g. LREN data), here is a little trick
visit_name = participant_name + "_" + visit_name
except AttributeError:
logging.debug("Field StudyID was not found")
visit_name = None
visit_id = db_conn.get_visit_id(visit_name, dataset)
# Sync visit table with visit_mapping table
participant_id = db_conn.get_participant_id(participant_name, dataset)
visit = db_conn.db_session.query(db_conn.Visit).filter_by(id=visit_id).one_or_none()
if not visit:
visit = db_conn.Visit(
id=visit_id,
participant_id=participant_id,
)
db_conn.db_session.merge(visit)
return visit_id
def _extract_repetition(db_conn, file_path, sequence_id):
repetition_name = str(re.findall('/([^/]+?)/[^/]+?\.nii', file_path)[0])
return db_conn.get_repetition_id(repetition_name, sequence_id)
| 42.222222 | 119 | 0.604211 |
6fd4143b0002fc398cc1e9ab6dca334cdc39cfe6 | 3,020 | py | Python | Lib/fontTools/ttLib/tables/C_B_D_T_.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-04-07T16:47:04.000Z | 2022-01-15T04:01:01.000Z | Lib/fontTools/ttLib/tables/C_B_D_T_.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 74 | 2020-01-30T07:27:54.000Z | 2021-08-03T05:47:17.000Z | Lib/fontTools/ttLib/tables/C_B_D_T_.py | anntzer/fonttools | 726cd67549956b985bbbe83e26fb0af9da59ddf7 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-01-22T20:06:09.000Z | 2020-01-22T20:06:09.000Z | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Matt Fontaine
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import E_B_D_T_
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
import struct
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
# Change the data locator table being referenced.
locatorName = 'CBLC'
# Modify the format class accessor for color bitmap use.
def getImageFormatClass(self, imageFormat):
try:
return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
except KeyError:
return cbdt_bitmap_classes[imageFormat]
# Helper method for removing export features not supported by color bitmaps.
# Write data in the parent class will default to raw if an option is unsupported.
def _removeUnsupportedForColor(dataFunctions):
dataFunctions = dict(dataFunctions)
del dataFunctions['row']
return dataFunctions
class ColorBitmapGlyph(BitmapGlyph):
fileExtension = '.png'
xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
def decompile(self):
self.metrics = SmallGlyphMetrics()
dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
(dataLen,) = struct.unpack(">L", data[:4])
data = data[4:]
# For the image data cut it to the size specified by dataLen.
assert dataLen <= len(data), "Data overrun in format 17"
self.imageData = data[:dataLen]
def compile(self, ttFont):
dataList = []
dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
dataList.append(struct.pack(">L", len(self.imageData)))
dataList.append(self.imageData)
return bytesjoin(dataList)
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
def decompile(self):
self.metrics = BigGlyphMetrics()
dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
(dataLen,) = struct.unpack(">L", data[:4])
data = data[4:]
# For the image data cut it to the size specified by dataLen.
assert dataLen <= len(data), "Data overrun in format 18"
self.imageData = data[:dataLen]
def compile(self, ttFont):
dataList = []
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
dataList.append(struct.pack(">L", len(self.imageData)))
dataList.append(self.imageData)
return bytesjoin(dataList)
class cbdt_bitmap_format_19(ColorBitmapGlyph):
def decompile(self):
(dataLen,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
assert dataLen <= len(data), "Data overrun in format 19"
self.imageData = data[:dataLen]
def compile(self, ttFont):
return struct.pack(">L", len(self.imageData)) + self.imageData
# Dict for CBDT extended formats.
cbdt_bitmap_classes = {
17: cbdt_bitmap_format_17,
18: cbdt_bitmap_format_18,
19: cbdt_bitmap_format_19,
}
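# Quick reference for the three color formats handled above (summary derived from
# the classes themselves): format 17 embeds SmallGlyphMetrics followed by the PNG
# image data, format 18 embeds BigGlyphMetrics followed by the PNG data, and
# format 19 carries only the PNG data (its metrics are expected to live in the
# CBLC locator table instead).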
| 32.473118 | 114 | 0.768543 |
8ab6539fefc10906c9b80dea4a1fe17a24632dc6 | 3,421 | py | Python | gitlint-core/gitlint/tests/contrib/test_contrib_rules.py | carlsmedstad/gitlint | 3c017995633f602125f7caaa91e96bb767ca5953 | [
"MIT"
] | 559 | 2015-09-09T14:45:00.000Z | 2022-03-27T21:23:18.000Z | gitlint-core/gitlint/tests/contrib/test_contrib_rules.py | carlsmedstad/gitlint | 3c017995633f602125f7caaa91e96bb767ca5953 | [
"MIT"
] | 197 | 2015-09-11T01:49:20.000Z | 2022-03-30T01:22:34.000Z | gitlint-core/gitlint/tests/contrib/test_contrib_rules.py | carlsmedstad/gitlint | 3c017995633f602125f7caaa91e96bb767ca5953 | [
"MIT"
] | 97 | 2015-09-09T15:38:09.000Z | 2022-02-20T16:55:12.000Z | # -*- coding: utf-8 -*-
import os
from gitlint.tests.base import BaseTestCase
from gitlint.contrib import rules as contrib_rules
from gitlint.tests.contrib import rules as contrib_tests
from gitlint import rule_finder, rules
class ContribRuleTests(BaseTestCase):
CONTRIB_DIR = os.path.dirname(os.path.realpath(contrib_rules.__file__))
def test_contrib_tests_exist(self):
""" Tests that every contrib rule file has an associated test file.
While this doesn't guarantee that every contrib rule has associated tests (as we don't check the content
of the tests file), it's a good leading indicator. """
contrib_tests_dir = os.path.dirname(os.path.realpath(contrib_tests.__file__))
contrib_test_files = os.listdir(contrib_tests_dir)
# Find all python files in the contrib dir and assert there's a corresponding test file
for filename in os.listdir(self.CONTRIB_DIR):
if filename.endswith(".py") and filename not in ["__init__.py"]:
expected_test_file = "test_" + filename
error_msg = "Every Contrib Rule must have associated tests. " + \
f"Expected test file {os.path.join(contrib_tests_dir, expected_test_file)} not found."
self.assertIn(expected_test_file, contrib_test_files, error_msg)
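# Illustrative example with hypothetical names: a contrib rule shipped as
# contrib/rules/my_rule.py would make this test require a matching
# tests/contrib/rules/test_my_rule.py and fail with the message above if that
# test module is absent.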
def test_contrib_rule_naming_conventions(self):
""" Tests that contrib rules follow certain naming conventions.
We can test for this at test time (and not during runtime like rule_finder.assert_valid_rule_class does)
because these are contrib rules: once they're part of gitlint they can't change unless they pass this test
again.
"""
rule_classes = rule_finder.find_rule_classes(self.CONTRIB_DIR)
for clazz in rule_classes:
# Contrib rule names start with "contrib-"
self.assertTrue(clazz.name.startswith("contrib-"))
# Contrib line rules targeting the commit title have ids starting with "CT", body rules with "CB"
if issubclass(clazz, rules.LineRule):
if clazz.target == rules.CommitMessageTitle:
self.assertTrue(clazz.id.startswith("CT"))
elif clazz.target == rules.CommitMessageBody:
self.assertTrue(clazz.id.startswith("CB"))
def test_contrib_rule_uniqueness(self):
""" Tests that all contrib rules have unique identifiers.
We can test for this at test time (and not during runtime like rule_finder.assert_valid_rule_class does)
because these are contrib rules: once they're part of gitlint they can't change unless they pass this test
again.
"""
rule_classes = rule_finder.find_rule_classes(self.CONTRIB_DIR)
# Not very efficient way of checking uniqueness, but it works :-)
class_names = [rule_class.name for rule_class in rule_classes]
class_ids = [rule_class.id for rule_class in rule_classes]
self.assertEqual(len(set(class_names)), len(class_names))
self.assertEqual(len(set(class_ids)), len(class_ids))
def test_contrib_rule_instantiated(self):
""" Tests that all contrib rules can be instantiated without errors. """
rule_classes = rule_finder.find_rule_classes(self.CONTRIB_DIR)
# No exceptions = what we want :-)
for rule_class in rule_classes:
rule_class()
| 48.871429 | 118 | 0.679041 |
63ee52b52aff96cd5cc95d8352c6b8e5d57d5f90 | 56 | py | Python | tests/data/baz.py | mwtoews/absolufy-imports | e8d184d59900f29497bf9998c947d8fe43b7d8e3 | [
"MIT"
] | 76 | 2021-02-28T21:34:31.000Z | 2022-03-23T21:11:27.000Z | tests/data/baz.py | mwtoews/absolufy-imports | e8d184d59900f29497bf9998c947d8fe43b7d8e3 | [
"MIT"
] | 39 | 2021-03-01T16:47:44.000Z | 2022-03-21T21:48:49.000Z | tests/data/baz.py | mwtoews/absolufy-imports | e8d184d59900f29497bf9998c947d8fe43b7d8e3 | [
"MIT"
] | 6 | 2021-03-02T20:23:48.000Z | 2022-01-20T08:44:53.000Z | from mypackage.mysubpackage import B
print(T)
print(D)
| 11.2 | 36 | 0.785714 |
d1278567bbcd4c3c770620d8b6dfd20b4069b20f | 17,518 | py | Python | .compit/lib/python3.6/site-packages/tornado/test/httputil_test.py | constKutsy/GeoWiki | e336bb618ba00a993accd433a7cc78d87f291dbc | [
"Unlicense"
] | 90 | 2020-11-20T12:35:03.000Z | 2021-07-29T21:20:55.000Z | tornado/test/httputil_test.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 68 | 2016-12-12T20:38:47.000Z | 2020-07-26T18:28:49.000Z | tornado/test/httputil_test.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 120 | 2016-08-18T14:53:03.000Z | 2020-06-16T13:27:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from tornado.httputil import url_concat, parse_multipart_form_data, HTTPHeaders, format_timestamp, HTTPServerRequest, parse_request_start_line, parse_cookie
from tornado.escape import utf8, native_str
from tornado.log import gen_log
from tornado.testing import ExpectLog
from tornado.test.util import unittest
import copy
import datetime
import logging
import pickle
import time
class TestUrlConcat(unittest.TestCase):
def test_url_concat_no_query_params(self):
url = url_concat(
"https://localhost/path",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=y&z=z")
def test_url_concat_encode_args(self):
url = url_concat(
"https://localhost/path",
[('y', '/y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=%2Fy&z=z")
def test_url_concat_trailing_q(self):
url = url_concat(
"https://localhost/path?",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=y&z=z")
def test_url_concat_q_with_no_trailing_amp(self):
url = url_concat(
"https://localhost/path?x",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?x=&y=y&z=z")
def test_url_concat_trailing_amp(self):
url = url_concat(
"https://localhost/path?x&",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?x=&y=y&z=z")
def test_url_concat_mult_params(self):
url = url_concat(
"https://localhost/path?a=1&b=2",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?a=1&b=2&y=y&z=z")
def test_url_concat_no_params(self):
url = url_concat(
"https://localhost/path?r=1&t=2",
[],
)
self.assertEqual(url, "https://localhost/path?r=1&t=2")
def test_url_concat_none_params(self):
url = url_concat(
"https://localhost/path?r=1&t=2",
None,
)
self.assertEqual(url, "https://localhost/path?r=1&t=2")
def test_url_concat_with_frag(self):
url = url_concat(
"https://localhost/path#tab",
[('y', 'y')],
)
self.assertEqual(url, "https://localhost/path?y=y#tab")
def test_url_concat_multi_same_params(self):
url = url_concat(
"https://localhost/path",
[('y', 'y1'), ('y', 'y2')],
)
self.assertEqual(url, "https://localhost/path?y=y1&y=y2")
def test_url_concat_multi_same_query_params(self):
url = url_concat(
"https://localhost/path?r=1&r=2",
[('y', 'y')],
)
self.assertEqual(url, "https://localhost/path?r=1&r=2&y=y")
def test_url_concat_dict_params(self):
url = url_concat(
"https://localhost/path",
dict(y='y'),
)
self.assertEqual(url, "https://localhost/path?y=y")
class MultipartFormDataTest(unittest.TestCase):
def test_file_upload(self):
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--""".replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_unquoted_names(self):
# quotes are optional unless special characters are present
data = b"""\
--1234
Content-Disposition: form-data; name=files; filename=ab.txt
Foo
--1234--""".replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_special_filenames(self):
filenames = ['a;b.txt',
'a"b.txt',
'a";b.txt',
'a;"b.txt',
'a";";.txt',
'a\\"b.txt',
'a\\b.txt',
]
for filename in filenames:
logging.debug("trying filename %r", filename)
data = """\
--1234
Content-Disposition: form-data; name="files"; filename="%s"
Foo
--1234--""" % filename.replace('\\', '\\\\').replace('"', '\\"')
data = utf8(data.replace("\n", "\r\n"))
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], filename)
self.assertEqual(file["body"], b"Foo")
def test_boundary_starts_and_ends_with_quotes(self):
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b'"1234"', data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
def test_missing_headers(self):
data = b'''\
--1234
Foo
--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "multipart/form-data missing headers"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_invalid_content_disposition(self):
data = b'''\
--1234
Content-Disposition: invalid; name="files"; filename="ab.txt"
Foo
--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "Invalid multipart/form-data"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_line_does_not_end_with_correct_line_break(self):
data = b'''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo--1234--'''.replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "Invalid multipart/form-data"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_content_disposition_header_without_name_parameter(self):
data = b"""\
--1234
Content-Disposition: form-data; filename="ab.txt"
Foo
--1234--""".replace(b"\n", b"\r\n")
args = {}
files = {}
with ExpectLog(gen_log, "multipart/form-data value missing name"):
parse_multipart_form_data(b"1234", data, args, files)
self.assertEqual(files, {})
def test_data_after_final_boundary(self):
# The spec requires that data after the final boundary be ignored.
# http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html
# In practice, some libraries include an extra CRLF after the boundary.
data = b"""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--
""".replace(b"\n", b"\r\n")
args = {}
files = {}
parse_multipart_form_data(b"1234", data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b"Foo")
class HTTPHeadersTest(unittest.TestCase):
def test_multi_line(self):
# Lines beginning with whitespace are appended to the previous line
# with any leading whitespace replaced by a single space.
# Note that while multi-line headers are a part of the HTTP spec,
# their use is strongly discouraged.
data = """\
Foo: bar
baz
Asdf: qwer
\tzxcv
Foo: even
more
lines
""".replace("\n", "\r\n")
headers = HTTPHeaders.parse(data)
self.assertEqual(headers["asdf"], "qwer zxcv")
self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
self.assertEqual(headers["Foo"], "bar baz,even more lines")
self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"])
self.assertEqual(sorted(list(headers.get_all())),
[("Asdf", "qwer zxcv"),
("Foo", "bar baz"),
("Foo", "even more lines")])
def test_unicode_newlines(self):
# Ensure that only \r\n is recognized as a header separator, and not
# the other newline-like unicode characters.
# Characters that are likely to be problematic can be found in
# http://unicode.org/standard/reports/tr13/tr13-5.html
# and cpython's unicodeobject.c (which defines the implementation
# of unicode_type.splitlines(), and uses a different list than TR13).
newlines = [
u'\u001b', # VERTICAL TAB
u'\u001c', # FILE SEPARATOR
u'\u001d', # GROUP SEPARATOR
u'\u001e', # RECORD SEPARATOR
u'\u0085', # NEXT LINE
u'\u2028', # LINE SEPARATOR
u'\u2029', # PARAGRAPH SEPARATOR
]
for newline in newlines:
# Try the utf8 and latin1 representations of each newline
for encoding in ['utf8', 'latin1']:
try:
try:
encoded = newline.encode(encoding)
except UnicodeEncodeError:
# Some chars cannot be represented in latin1
continue
data = b'Cookie: foo=' + encoded + b'bar'
# parse() wants a native_str, so decode through latin1
# in the same way the real parser does.
headers = HTTPHeaders.parse(
native_str(data.decode('latin1')))
expected = [('Cookie', 'foo=' +
native_str(encoded.decode('latin1')) + 'bar')]
self.assertEqual(
expected, list(headers.get_all()))
except Exception:
gen_log.warning("failed while trying %r in %s",
newline, encoding)
raise
def test_optional_cr(self):
# Both CRLF and LF should be accepted as separators. CR should not be
# part of the data when followed by LF, but it is a normal char
# otherwise (or should bare CR be an error?)
headers = HTTPHeaders.parse(
'CRLF: crlf\r\nLF: lf\nCR: cr\rMore: more\r\n')
self.assertEqual(sorted(headers.get_all()),
[('Cr', 'cr\rMore: more'),
('Crlf', 'crlf'),
('Lf', 'lf'),
])
def test_copy(self):
all_pairs = [('A', '1'), ('A', '2'), ('B', 'c')]
h1 = HTTPHeaders()
for k, v in all_pairs:
h1.add(k, v)
h2 = h1.copy()
h3 = copy.copy(h1)
h4 = copy.deepcopy(h1)
for headers in [h1, h2, h3, h4]:
# All the copies are identical, no matter how they were
# constructed.
self.assertEqual(list(sorted(headers.get_all())), all_pairs)
for headers in [h2, h3, h4]:
# Neither the dict or its member lists are reused.
self.assertIsNot(headers, h1)
self.assertIsNot(headers.get_list('A'), h1.get_list('A'))
def test_pickle_roundtrip(self):
headers = HTTPHeaders()
headers.add('Set-Cookie', 'a=b')
headers.add('Set-Cookie', 'c=d')
headers.add('Content-Type', 'text/html')
pickled = pickle.dumps(headers)
unpickled = pickle.loads(pickled)
self.assertEqual(sorted(headers.get_all()), sorted(unpickled.get_all()))
self.assertEqual(sorted(headers.items()), sorted(unpickled.items()))
def test_setdefault(self):
headers = HTTPHeaders()
headers['foo'] = 'bar'
# If a value is present, setdefault returns it without changes.
self.assertEqual(headers.setdefault('foo', 'baz'), 'bar')
self.assertEqual(headers['foo'], 'bar')
# If a value is not present, setdefault sets it for future use.
self.assertEqual(headers.setdefault('quux', 'xyzzy'), 'xyzzy')
self.assertEqual(headers['quux'], 'xyzzy')
self.assertEqual(sorted(headers.get_all()), [('Foo', 'bar'), ('Quux', 'xyzzy')])
def test_string(self):
headers = HTTPHeaders()
headers.add("Foo", "1")
headers.add("Foo", "2")
headers.add("Foo", "3")
headers2 = HTTPHeaders.parse(str(headers))
self.assertEquals(headers, headers2)
class FormatTimestampTest(unittest.TestCase):
# Make sure that all the input types are supported.
TIMESTAMP = 1359312200.503611
EXPECTED = 'Sun, 27 Jan 2013 18:43:20 GMT'
def check(self, value):
self.assertEqual(format_timestamp(value), self.EXPECTED)
def test_unix_time_float(self):
self.check(self.TIMESTAMP)
def test_unix_time_int(self):
self.check(int(self.TIMESTAMP))
def test_struct_time(self):
self.check(time.gmtime(self.TIMESTAMP))
def test_time_tuple(self):
tup = tuple(time.gmtime(self.TIMESTAMP))
self.assertEqual(9, len(tup))
self.check(tup)
def test_datetime(self):
self.check(datetime.datetime.utcfromtimestamp(self.TIMESTAMP))
# HTTPServerRequest is mainly tested incidentally to the server itself,
# but this tests the parts of the class that can be tested in isolation.
class HTTPServerRequestTest(unittest.TestCase):
def test_default_constructor(self):
# All parameters are formally optional, but uri is required
# (and has been for some time). This test ensures that no
# more required parameters slip in.
HTTPServerRequest(uri='/')
def test_body_is_a_byte_string(self):
request = HTTPServerRequest(uri='/')
self.assertIsInstance(request.body, bytes)
class ParseRequestStartLineTest(unittest.TestCase):
METHOD = "GET"
PATH = "/foo"
VERSION = "HTTP/1.1"
def test_parse_request_start_line(self):
start_line = " ".join([self.METHOD, self.PATH, self.VERSION])
parsed_start_line = parse_request_start_line(start_line)
self.assertEqual(parsed_start_line.method, self.METHOD)
self.assertEqual(parsed_start_line.path, self.PATH)
self.assertEqual(parsed_start_line.version, self.VERSION)
class ParseCookieTest(unittest.TestCase):
# These tests copied from Django:
# https://github.com/django/django/pull/6277/commits/da810901ada1cae9fc1f018f879f11a7fb467b28
def test_python_cookies(self):
"""
Test cases copied from Python's Lib/test/test_http_cookies.py
"""
self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
# Here parse_cookie() differs from Python's cookie parsing in that it
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
# Cookies with ':' character in their name.
self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
# Cookies with '[' and ']'.
self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})
def test_cookie_edgecases(self):
# Cookies that RFC6265 allows.
self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})
def test_invalid_cookies(self):
"""
Cookie strings that go against RFC6265 but browsers will send if set
via document.cookie.
"""
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys())
# Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
# More characters the spec forbids.
self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'})
# Unicode characters. The spec only allows ASCII.
self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': native_str('André Bessette')})
# Browsers don't send extra whitespace or semicolons in Cookie headers,
# but parse_cookie() should parse whitespace the same way
# document.cookie parses whitespace.
self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''})
| 37.511777 | 156 | 0.579347 |
c05d43a76e776996cae8f60b9c7f79dfca3df78e | 169,245 | py | Python | lib-python/3/test/test_io.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib-python/3/test/test_io.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib-python/3/test/test_io.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
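# A minimal sketch of that pattern (hypothetical names; the real base classes and
# their C/Py pairs appear throughout this file):
#
#   class MyBufferedTests:                      # implementation-agnostic base
#       def test_roundtrip(self):
#           buf = self.tp(self.MockRawIO())
#           ...
#
#   class CMyBufferedTests(MyBufferedTests, unittest.TestCase):
#       tp = io.BufferedReader                  # exercises the C implementation
#
#   class PyMyBufferedTests(MyBufferedTests, unittest.TestCase):
#       tp = pyio.BufferedReader                # exercises the pure-Python one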
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
if '__pypy__' in sys.builtin_module_names:
raise ImportError # don't use ctypes, missing ctypes.resize()
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
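# Behaviour of the mock above (summary of its write(), for the buffered-writer
# tests that rely on it): after block_on(ch), the next write() whose payload
# contains ch either accepts only the bytes before ch and returns that short
# count, or, when ch is the very first byte, clears the blocker and returns None
# to signal "would block"; pop_written() then exposes the bytes that actually
# reached the fake device.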
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(support.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(os.fsencode(support.TESTFN)))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
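        # Sentinel byte used to verify that bytes beyond the reported read
        # count are left untouched in the target buffer.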
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
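            # Attribute writes are silently ignored, so close() cannot mark
            # the object as closed, and flush() always raises.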
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
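        # The mock raw stream reports a fixed descriptor (42); fileno() must
        # simply delegate to it.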
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
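        # sys.getsizeof() should account for the internal buffer, so growing
        # buffer_size grows the reported size by the same amount.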
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
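        # rawio._reads counts calls to the raw read(); read1() must not
        # trigger more than one of them per call.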
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
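        # Each entry: [buffer size, buffered read sizes, expected raw read sizes].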
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead.
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader|__init__"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
            # Pre-fill the file with some data
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
                # After the write, write_pos and write_end are set to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
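                # Position: (buffer_size + 1) bytes written plus 1 byte read;
                # truncate() must not move it.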
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead.
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter|__init__"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom|__init__"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
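        # Reverse getstate(): split the packed i*100 + o value and undo the
        # XOR-by-1 applied there.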
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
            # try to get a user-preferred encoding different from the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
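        # Each entry: [newline argument, lines expected back from the reader].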
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
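        # Maps the newline argument to the bytes expected on disk after the
        # writes below; None follows os.linesep (see `tests`).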
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
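    # The next test drives tell()/seek() through the StatefulIncrementalDecoder
    # to check that the opaque position cookies restore both the stream offset
    # and the decoder state.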
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
        # Issue 31271: calling write() when the encoder's encode() returns an
        # invalid value shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
        # Issue 31243: calling read() when the decoder's getstate() returns an
        # invalid value should neither crash the interpreter nor raise a
        # SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
@support.impl_detail("PyPy does not call __del__ at shutdown", pypy=False)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig has not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
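# Truncate *buf* to a whole number of C ints and expose it as a memoryview of
# an array('i'), so reads hand back a non-bytes object whose itemsize is > 1.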
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
        # default chunk size is 8192 bytes, so t doesn't write data to buf.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
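# Tests for IncrementalNewlineDecoder, fed either str input (decoder=None) or
# bytes input through an incremental codec, in both the C and Python variants.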
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
        # - This happens only for streams created by the _pyio module,
        #   because a wio.close() that fails still considers that the
        #   file needs to be closed again.  You can try adding an
        #   "assert wio.closed" at the end of the function.
        # Fortunately, a little gc.collect() seems to be enough to
        # work around all these issues.
support.gc_collect()
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
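# Bind the namespaces of the C and pure-Python io modules (plus the matching
# mock classes) onto each concrete test class, then assemble the full suite.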
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
| 37.336201 | 103 | 0.576868 |
e3b65acff098067f466a764fb22505e72085444d | 37,557 | py | Python | espnet-semi-supervised/src/asrtts/asrtts_pytorch_5050.py | ElinaBaral/espnet_speech2speech | fb9a02f71339eb5494656c5ff2fdebf5051cd2cd | ["Apache-2.0"] | 1 | 2021-02-05T03:33:52.000Z | 2021-02-05T03:33:52.000Z | espnet-semi-supervised/src/asrtts/asrtts_pytorch_5050.py | ElinaBaral/espnet_speech2speech | fb9a02f71339eb5494656c5ff2fdebf5051cd2cd | ["Apache-2.0"] | null | null | null | espnet-semi-supervised/src/asrtts/asrtts_pytorch_5050.py | ElinaBaral/espnet_speech2speech | fb9a02f71339eb5494656c5ff2fdebf5051cd2cd | ["Apache-2.0"] | 1 | 2021-02-05T03:33:54.000Z | 2021-02-05T03:33:54.000Z |
#!/usr/bin/env python
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import copy
import json
import logging
import math
import numpy as np
import random
import os
import pickle
import sys
# chainer related
import chainer
from chainer import reporter as reporter_module
from chainer import training
from chainer.training import extensions
from chainer.training import extension
# torch related
import torch
from torch.autograd import Variable
# spnet related
from asr_utils import adadelta_eps_decay
from asr_utils import CompareValueTrigger
from asr_utils import load_labeldict
from asr_utils import make_batchset
from asr_utils import restore_snapshot
# from tts_pytorch import pad_ndarray_list
from e2e_asr_th import E2E
from e2e_asr_th import Loss
from e2e_asr_th import pad_list
torch_is_old = False
from e2e_tts_th import Tacotron2
from e2e_tts_th import Tacotron2Loss
from distance_th import packed_mmd
from distance_th import packed_gauss_kld
from e2e_asrtts_th import ASRTTSLoss
from e2e_asrtts_th import get_asr_data
from e2e_asrtts_th import get_tts_data
from asrtts_utils import pad_ndarray_list
# for kaldi io
import kaldi_io_py
# matplotlib related
import matplotlib
matplotlib.use('Agg')
# some tuning params
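# FREEZE_ATT zeroes the attention gradients so those modules stay fixed during
# the shared updates; NO_AVG disables normalizing the ASR/text losses by the
# average target-text length.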
REPORT_INTERVAL = 100
ALL_MODE = True # False
FREEZE_ATT = True
NO_AVG = True
MODEL_FULL = False
from asr_utils import PlotAttentionReport
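# Attention plotter shared by both directions: isASR selects the ASR attention
# visualizer, otherwise the TTS one, and __call__ saves one attention plot per
# utterance tagged with the current epoch.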
class CustomPlotAttentionReport(PlotAttentionReport):
def __init__(self, model, isASR, data, outdir, reverse=False):
self.isASR = isASR
if hasattr(model, "module"):
self.model = model.module
else:
self.model = model
if isASR:
att_vis_fn = self.model.asr_loss.predictor.calculate_all_attentions
else:
att_vis_fn = self.model.tts_loss.model.calculate_all_attentions
converter = None
device = None
super().__init__(att_vis_fn, data, outdir, converter, device, reverse)
def __call__(self, trainer):
with torch.no_grad():
if self.isASR:
asr_texts, asr_feats, asr_featlens = get_asr_torch(self.model, self.data)
att_ws = self.att_vis_fn(asr_feats, asr_featlens, asr_texts)
else:
tts_texts, tts_textlens, tts_feats, tts_labels, tts_featlens, spembs \
= get_tts_torch(self.model, self.data)
att_ws = self.att_vis_fn(tts_texts, tts_textlens, tts_feats, spembs)
for idx, att_w in enumerate(att_ws):
filename = "%s/%s.ep.{.updater.epoch}.png" % (
self.outdir, self.data[idx][0])
if self.reverse:
dec_len = int(self.data[idx][1]['input'][0]['shape'][0])
enc_len = int(self.data[idx][1]['output'][0]['shape'][0])
else:
dec_len = int(self.data[idx][1]['output'][0]['shape'][0])
enc_len = int(self.data[idx][1]['input'][0]['shape'][0])
if len(att_w.shape) == 3:
att_w = att_w[:, :dec_len, :enc_len]
else:
att_w = att_w[:dec_len, :enc_len]
self._plot_and_save_attention(att_w, filename.format(trainer))
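# Load the Kaldi features referenced in the minibatch JSON (ASR input, TTS
# target and, optionally, a speaker-embedding vector) and attach them to each
# utterance entry; delete_feat below drops them again to free memory.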
def converter_kaldi(batch, device=None, use_speaker_embedding=None):
# batch only has one minibatch utterance, which is specified by batch[0]
batch = batch[0]
for data in batch:
feat_asr = kaldi_io_py.read_mat(data[1]['input'][0]['feat'])
feat_tts = kaldi_io_py.read_mat(data[1]['input'][1]['feat'])
data[1]['feat_asr'] = feat_asr
data[1]['feat_tts'] = feat_tts
if use_speaker_embedding is not None:
feat_spembs = kaldi_io_py.read_vec_flt(data[1]['input'][2]['feat'])
data[1]['feat_spembs'] = feat_spembs
return batch
def delete_feat(batch):
for data in batch:
del data[1]['feat_asr']
del data[1]['feat_tts']
if 'feat_spembs' in data[1]:
del data[1]['feat_spembs']
return batch
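# Thin wrappers that pull padded ASR / TTS tensors out of a converted minibatch,
# used by the attention-plotting and evaluation classes.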
def get_asr_torch(model, data):
asr_texts, asr_feats, asr_featlens = get_asr_data(model.asr_loss.predictor, data, 'feat')
asr_texts = pad_list(asr_texts, model.asr_loss.predictor.dec.ignore_id)
return asr_texts, asr_feats, asr_featlens
def get_tts_torch(model, data):
return get_tts_data(model, data, 'text', use_speaker_embedding=model.tts_loss.model.spk_embed_dim)
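# Evaluator for the joint model: on parallel ('p') utterances it computes the
# ASR, TTS, speech-autoencoder and text-autoencoder losses and reports their
# averages under the 'd/' prefix.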
class CustomEvaluater(extensions.Evaluator):
'''Custom evaluater for pytorch'''
def __init__(self, model, iterator, target, converter, device):
super(CustomEvaluater, self).__init__(iterator, target, converter=converter, device=device)
self.model = model
# The core part of the update routine can be customized by overriding.
def evaluate(self):
iterator = self._iterators['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
if not torch_is_old:
torch.set_grad_enabled(False)
for batch in it:
observation = {}
with reporter_module.report_scope(observation):
# read scp files
# x: original json with loaded features
# will be converted to chainer variable later
data = self.converter(batch, use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
self.model.eval()
if data[0][1]['utt2mode'] != 'p':
                    logging.error("Error: evaluation only supports the parallel data mode ('p')")
sys.exit()
asr_texts, asr_feats, asr_featlens = get_asr_torch(self.model, data)
asr_loss, asr_acc = self.model.asr_loss(asr_feats, asr_featlens, asr_texts,
do_report=False, report_acc=True) # disable reporter
tts_texts, tts_textlens, tts_feats, tts_labels, tts_featlens, spembs = \
get_tts_data(self.model, data, 'text', use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
avg_textlen = float(np.mean(tts_textlens.data.cpu().numpy()))
tts_loss = self.model.tts_loss(tts_texts, tts_textlens, tts_feats, tts_labels, tts_featlens,
spembs=spembs, do_report=False)
s2s_loss = self.model.ae_speech(data)
t2t_loss, t2t_acc = self.model.ae_text(data)
# average loss for all four networks
                if NO_AVG:
                    loss = (asr_loss + tts_loss + s2s_loss + t2t_loss) / 4.0
                else:
                    loss = (asr_loss / avg_textlen + tts_loss + s2s_loss + t2t_loss / avg_textlen) / 4.0
                loss_data = loss.item()
                if NO_AVG:
                    asr_loss_data = asr_loss.item()
                else:
                    asr_loss_data = asr_loss.item() / avg_textlen
                tts_loss_data = tts_loss.item()
                s2s_loss_data = s2s_loss.item()
                if NO_AVG:
                    t2t_loss_data = t2t_loss.item()
                else:
                    t2t_loss_data = t2t_loss.item() / avg_textlen
chainer.reporter.report({'d/loss': loss_data})
chainer.reporter.report({'d/asr_loss': asr_loss_data})
chainer.reporter.report({'d/tts_loss': tts_loss_data})
chainer.reporter.report({'d/asr_acc': asr_acc})
chainer.reporter.report({'d/s2s_loss': s2s_loss_data})
chainer.reporter.report({'d/t2t_loss': t2t_loss_data})
chainer.reporter.report({'d/t2t_acc': t2t_acc})
delete_feat(data)
summary.add(observation)
if not torch_is_old:
torch.set_grad_enabled(True)
self.model.train()
return summary.compute_mean()
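# Zero the gradients of every parameter under `att`, which keeps the ASR and
# TTS attention modules effectively frozen when FREEZE_ATT is enabled.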
def update_parameters(att):
for child in att.children():
for param in child.parameters():
if param.grad is not None:
param.grad.data.zero_()
class CustomUpdater(training.StandardUpdater):
'''Custom updater for pytorch'''
def __init__(self, model, grad_clip_threshold, train_iter, train_multi,
opts, converter, device, args):
super(CustomUpdater, self).__init__(
train_iter, opts, converter=converter, device=None)
self.model = model
self.train_multi = train_multi
self.grad_clip_threshold = grad_clip_threshold
self.num_gpu = len(device)
self.opts = opts
self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
self.args = args
self.len_pair = len(train_multi["pair"])
self.reset_idx()
def reset_idx(self):
self.iter_idx = 0
self.speech_idx = np.random.permutation(self.len_pair)
self.text_idx = np.random.permutation(self.len_pair)
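    # Despite the spelling, this performs one gradient-descent step: backprop
    # (unless backward=False), optional zeroing of the attention gradients,
    # per-module gradient-norm clipping and logging, and an optimizer step
    # that is skipped when the total norm is NaN.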
def gradient_decent(self, loss, optimizer, freeze_att=False, retain_graph=False, backward=True):
if backward:
optimizer.zero_grad() # Clear the parameter gradients
loss.backward(retain_graph=retain_graph) # Backprop
loss.detach() # Truncate the graph
if freeze_att:
update_parameters(self.model.tts_loss.model.dec.att)
update_parameters(self.model.asr_loss.predictor.att)
# compute the gradient norm to check if it is normal or not
grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip_threshold)
logging.info('total grad norm={}'.format(grad_norm))
grad_norm = self.clip_grad_norm(self.model.asr_loss.parameters(), self.grad_clip_threshold)
logging.info('asr grad norm={}'.format(grad_norm))
grad_norm = self.clip_grad_norm(self.model.tts_loss.parameters(), self.grad_clip_threshold)
logging.info('tts grad norm={}'.format(grad_norm))
grad_norm = self.clip_grad_norm(self.model.ae_speech.asr_enc.parameters(), self.grad_clip_threshold)
grad_norm += self.clip_grad_norm(self.model.ae_speech.tts_dec.parameters(), self.grad_clip_threshold)
logging.info('s2s grad norm={}'.format(grad_norm))
grad_norm = self.clip_grad_norm(self.model.ae_text.tts_enc.parameters(), self.grad_clip_threshold)
grad_norm += self.clip_grad_norm(self.model.ae_text.asr_dec.parameters(), self.grad_clip_threshold)
logging.info('t2t grad norm={}'.format(grad_norm))
if math.isnan(grad_norm):
logging.warning('grad norm is nan. Do not update model.')
else:
optimizer.step()
# The core part of the update routine can be customized by overriding.
def update_core(self):
# When we pass one iterator and optimizer to StandardUpdater.__init__,
# they are automatically named 'main'.
train_iter = self.get_iterator('main')
        # Get the next batch (a list of json files)
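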
pair_batch = train_iter.__next__()
self.update_core_impl(pair_batch)
# update autoencoding
speech_batch = [self.train_multi["speech"][self.speech_idx[self.iter_idx]]]
text_batch = [self.train_multi["text"][self.text_idx[self.iter_idx]]]
if self.args.use_mmd_autoencoding:
self.update_core_impl_autoencode(speech_batch, text_batch)
else:
self.update_core_impl(speech_batch)
self.update_core_impl(text_batch)
self.iter_idx += 1
if self.iter_idx == self.len_pair:
self.reset_idx()
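    # Joint autoencoder update on one unpaired speech batch and one unpaired text batch:
    # run the speech (s2s) and text (t2t) autoencoders, tie their hidden states with an
    # inter-domain loss (MMD or KL), and step both optimizers from a single backward pass.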
def update_core_impl_autoencode(self, speech_batch, text_batch):
# read scp files
# x: original json with loaded features
# will be converted to chainer variable later
# batch only has one minibatch utterance, which is specified by batch[0]
s_data = self.converter(speech_batch, use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
t_data = self.converter(text_batch, use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
for data in (s_data, t_data):
asr_texts, asr_feats, asr_featlens = get_asr_torch(self.model, data)
tts_texts, tts_textlens, tts_feats, tts_labels, tts_featlens, spembs = \
get_tts_data(self.model, data, 'text', use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
avg_textlen = float(np.mean(tts_textlens.data.cpu().numpy()))
if data[0][1]['utt2mode'] == 'a':
logging.info("audio only mode")
result = self.model.ae_speech(data, return_hidden=True, return_inout=self.args.use_inout_mmd)
s2s_loss, hspad, hslen = result
loss_data = s2s_loss.item()
logging.info("loss: %f", loss_data)
chainer.reporter.report({'t/s2s_loss': loss_data})
elif data[0][1]['utt2mode'] == 't':
logging.info("text only mode")
# t2t_loss, t2t_acc = self.model.ae_text(data)
result = self.model.ae_text(data, return_hidden=True, return_inout=self.args.use_inout_mmd)
t2t_loss, t2t_acc, htpad, htlen = result
loss_data = t2t_loss.item()
logging.info("loss: %f", loss_data)
chainer.reporter.report({'t/t2t_loss': loss_data})
chainer.reporter.report({'t/t2t_acc': t2t_acc})
else:
logging.error("Error: cannot find correct mode ('a', 't')")
sys.exit()
self.opts["s2s"].zero_grad()
self.opts["t2t"].zero_grad()
if self.args.inter_domain_loss == "kl":
domain_loss_fun = packed_gauss_kld
else:
domain_loss_fun = packed_mmd
mmd_loss = domain_loss_fun(hspad, hslen, htpad, htlen)
chainer.reporter.report({'t/ae_mmd_loss': mmd_loss.item() })
loss = self.args.s2s_weight * s2s_loss + self.args.t2t_weight * t2t_loss + self.args.mmd_weight * mmd_loss
loss.backward()
self.gradient_decent(loss, self.opts['s2s'], freeze_att=FREEZE_ATT, backward=False)
self.gradient_decent(loss, self.opts['t2t'], freeze_att=FREEZE_ATT, backward=False)
delete_feat(s_data)
delete_feat(t_data)
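    # Single-batch update dispatched on utt2mode: 'p' (paired) trains ASR/TTS and, when
    # enabled, the autoencoders plus the MMD/KL term; 'a' trains the speech autoencoder;
    # 't' trains the text autoencoder.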
def update_core_impl(self, batch):
# read scp files
# x: original json with loaded features
# will be converted to chainer variable later
# batch only has one minibatch utterance, which is specified by batch[0]
if len(batch[0]) < self.num_gpu:
logging.warning('batch size is less than number of gpus. Ignored')
return
data = self.converter(batch, use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
# Compute the loss at this time step and accumulate it
asr_texts, asr_feats, asr_featlens = get_asr_torch(self.model, data)
tts_texts, tts_textlens, tts_feats, tts_labels, tts_featlens, spembs = \
get_tts_data(self.model, data, 'text', use_speaker_embedding=self.model.tts_loss.model.spk_embed_dim)
avg_textlen = float(np.mean(tts_textlens.data.cpu().numpy()))
if data[0][1]['utt2mode'] == 'p':
logging.info("parallel data mode")
if ALL_MODE:
modes = ['asr', 'tts', 's2s', 't2t']
else:
modes = ['asr', 'tts']
random.shuffle(modes)
# shuffle
loss_data_sum = 0.0
use_mmd = self.args.mmd_weight != 0.0
for mode in modes:
if mode == 'asr':
if self.args.asr_weight == 0.0:
asr_loss_data = 0 # use nan?
asr_acc = 0
continue
loss, asr_acc = self.model.asr_loss(asr_feats, asr_featlens, asr_texts,
do_report=False, report_acc=True) # disable reporter
if NO_AVG:
pass
else:
loss = loss/avg_textlen
asr_loss_data = loss.item()
chainer.reporter.report({'t/asr_loss': asr_loss_data})
chainer.reporter.report({'t/asr_acc': asr_acc})
loss_data_sum += asr_loss_data
logging.info("asr_loss_data: %f", asr_loss_data)
self.gradient_decent(loss, self.opts[mode])
if mode == 'tts':
if self.args.tts_weight == 0.0:
tts_loss_data = 0 # use nan?
continue
loss = self.model.tts_loss(tts_texts, tts_textlens, tts_feats, tts_labels, tts_featlens,
spembs=spembs, do_report=False)
tts_loss_data = loss.item()
chainer.reporter.report({'t/tts_loss': tts_loss_data})
loss_data_sum += tts_loss_data
logging.info("tts_loss_data: %f", tts_loss_data)
self.gradient_decent(loss, self.opts[mode])
if mode == 's2s':
if self.args.s2s_weight == 0.0 and not use_mmd:
s2s_loss_data = 0.0
continue
result = self.model.ae_speech(data, return_hidden=True, return_inout=self.args.use_inout_mmd)
if self.args.use_inout_mmd:
loss, speech_in, speech_in_len, speech_out, speech_out_len = result
else:
loss, hspad, hslen = result
s2s_loss_data = loss.item()
chainer.reporter.report({'t/s2s_loss': s2s_loss_data})
loss_data_sum += s2s_loss_data
logging.info("s2s_loss_data: %f", s2s_loss_data)
if self.args.s2s_weight != 0.0:
self.gradient_decent(loss, self.opts[mode], freeze_att=FREEZE_ATT, retain_graph=use_mmd)
if mode == 't2t':
if self.args.t2t_weight == 0.0 and not use_mmd:
t2t_loss_data = 0.0
continue
result = self.model.ae_text(data, return_hidden=True, return_inout=self.args.use_inout_mmd)
if self.args.use_inout_mmd:
loss, t2t_acc, text_in, text_in_len, text_out, text_out_len = result
else:
loss, t2t_acc, htpad, htlen = result
if NO_AVG:
pass
else:
loss = loss/avg_textlen
t2t_loss_data = loss.item()
chainer.reporter.report({'t/t2t_loss': t2t_loss_data})
chainer.reporter.report({'t/t2t_acc': t2t_acc})
loss_data_sum += t2t_loss_data
logging.info("t2t_loss_data: %f", t2t_loss_data)
if self.args.t2t_weight != 0.0:
self.gradient_decent(loss, self.opts[mode], freeze_att=FREEZE_ATT, retain_graph=use_mmd)
logging.info("loss_data_sum: %f", loss_data_sum)
if use_mmd:
if self.args.use_inout_mmd:
logging.warning((speech_in.shape, speech_out.shape, speech_in_len, speech_out_len))
speech_loss = packed_mmd(speech_in, speech_in_len, speech_out, speech_out_len)
chainer.reporter.report({'t/ps_mmd': speech_loss.item() })
text_loss = packed_mmd(text_in, text_in_len, text_out, text_out_len)
chainer.reporter.report({'t/pt_mmd': text_loss.item() })
loss = (speech_loss + text_loss) / 2.0
chainer.reporter.report({'t/p_mmd': loss.item() })
else:
if self.args.inter_domain_loss == "kl":
loss_fun = packed_gauss_kld
else:
loss_fun = packed_mmd
loss = loss_fun(hspad, hslen, htpad, htlen)
chainer.reporter.report({'t/mmd_loss': loss.item() })
self.gradient_decent(loss, self.opts["mmd"], freeze_att=FREEZE_ATT)
loss_data = (loss_data_sum + loss.item()) / 5.0
elif ALL_MODE:
loss_data = loss_data_sum / 4.0
else:
loss_data = loss_data_sum / 2.0
logging.info("loss_data: %f", loss_data)
chainer.reporter.report({'t/loss': loss_data})
elif data[0][1]['utt2mode'] == 'a':
logging.info("audio only mode")
result = self.model.ae_speech(data, return_hidden=True, return_inout=self.args.use_inout_mmd)
if self.args.use_inout_mmd:
s2s_loss, speech_in, speech_in_len, speech_out, speech_out_len = result
speech_loss = packed_mmd(speech_in, speech_in_len, speech_out, speech_out_len)
chainer.reporter.report({'t/us_mmd': speech_loss.item() })
loss = (s2s_loss + speech_loss) / 2.0
else:
s2s_loss, hspad, hslen = result
loss = s2s_loss
self.gradient_decent(loss, self.opts['s2s'], freeze_att=FREEZE_ATT)
loss_data = s2s_loss.item()
logging.info("loss: %f", loss_data)
chainer.reporter.report({'t/s2s_loss': loss_data})
elif data[0][1]['utt2mode'] == 't':
logging.info("text only mode")
# t2t_loss, t2t_acc = self.model.ae_text(data)
result = self.model.ae_text(data, return_hidden=True, return_inout=self.args.use_inout_mmd)
if self.args.use_inout_mmd:
t2t_loss, t2t_acc, text_in, text_in_len, text_out, text_out_len = result
text_loss = packed_mmd(text_in, text_in_len, text_out, text_out_len)
chainer.reporter.report({'t/ut_mmd': text_loss.item() })
loss = (t2t_loss + text_loss) / 2.0
else:
t2t_loss, t2t_acc, htpad, htlen = result
loss = t2t_loss
if NO_AVG:
loss = t2t_loss
else:
loss = t2t_loss / avg_textlen
self.gradient_decent(loss, self.opts['t2t'], freeze_att=FREEZE_ATT)
loss_data = t2t_loss.item()
logging.info("loss: %f", loss_data)
chainer.reporter.report({'t/t2t_loss': loss_data})
chainer.reporter.report({'t/t2t_acc': t2t_acc})
else:
logging.error("Error: cannot find correct mode ('p', 'a', 't')")
sys.exit()
delete_feat(data)
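# DataParallel variant whose scatter() splits the list of minibatches evenly across GPUs
# instead of scattering tensors along a batch dimension.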
class DataParallel(torch.nn.DataParallel):
def scatter(self, inputs, kwargs, device_ids, dim):
r"""Scatter with support for kwargs dictionary"""
if len(inputs) == 1:
inputs = inputs[0]
avg = int(math.ceil(len(inputs) * 1. / len(device_ids)))
# inputs = scatter(inputs, device_ids, dim) if inputs else []
inputs = [[inputs[i:i + avg]] for i in range(0, len(inputs), avg)]
        kwargs = torch.nn.parallel.scatter_gather.scatter(kwargs, device_ids, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.dim)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
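# Return the subset of utterances whose utt2mode matches the requested mode ('p', 'a' or 't').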
def extract_json(json, mode):
all_utt = {}
for utt in json:
if json[utt]['utt2mode'] == mode:
all_utt[utt]= json[utt]
return all_utt
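# Build minibatch lists separately for audio-only, text-only and paired utterances
# (unpaired batches are scaled by factor_audio/factor_text and halved when half_unpair
# is set) and return the combined list plus a per-type dict.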
def make_batchset_asrtts(data, batch_size, max_length_in, max_length_out, num_batches=0, factor_audio=1, factor_text=4,
half_unpair=False):
data_audio_only = extract_json(data, 'a')
data_text_only = extract_json(data, 't')
data_parallel = extract_json(data, 'p')
    unpair_batch_size = batch_size // 2 if half_unpair else batch_size
if len(data_audio_only) > 0:
json_audio_only = make_batchset(data_audio_only, factor_audio * unpair_batch_size,
max_length_in, max_length_out, num_batches)
else:
json_audio_only = []
if len(data_text_only) > 0:
json_text_only = make_batchset(data_text_only, factor_text * unpair_batch_size,
max_length_in, max_length_out, num_batches)
else:
json_text_only = []
if len(data_parallel) > 0:
json_parallel = make_batchset(data_parallel, batch_size, max_length_in,
max_length_out, num_batches)
else:
json_parallel = []
json_all = json_audio_only + json_text_only + json_parallel
return json_all, dict(speech=json_audio_only, text=json_text_only, pair=json_parallel)
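# Build the Tacotron2-based TTS loss from the tts_* options; the ASR output (token)
# dimension serves as the TTS input and the TTS feature dimension as its output.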
def setup_tts_loss(odim_asr, idim_tts, args):
from argparse import Namespace
# define output activation function
if args.tts_output_activation is None:
output_activation_fn = None
elif hasattr(torch.nn.functional, args.tts_output_activation):
output_activation_fn = getattr(torch.nn.functional, args.tts_output_activation)
else:
raise ValueError('there is no such an activation function. (%s)' % args.tts_output_activation)
tts_args = Namespace(
spk_embed_dim=args.tts_spk_embed_dim,
embed_dim=args.tts_embed_dim,
elayers=args.tts_elayers,
eunits=args.tts_eunits,
econv_layers=args.tts_econv_layers,
econv_chans=args.tts_econv_chans,
econv_filts=args.tts_econv_filts,
dlayers=args.tts_dlayers,
dunits=args.tts_dunits,
prenet_layers=args.tts_prenet_layers,
prenet_units=args.tts_prenet_units,
postnet_layers=args.tts_postnet_layers,
postnet_chans=args.tts_postnet_chans,
postnet_filts=args.tts_postnet_filts,
output_activation=output_activation_fn,
adim=args.tts_adim,
aconv_chans=args.tts_aconv_chans,
aconv_filts=args.tts_aconv_filts,
cumulate_att_w=args.tts_cumulate_att_w,
use_batch_norm=args.tts_use_batch_norm,
use_concate=args.tts_use_concate,
dropout=args.tts_dropout_rate,
zoneout=args.tts_zoneout_rate,
monotonic=args.tts_monotonic)
e2e_tts = Tacotron2(
idim=odim_asr,
odim=idim_tts,
args=tts_args)
return Tacotron2Loss(
model=e2e_tts,
use_masking=args.tts_use_masking,
bce_pos_weight=args.tts_bce_pos_weight)
def train(args):
'''Run training'''
# seed setting
torch.manual_seed(args.seed)
# debug mode setting
# 0 would be fastest, but 1 seems to be reasonable
    # by considering reproducibility
    # remove type check
if args.debugmode < 2:
chainer.config.type_check = False
logging.info('torch type check is disabled')
# use determinisitic computation or not
if args.debugmode < 1:
torch.backends.cudnn.deterministic = False
logging.info('torch cudnn deterministic is disabled')
else:
torch.backends.cudnn.deterministic = True
# check cuda availability
if not torch.cuda.is_available():
logging.warning('cuda is not available')
# get input and output dimension info
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
idim_asr = int(valid_json[utts[0]]['input'][0]['shape'][1])
idim_tts = int(valid_json[utts[0]]['input'][1]['shape'][1])
odim_asr = int(valid_json[utts[0]]['output'][0]['shape'][1])
assert idim_tts == idim_asr - 3
logging.info('#input dims for ASR: ' + str(idim_asr))
logging.info('#input dims for TTS: ' + str(idim_tts))
logging.info('#output dims: ' + str(odim_asr))
if args.tts_use_speaker_embedding:
args.tts_spk_embed_dim = int(valid_json[utts[0]]['input'][2]['shape'][0])
else:
args.tts_spk_embed_dim = None
# specify attention, CTC, hybrid mode
if args.mtlalpha == 1.0:
mtl_mode = 'ctc'
logging.info('Pure CTC mode')
elif args.mtlalpha == 0.0:
mtl_mode = 'att'
logging.info('Pure attention mode')
else:
mtl_mode = 'mtl'
logging.info('Multitask learning mode')
# specify model architecture for ASR
e2e_asr = E2E(idim_asr, odim_asr, args)
logging.info(e2e_asr)
asr_loss = Loss(e2e_asr, args.mtlalpha)
# specify model architecture for TTS
# reverse input and output dimension
tts_loss = setup_tts_loss(odim_asr, idim_tts, args)
logging.info(tts_loss)
# define loss
model = ASRTTSLoss(asr_loss, tts_loss, args)
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + '/model.conf'
with open(model_conf, 'wb') as f:
logging.info('writing a model config file to' + model_conf)
        # TODO(watanabe) use something other than pickle, possibly json, and save as text
pickle.dump((idim_asr, odim_asr, args), f)
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
# Set gpu
ngpu = args.ngpu
if ngpu == 1:
gpu_id = range(ngpu)
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
elif ngpu > 1:
gpu_id = range(ngpu)
logging.info('gpu id: ' + str(gpu_id))
model = torch.nn.DataParallel(model, device_ids=gpu_id)
model.cuda()
logging.info('batch size is automatically increased (%d -> %d)' % (
args.batch_size, args.batch_size * args.ngpu))
args.batch_size *= args.ngpu
else:
gpu_id = [-1]
# Setup an optimizer
dummy_target = chainer.Chain()
opts = {}
#opts['asr'] = torch.optim.Adadelta(model.asr_loss.parameters(), rho=0.95, eps=args.eps)
if ngpu > 1:
module = model.module
else:
module = model
optim_class = getattr(torch.optim, args.optim)
opts['asr'] = optim_class(module.asr_loss.parameters(), args.lr*args.asr_weight, eps=args.eps, weight_decay=args.weight_decay)
opts['tts'] = optim_class(module.tts_loss.parameters(), args.lr*args.tts_weight, eps=args.eps, weight_decay=args.weight_decay)
opts['s2s'] = optim_class(module.ae_speech.parameters(), args.lr*args.s2s_weight, eps=args.eps, weight_decay=args.weight_decay)
opts['t2t'] = optim_class(module.ae_text.parameters(), args.lr*args.t2t_weight, eps=args.eps, weight_decay=args.weight_decay)
ae_param = list(set(list(module.ae_speech.parameters()) + list(module.ae_text.parameters())))
opts['mmd'] = optim_class(ae_param, args.lr*args.mmd_weight, eps=args.eps, weight_decay=args.weight_decay)
for key in ['asr', 'tts', 's2s', 't2t', 'mmd']:
# FIXME: TOO DIRTY HACK
setattr(opts[key], "target", dummy_target)
setattr(opts[key], "serialize", lambda s: dummy_target.serialize(s))
# read json data
logging.warning("reading json")
with open(args.train_json, 'rb') as f:
train_json = json.load(f)['utts']
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
# read utt2mode scp
logging.warning("reading utt2mode")
def load_utt2mode(scp, utts):
with open(scp, 'r') as f:
for line in f:
k, v = line.strip().split()
if k in utts.keys():
utts[k]['utt2mode'] = v
load_utt2mode(args.train_utt2mode, train_json)
load_utt2mode(args.valid_utt2mode, valid_json)
# make minibatch list (variable length)
train, train_multi = make_batchset_asrtts(train_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
half_unpair=args.use_mmd_autoencoding)
valid, valid_multi = make_batchset_asrtts(valid_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches)
    # hack to make the batchsize argument 1
    # the actual batchsize is included in a list
train_iter = chainer.iterators.SerialIterator(train_multi["pair"], 1)
valid_iter = chainer.iterators.SerialIterator(
valid, 1, repeat=False, shuffle=False)
# Set up a trainer
updater = CustomUpdater(model, args.grad_clip, train_iter, train_multi, opts,
converter=converter_kaldi, device=gpu_id, args=args)
trainer = training.Trainer(
updater, (args.epochs, 'epoch'), out=args.outdir)
# Resume from a snapshot
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
if ngpu > 1:
model.module.load_state_dict(torch.load(args.outdir + '/model.acc.best'))
else:
model.load_state_dict(torch.load(args.outdir + '/model.acc.best'))
if args.model:
if ngpu > 1:
model.module.load_state_dict(torch.load(args.model))
else:
model.load_state_dict(torch.load(args.model))
elif args.model_asr:
if ngpu > 1:
model.asr_loss.module.load_state_dict(torch.load(args.model_asr))
else:
model.asr_loss.load_state_dict(torch.load(args.model_asr))
elif args.model_tts:
if ngpu > 1:
model.tts_loss.module.load_state_dict(torch.load(args.model_tts))
else:
model.tts_loss.load_state_dict(torch.load(args.model_tts))
model = trainer.updater.model
# Evaluate the model with the test dataset for each epoch
trainer.extend(CustomEvaluater(
model, valid_iter, dummy_target, converter=converter_kaldi, device=gpu_id))
# Save attention weight each epoch
if args.num_save_attention > 0 and args.mtlalpha != 1.0:
data = sorted(list(valid_json.items())[:args.num_save_attention],
key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
data = converter_kaldi([data], device=gpu_id, use_speaker_embedding=args.tts_spk_embed_dim)
trainer.extend(CustomPlotAttentionReport(
model, True,
data, args.outdir + "/att_ws_asr"), trigger=(1, 'epoch'))
trainer.extend(CustomPlotAttentionReport(
model, False,
data, args.outdir + '/att_ws_tts', reverse=True), trigger=(1, 'epoch'))
# Take a snapshot for each specified epoch
trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))
# Make a plot for training and validation values
# report keys
report_keys = ['t/loss', 't/tts_loss', 't/s2s_loss', 't/asr_loss', 't/t2t_loss', 't/mmd_loss', 't/ae_mmd_loss',
'd/loss', 'd/tts_loss', 'd/s2s_loss', 'd/asr_loss', 'd/t2t_loss']
trainer.extend(extensions.PlotReport(report_keys,
'epoch', file_name='loss.png'))
trainer.extend(extensions.PlotReport(['t/asr_acc', 'd/asr_acc'],
'epoch', file_name='acc.png'))
# Save best models
def torch_save(path, _):
if ngpu > 1:
torch.save(model.module.state_dict(), path)
torch.save(model.module, path + ".pkl")
else:
torch.save(model.state_dict(), path)
torch.save(model, path + ".pkl")
trainer.extend(extensions.snapshot_object(model, 'model.loss.best', savefun=torch_save),
trigger=training.triggers.MinValueTrigger('d/loss'))
def torch_load(path, obj):
if ngpu > 1:
model.module.load_state_dict(torch.load(path))
else:
model.load_state_dict(torch.load(path))
return obj
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(trigger=(REPORT_INTERVAL, 'iteration')))
report_keys = ['t/loss', 't/tts_loss', 't/s2s_loss', 't/asr_loss', 't/t2t_loss', 't/asr_acc', 't/t2t_acc', 't/mmd_loss', 't/ae_mmd_loss',
'd/loss', 'd/tts_loss', 'd/s2s_loss', 'd/asr_loss', 'd/t2t_loss', 'd/asr_acc', 'd/t2t_acc']
report_keys.append('epoch')
report_keys.append('iteration')
report_keys.append('elapsed_time')
trainer.extend(extensions.PrintReport(report_keys), trigger=(REPORT_INTERVAL, 'iteration'))
trainer.extend(extensions.ProgressBar(update_interval=REPORT_INTERVAL))
# Run the training
trainer.run()
| 43.519119 | 141 | 0.615917 |
9de1a32e0db9813fd27fb0341a22a8d5a677f9ba | 33,844 | py | Python | test/rebuild/test.1.py | katonori/cxxtags | 2730adb040d1334caa1a9ae7e468f498e094d254 | [
"BSD-3-Clause"
] | null | null | null | test/rebuild/test.1.py | katonori/cxxtags | 2730adb040d1334caa1a9ae7e468f498e094d254 | [
"BSD-3-Clause"
] | null | null | null | test/rebuild/test.1.py | katonori/cxxtags | 2730adb040d1334caa1a9ae7e468f498e094d254 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import sys
import os
sys.path.append("../../src/")
sys.path.append("../util/")
import commands
import common
CXXTAGS_QUERY = "../../bin/cxxtags_query"
if len(sys.argv) != 2:
print "usage: cmd db_file"
exit(1)
cur_dir = os.getcwd()
db_dir = sys.argv[1]
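# Each q_list entry is one query spec (command, DB dir, file, line, column) passed to
# common.test_one; ans_list holds the expected result lines for the query at the same index.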
q_list = [
"decl "+db_dir+" "+cur_dir+"/main.cpp 3 7", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 3 7", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 3 7", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 3 7", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 6 5", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 6 5", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 6 5", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 6 5", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 7 5", #~CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 7 5", #~CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 7 5", #~CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 7 5", #~CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 8 18", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 8 18", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 8 18", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 8 18", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 11 6", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 11 6", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 11 6", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 11 6", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 11 16", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 11 16", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 11 16", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 11 16", #response
#"decl "+db_dir+" "+cur_dir+"/main.cpp 12 5", #printf
"decl "+db_dir+" "+cur_dir+"/main.cpp 15 7", #CParent1
"def "+db_dir+" "+cur_dir+"/main.cpp 15 7", #CParent1
"ref "+db_dir+" "+cur_dir+"/main.cpp 15 7", #CParent1
"override "+db_dir+" "+cur_dir+"/main.cpp 15 7", #CParent1
"decl "+db_dir+" "+cur_dir+"/main.cpp 18 5", #CParent1
"def "+db_dir+" "+cur_dir+"/main.cpp 18 5", #CParent1
"ref "+db_dir+" "+cur_dir+"/main.cpp 18 5", #CParent1
"override "+db_dir+" "+cur_dir+"/main.cpp 18 5", #CParent1
"decl "+db_dir+" "+cur_dir+"/main.cpp 19 5", #~CParent1
"def "+db_dir+" "+cur_dir+"/main.cpp 19 5", #~CParent1
"ref "+db_dir+" "+cur_dir+"/main.cpp 19 5", #~CParent1
"override "+db_dir+" "+cur_dir+"/main.cpp 19 5", #~CParent1
"decl "+db_dir+" "+cur_dir+"/main.cpp 20 18", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 20 18", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 20 18", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 20 18", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 23 6", #CParent1
"def "+db_dir+" "+cur_dir+"/main.cpp 23 6", #CParent1
"ref "+db_dir+" "+cur_dir+"/main.cpp 23 6", #CParent1
"override "+db_dir+" "+cur_dir+"/main.cpp 23 6", #CParent1
"decl "+db_dir+" "+cur_dir+"/main.cpp 23 16", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 23 16", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 23 16", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 23 16", #response
#"decl "+db_dir+" "+cur_dir+"/main.cpp 24 5", #printf
"decl "+db_dir+" "+cur_dir+"/main.cpp 27 7", #CChild
"def "+db_dir+" "+cur_dir+"/main.cpp 27 7", #CChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 27 7", #CChild
"override "+db_dir+" "+cur_dir+"/main.cpp 27 7", #CChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 28 10", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 28 10", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 28 10", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 28 10", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 31 5", #CChild
"def "+db_dir+" "+cur_dir+"/main.cpp 31 5", #CChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 31 5", #CChild
"override "+db_dir+" "+cur_dir+"/main.cpp 31 5", #CChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 32 5", #~CChild
"def "+db_dir+" "+cur_dir+"/main.cpp 32 5", #~CChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 32 5", #~CChild
"override "+db_dir+" "+cur_dir+"/main.cpp 32 5", #~CChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 33 18", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 33 18", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 33 18", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 33 18", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 36 6", #CChild
"def "+db_dir+" "+cur_dir+"/main.cpp 36 6", #CChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 36 6", #CChild
"override "+db_dir+" "+cur_dir+"/main.cpp 36 6", #CChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 36 14", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 36 14", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 36 14", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 36 14", #response
#"decl "+db_dir+" "+cur_dir+"/main.cpp 37 5", #printf
"decl "+db_dir+" "+cur_dir+"/main.cpp 40 7", #CGChild
"def "+db_dir+" "+cur_dir+"/main.cpp 40 7", #CGChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 40 7", #CGChild
"override "+db_dir+" "+cur_dir+"/main.cpp 40 7", #CGChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 41 10", #CChild
"def "+db_dir+" "+cur_dir+"/main.cpp 41 10", #CChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 41 10", #CChild
"override "+db_dir+" "+cur_dir+"/main.cpp 41 10", #CChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 44 5", #CGChild
"def "+db_dir+" "+cur_dir+"/main.cpp 44 5", #CGChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 44 5", #CGChild
"override "+db_dir+" "+cur_dir+"/main.cpp 44 5", #CGChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 45 5", #~CGChild
"def "+db_dir+" "+cur_dir+"/main.cpp 45 5", #~CGChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 45 5", #~CGChild
"override "+db_dir+" "+cur_dir+"/main.cpp 45 5", #~CGChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 46 18", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 46 18", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 46 18", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 46 18", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 49 6", #CGChild
"def "+db_dir+" "+cur_dir+"/main.cpp 49 6", #CGChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 49 6", #CGChild
"override "+db_dir+" "+cur_dir+"/main.cpp 49 6", #CGChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 49 15", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 49 15", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 49 15", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 49 15", #response
#"decl "+db_dir+" "+cur_dir+"/main.cpp 50 5", #printf
"decl "+db_dir+" "+cur_dir+"/main.cpp 53 7", #COther
"def "+db_dir+" "+cur_dir+"/main.cpp 53 7", #COther
"ref "+db_dir+" "+cur_dir+"/main.cpp 53 7", #COther
"override "+db_dir+" "+cur_dir+"/main.cpp 53 7", #COther
"decl "+db_dir+" "+cur_dir+"/main.cpp 54 10", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 54 10", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 54 10", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 54 10", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 54 27", #CParent1
"def "+db_dir+" "+cur_dir+"/main.cpp 54 27", #CParent1
"ref "+db_dir+" "+cur_dir+"/main.cpp 54 27", #CParent1
"override "+db_dir+" "+cur_dir+"/main.cpp 54 27", #CParent1
"decl "+db_dir+" "+cur_dir+"/main.cpp 57 5", #COther
"def "+db_dir+" "+cur_dir+"/main.cpp 57 5", #COther
"ref "+db_dir+" "+cur_dir+"/main.cpp 57 5", #COther
"override "+db_dir+" "+cur_dir+"/main.cpp 57 5", #COther
"decl "+db_dir+" "+cur_dir+"/main.cpp 58 5", #~COther
"def "+db_dir+" "+cur_dir+"/main.cpp 58 5", #~COther
"ref "+db_dir+" "+cur_dir+"/main.cpp 58 5", #~COther
"override "+db_dir+" "+cur_dir+"/main.cpp 58 5", #~COther
"decl "+db_dir+" "+cur_dir+"/main.cpp 59 18", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 59 18", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 59 18", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 59 18", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 62 6", #COther
"def "+db_dir+" "+cur_dir+"/main.cpp 62 6", #COther
"ref "+db_dir+" "+cur_dir+"/main.cpp 62 6", #COther
"override "+db_dir+" "+cur_dir+"/main.cpp 62 6", #COther
"decl "+db_dir+" "+cur_dir+"/main.cpp 62 14", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 62 14", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 62 14", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 62 14", #response
#"decl "+db_dir+" "+cur_dir+"/main.cpp 63 5", #printf
"decl "+db_dir+" "+cur_dir+"/main.cpp 66 13", #test
"def "+db_dir+" "+cur_dir+"/main.cpp 66 13", #test
"ref "+db_dir+" "+cur_dir+"/main.cpp 66 13", #test
"override "+db_dir+" "+cur_dir+"/main.cpp 66 13", #test
"decl "+db_dir+" "+cur_dir+"/main.cpp 66 24", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 66 24", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 66 24", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 66 24", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 66 34", #a
"def "+db_dir+" "+cur_dir+"/main.cpp 66 34", #a
"ref "+db_dir+" "+cur_dir+"/main.cpp 66 34", #a
"override "+db_dir+" "+cur_dir+"/main.cpp 66 34", #a
"decl "+db_dir+" "+cur_dir+"/main.cpp 68 5", #a
"def "+db_dir+" "+cur_dir+"/main.cpp 68 5", #a
"ref "+db_dir+" "+cur_dir+"/main.cpp 68 5", #a
"override "+db_dir+" "+cur_dir+"/main.cpp 68 5", #a
"decl "+db_dir+" "+cur_dir+"/main.cpp 68 8", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 68 8", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 68 8", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 68 8", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 71 5", #main
"def "+db_dir+" "+cur_dir+"/main.cpp 71 5", #main
"ref "+db_dir+" "+cur_dir+"/main.cpp 71 5", #main
"override "+db_dir+" "+cur_dir+"/main.cpp 71 5", #main
"decl "+db_dir+" "+cur_dir+"/main.cpp 73 5", #CParent0
"def "+db_dir+" "+cur_dir+"/main.cpp 73 5", #CParent0
"ref "+db_dir+" "+cur_dir+"/main.cpp 73 5", #CParent0
"override "+db_dir+" "+cur_dir+"/main.cpp 73 5", #CParent0
"decl "+db_dir+" "+cur_dir+"/main.cpp 73 14", #parent
"def "+db_dir+" "+cur_dir+"/main.cpp 73 14", #parent
"ref "+db_dir+" "+cur_dir+"/main.cpp 73 14", #parent
"override "+db_dir+" "+cur_dir+"/main.cpp 73 14", #parent
"decl "+db_dir+" "+cur_dir+"/main.cpp 74 5", #CChild
"def "+db_dir+" "+cur_dir+"/main.cpp 74 5", #CChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 74 5", #CChild
"override "+db_dir+" "+cur_dir+"/main.cpp 74 5", #CChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 74 12", #child
"def "+db_dir+" "+cur_dir+"/main.cpp 74 12", #child
"ref "+db_dir+" "+cur_dir+"/main.cpp 74 12", #child
"override "+db_dir+" "+cur_dir+"/main.cpp 74 12", #child
"decl "+db_dir+" "+cur_dir+"/main.cpp 75 5", #CGChild
"def "+db_dir+" "+cur_dir+"/main.cpp 75 5", #CGChild
"ref "+db_dir+" "+cur_dir+"/main.cpp 75 5", #CGChild
"override "+db_dir+" "+cur_dir+"/main.cpp 75 5", #CGChild
"decl "+db_dir+" "+cur_dir+"/main.cpp 75 13", #gchild
"def "+db_dir+" "+cur_dir+"/main.cpp 75 13", #gchild
"ref "+db_dir+" "+cur_dir+"/main.cpp 75 13", #gchild
"override "+db_dir+" "+cur_dir+"/main.cpp 75 13", #gchild
"decl "+db_dir+" "+cur_dir+"/main.cpp 76 5", #COther
"def "+db_dir+" "+cur_dir+"/main.cpp 76 5", #COther
"ref "+db_dir+" "+cur_dir+"/main.cpp 76 5", #COther
"override "+db_dir+" "+cur_dir+"/main.cpp 76 5", #COther
"decl "+db_dir+" "+cur_dir+"/main.cpp 76 12", #other
"def "+db_dir+" "+cur_dir+"/main.cpp 76 12", #other
"ref "+db_dir+" "+cur_dir+"/main.cpp 76 12", #other
"override "+db_dir+" "+cur_dir+"/main.cpp 76 12", #other
"decl "+db_dir+" "+cur_dir+"/main.cpp 77 5", #parent
"def "+db_dir+" "+cur_dir+"/main.cpp 77 5", #parent
"ref "+db_dir+" "+cur_dir+"/main.cpp 77 5", #parent
"override "+db_dir+" "+cur_dir+"/main.cpp 77 5", #parent
"decl "+db_dir+" "+cur_dir+"/main.cpp 77 12", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 77 12", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 77 12", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 77 12", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 78 5", #child
"def "+db_dir+" "+cur_dir+"/main.cpp 78 5", #child
"ref "+db_dir+" "+cur_dir+"/main.cpp 78 5", #child
"override "+db_dir+" "+cur_dir+"/main.cpp 78 5", #child
"decl "+db_dir+" "+cur_dir+"/main.cpp 78 11", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 78 11", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 78 11", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 78 11", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 79 5", #gchild
"def "+db_dir+" "+cur_dir+"/main.cpp 79 5", #gchild
"ref "+db_dir+" "+cur_dir+"/main.cpp 79 5", #gchild
"override "+db_dir+" "+cur_dir+"/main.cpp 79 5", #gchild
"decl "+db_dir+" "+cur_dir+"/main.cpp 79 12", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 79 12", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 79 12", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 79 12", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 80 5", #other
"def "+db_dir+" "+cur_dir+"/main.cpp 80 5", #other
"ref "+db_dir+" "+cur_dir+"/main.cpp 80 5", #other
"override "+db_dir+" "+cur_dir+"/main.cpp 80 5", #other
"decl "+db_dir+" "+cur_dir+"/main.cpp 80 11", #response
"def "+db_dir+" "+cur_dir+"/main.cpp 80 11", #response
"ref "+db_dir+" "+cur_dir+"/main.cpp 80 11", #response
"override "+db_dir+" "+cur_dir+"/main.cpp 80 11", #response
"decl "+db_dir+" "+cur_dir+"/main.cpp 81 5", #test
"def "+db_dir+" "+cur_dir+"/main.cpp 81 5", #test
"ref "+db_dir+" "+cur_dir+"/main.cpp 81 5", #test
"override "+db_dir+" "+cur_dir+"/main.cpp 81 5", #test
"decl "+db_dir+" "+cur_dir+"/main.cpp 81 11", #parent
"def "+db_dir+" "+cur_dir+"/main.cpp 81 11", #parent
"ref "+db_dir+" "+cur_dir+"/main.cpp 81 11", #parent
"override "+db_dir+" "+cur_dir+"/main.cpp 81 11", #parent
"decl "+db_dir+" "+cur_dir+"/main.cpp 82 5", #test
"def "+db_dir+" "+cur_dir+"/main.cpp 82 5", #test
"ref "+db_dir+" "+cur_dir+"/main.cpp 82 5", #test
"override "+db_dir+" "+cur_dir+"/main.cpp 82 5", #test
"decl "+db_dir+" "+cur_dir+"/main.cpp 82 11", #child
"def "+db_dir+" "+cur_dir+"/main.cpp 82 11", #child
"ref "+db_dir+" "+cur_dir+"/main.cpp 82 11", #child
"override "+db_dir+" "+cur_dir+"/main.cpp 82 11", #child
"decl "+db_dir+" "+cur_dir+"/main.cpp 83 5", #test
"def "+db_dir+" "+cur_dir+"/main.cpp 83 5", #test
"ref "+db_dir+" "+cur_dir+"/main.cpp 83 5", #test
"override "+db_dir+" "+cur_dir+"/main.cpp 83 5", #test
"decl "+db_dir+" "+cur_dir+"/main.cpp 83 11", #gchild
"def "+db_dir+" "+cur_dir+"/main.cpp 83 11", #gchild
"ref "+db_dir+" "+cur_dir+"/main.cpp 83 11", #gchild
"override "+db_dir+" "+cur_dir+"/main.cpp 83 11", #gchild
"decl "+db_dir+" "+cur_dir+"/main.cpp 84 5", #test
"def "+db_dir+" "+cur_dir+"/main.cpp 84 5", #test
"ref "+db_dir+" "+cur_dir+"/main.cpp 84 5", #test
"override "+db_dir+" "+cur_dir+"/main.cpp 84 5", #test
"decl "+db_dir+" "+cur_dir+"/main.cpp 84 11", #other
"def "+db_dir+" "+cur_dir+"/main.cpp 84 11", #other
"ref "+db_dir+" "+cur_dir+"/main.cpp 84 11", #other
"override "+db_dir+" "+cur_dir+"/main.cpp 84 11", #other
]
ans_list = [
# main.cpp
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/main.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/main.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/main.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/main.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/main.cpp|73|5| CParent0 parent;',
],
[""],
["CParent0|"+cur_dir+"/main.cpp|6|5| CParent0(){}"],
["CParent0|"+cur_dir+"/main.cpp|6|5| CParent0(){}"],
[""],
[""],
["~CParent0|"+cur_dir+"/main.cpp|7|5| ~CParent0(){}"],
["~CParent0|"+cur_dir+"/main.cpp|7|5| ~CParent0(){}"],
[""],
[""],
# 8 18
["response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/main.cpp|68|8| a->response();",
"response|"+cur_dir+"/main.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {",
],
# 11 6
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/main.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/main.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/main.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/main.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/main.cpp|73|5| CParent0 parent;',
],
[""],
# 11 16
["response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/main.cpp|68|8| a->response();",
"response|"+cur_dir+"/main.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {",
],
# 15 7
["CParent1|"+cur_dir+"/main.cpp|15|7|class CParent1"],
["CParent1|"+cur_dir+"/main.cpp|15|7|class CParent1"],
[
"CParent1|"+cur_dir+r'/main.cpp|23|6|void CParent1::response(void) {',
"CParent1|"+cur_dir+r'/main.cpp|54|27|: public CParent0, public CParent1',
],
[""],
# 18 5
["CParent1|"+cur_dir+"/main.cpp|18|5| CParent1(){}"],
["CParent1|"+cur_dir+"/main.cpp|18|5| CParent1(){}"],
[""],
[""],
# 19 5
["~CParent1|"+cur_dir+"/main.cpp|19|5| ~CParent1(){}"],
["~CParent1|"+cur_dir+"/main.cpp|19|5| ~CParent1(){}"],
[""],
[""],
# 20 18
["response|"+cur_dir+"/main.cpp|20|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|23|16|void CParent1::response(void) {"],
[""],
[
"response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {",
],
# 23 6
["CParent1|"+cur_dir+"/main.cpp|15|7|class CParent1"],
["CParent1|"+cur_dir+"/main.cpp|15|7|class CParent1"],
[
"CParent1|"+cur_dir+r'/main.cpp|23|6|void CParent1::response(void) {',
"CParent1|"+cur_dir+r'/main.cpp|54|27|: public CParent0, public CParent1',
],
[""],
# 23 16
["response|"+cur_dir+"/main.cpp|20|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|23|16|void CParent1::response(void) {"],
[""],
[
"response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {",
],
# 27 7
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/main.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/main.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/main.cpp|74|5| CChild child;',
],
[""],
# 28 10
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/main.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/main.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/main.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/main.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/main.cpp|73|5| CParent0 parent;',
],
[""],
# 31 5
["CChild|"+cur_dir+"/main.cpp|31|5| CChild(){}"],
["CChild|"+cur_dir+"/main.cpp|31|5| CChild(){}"],
[""],
[""],
    # 32 5
["~CChild|"+cur_dir+"/main.cpp|32|5| ~CChild(){}"],
["~CChild|"+cur_dir+"/main.cpp|32|5| ~CChild(){}"],
[""],
[""],
# 33 18
["response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {"],
["response|"+cur_dir+"/main.cpp|78|11| child.response();"],
[
"response|"+cur_dir+"/main.cpp|46|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|49|15|void CGChild::response(void) {",
"response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);",
],
# 36 6
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/main.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/main.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/main.cpp|74|5| CChild child;',
],
[""],
# 36 14
["response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {"],
["response|"+cur_dir+"/main.cpp|78|11| child.response();"],
[
"response|"+cur_dir+"/main.cpp|46|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|49|15|void CGChild::response(void) {",
"response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);",
],
# 40 7
["CGChild|"+cur_dir+"/main.cpp|40|7|class CGChild"],
["CGChild|"+cur_dir+"/main.cpp|40|7|class CGChild"],
[
"CGChild|"+cur_dir+r'/main.cpp|49|6|void CGChild::response(void) {',
"CGChild|"+cur_dir+r'/main.cpp|75|5| CGChild gchild;',
],
[""],
# 41 10
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/main.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/main.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/main.cpp|74|5| CChild child;',
],
[""],
# 44 5
["CGChild|"+cur_dir+"/main.cpp|44|5| CGChild(){}"],
["CGChild|"+cur_dir+"/main.cpp|44|5| CGChild(){}"],
[""],
[""],
# 45 5
["~CGChild|"+cur_dir+"/main.cpp|45|5| ~CGChild(){}"],
["~CGChild|"+cur_dir+"/main.cpp|45|5| ~CGChild(){}"],
[""],
[""],
# 46 18
["response|"+cur_dir+"/main.cpp|46|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|49|15|void CGChild::response(void) {"],
["response|"+cur_dir+"/main.cpp|79|12| gchild.response();"],
["response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);"],
# 49 6
["CGChild|"+cur_dir+"/main.cpp|40|7|class CGChild"],
["CGChild|"+cur_dir+"/main.cpp|40|7|class CGChild"],
[
"CGChild|"+cur_dir+r'/main.cpp|49|6|void CGChild::response(void) {',
"CGChild|"+cur_dir+r'/main.cpp|75|5| CGChild gchild;',
],
[""],
# 49 15
["response|"+cur_dir+"/main.cpp|46|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|49|15|void CGChild::response(void) {"],
["response|"+cur_dir+"/main.cpp|79|12| gchild.response();"],
["response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);"],
# 53 7
["COther|"+cur_dir+"/main.cpp|53|7|class COther"],
["COther|"+cur_dir+"/main.cpp|53|7|class COther"],
[
"COther|"+cur_dir+r'/main.cpp|62|6|void COther::response(void) {',
"COther|"+cur_dir+r'/main.cpp|76|5| COther other;',
],
[""],
# 54 10
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/main.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/main.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/main.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/main.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/main.cpp|73|5| CParent0 parent;',
],
[""],
# 54 27
["CParent1|"+cur_dir+"/main.cpp|15|7|class CParent1"],
["CParent1|"+cur_dir+"/main.cpp|15|7|class CParent1"],
[
"CParent1|"+cur_dir+r'/main.cpp|23|6|void CParent1::response(void) {',
"CParent1|"+cur_dir+r'/main.cpp|54|27|: public CParent0, public CParent1',
],
[""],
# 57 5
["COther|"+cur_dir+"/main.cpp|57|5| COther(){}"],
["COther|"+cur_dir+"/main.cpp|57|5| COther(){}"],
[""],
[""],
# 58 5
["~COther|"+cur_dir+"/main.cpp|58|5| ~COther(){}"],
["~COther|"+cur_dir+"/main.cpp|58|5| ~COther(){}"],
[""],
[""],
# 59 18
["response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {"],
["response|"+cur_dir+"/main.cpp|80|11| other.response();"],
[
"response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|20|18| virtual void response(void);",
],
# 62 6
["COther|"+cur_dir+"/main.cpp|53|7|class COther"],
["COther|"+cur_dir+"/main.cpp|53|7|class COther"],
[
"COther|"+cur_dir+r'/main.cpp|62|6|void COther::response(void) {',
"COther|"+cur_dir+r'/main.cpp|76|5| COther other;',
],
[""],
# 62 14
["response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {"],
["response|"+cur_dir+"/main.cpp|80|11| other.response();"],
[
"response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|20|18| virtual void response(void);",
],
# 66 13
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/main.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/main.cpp|82|5| test(&child);",
"test|"+cur_dir+"/main.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/main.cpp|84|5| test(&other);",
],
[""],
# 66 24
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/main.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/main.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/main.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/main.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/main.cpp|73|5| CParent0 parent;',
],
[""],
# 66 34
["a|"+cur_dir+"/main.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+"/main.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+r'/main.cpp|68|5| a->response();'],
[""],
# 68 5
["a|"+cur_dir+"/main.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+"/main.cpp|66|34|static void test(class CParent0 *a)"],
["a|"+cur_dir+r'/main.cpp|68|5| a->response();'],
[""],
# 68 8
["response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/main.cpp|68|8| a->response();",
"response|"+cur_dir+"/main.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {",
],
# 71 5
["main|"+cur_dir+"/main.cpp|71|5|int main()"],
["main|"+cur_dir+"/main.cpp|71|5|int main()"],
[""],
[""],
# 73 5
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
["CParent0|"+cur_dir+"/main.cpp|3|7|class CParent0"],
[
"CParent0|"+cur_dir+r'/main.cpp|11|6|void CParent0::response(void) {',
"CParent0|"+cur_dir+r'/main.cpp|28|10|: public CParent0',
"CParent0|"+cur_dir+r'/main.cpp|54|10|: public CParent0, public CParent1',
"CParent0|"+cur_dir+r'/main.cpp|66|24|static void test(class CParent0 *a)',
"CParent0|"+cur_dir+r'/main.cpp|73|5| CParent0 parent;',
],
[""],
# 73 14
["parent|"+cur_dir+"/main.cpp|73|14| CParent0 parent;"],
["parent|"+cur_dir+"/main.cpp|73|14| CParent0 parent;"],
[
"parent|"+cur_dir+"/main.cpp|77|5| parent.response();",
"parent|"+cur_dir+"/main.cpp|81|11| test(&parent);",
],
[""],
# 74 5
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
["CChild|"+cur_dir+"/main.cpp|27|7|class CChild"],
[
"CChild|"+cur_dir+r'/main.cpp|36|6|void CChild::response(void) {',
"CChild|"+cur_dir+r'/main.cpp|41|10|: public CChild',
"CChild|"+cur_dir+r'/main.cpp|74|5| CChild child;',
],
[""],
# 74 12
["child|"+cur_dir+"/main.cpp|74|12| CChild child;"],
["child|"+cur_dir+"/main.cpp|74|12| CChild child;"],
[
"child|"+cur_dir+"/main.cpp|78|5| child.response();",
"child|"+cur_dir+"/main.cpp|82|11| test(&child);",
],
[""],
# 75 5
["CGChild|"+cur_dir+"/main.cpp|40|7|class CGChild"],
["CGChild|"+cur_dir+"/main.cpp|40|7|class CGChild"],
[
"CGChild|"+cur_dir+r'/main.cpp|49|6|void CGChild::response(void) {',
"CGChild|"+cur_dir+r'/main.cpp|75|5| CGChild gchild;',
],
[""],
    # 75 13
["gchild|"+cur_dir+"/main.cpp|75|13| CGChild gchild;"],
["gchild|"+cur_dir+"/main.cpp|75|13| CGChild gchild;"],
[
"gchild|"+cur_dir+"/main.cpp|79|5| gchild.response();",
"gchild|"+cur_dir+"/main.cpp|83|11| test(&gchild);",
],
[""],
# 76 5
["COther|"+cur_dir+"/main.cpp|53|7|class COther"],
["COther|"+cur_dir+"/main.cpp|53|7|class COther"],
[
"COther|"+cur_dir+r'/main.cpp|62|6|void COther::response(void) {',
"COther|"+cur_dir+r'/main.cpp|76|5| COther other;',
],
[""],
# 76 12
["other|"+cur_dir+"/main.cpp|76|12| COther other;"],
["other|"+cur_dir+"/main.cpp|76|12| COther other;"],
[
"other|"+cur_dir+"/main.cpp|80|5| other.response();",
"other|"+cur_dir+"/main.cpp|84|11| test(&other);",
],
[""],
# 77 5
["parent|"+cur_dir+"/main.cpp|73|14| CParent0 parent;"],
["parent|"+cur_dir+"/main.cpp|73|14| CParent0 parent;"],
[
"parent|"+cur_dir+"/main.cpp|77|5| parent.response();",
"parent|"+cur_dir+"/main.cpp|81|11| test(&parent);",
],
[""],
# 77 12
["response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|11|16|void CParent0::response(void) {"],
[
"response|"+cur_dir+"/main.cpp|68|8| a->response();",
"response|"+cur_dir+"/main.cpp|77|12| parent.response();",
],
[
"response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {",
"response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {",
],
# 78 5
["child|"+cur_dir+"/main.cpp|74|12| CChild child;"],
["child|"+cur_dir+"/main.cpp|74|12| CChild child;"],
[
"child|"+cur_dir+"/main.cpp|78|5| child.response();",
"child|"+cur_dir+"/main.cpp|82|11| test(&child);",
],
[""],
# 78 11
["response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|36|14|void CChild::response(void) {"],
["response|"+cur_dir+"/main.cpp|78|11| child.response();"],
[
"response|"+cur_dir+"/main.cpp|46|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|49|15|void CGChild::response(void) {",
"response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);",
],
# 79 5
["gchild|"+cur_dir+"/main.cpp|75|13| CGChild gchild;"],
["gchild|"+cur_dir+"/main.cpp|75|13| CGChild gchild;"],
[
"gchild|"+cur_dir+"/main.cpp|79|5| gchild.response();",
"gchild|"+cur_dir+"/main.cpp|83|11| test(&gchild);",
],
[""],
# 79 12
["response|"+cur_dir+"/main.cpp|46|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|49|15|void CGChild::response(void) {"],
["response|"+cur_dir+"/main.cpp|79|12| gchild.response();"],
["response|"+cur_dir+"/main.cpp|33|18| virtual void response(void);"],
# 80 5
["other|"+cur_dir+"/main.cpp|76|12| COther other;"],
["other|"+cur_dir+"/main.cpp|76|12| COther other;"],
[
"other|"+cur_dir+"/main.cpp|80|5| other.response();",
"other|"+cur_dir+"/main.cpp|84|11| test(&other);",
],
[""],
# 80 11
["response|"+cur_dir+"/main.cpp|59|18| virtual void response(void);"],
["response|"+cur_dir+"/main.cpp|62|14|void COther::response(void) {"],
["response|"+cur_dir+"/main.cpp|80|11| other.response();"],
[
"response|"+cur_dir+"/main.cpp|8|18| virtual void response(void);",
"response|"+cur_dir+"/main.cpp|20|18| virtual void response(void);",
],
# 81 5
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/main.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/main.cpp|82|5| test(&child);",
"test|"+cur_dir+"/main.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/main.cpp|84|5| test(&other);",
],
[""],
# 81 11
["parent|"+cur_dir+"/main.cpp|73|14| CParent0 parent;"],
["parent|"+cur_dir+"/main.cpp|73|14| CParent0 parent;"],
[
"parent|"+cur_dir+"/main.cpp|77|5| parent.response();",
"parent|"+cur_dir+"/main.cpp|81|11| test(&parent);",
],
[""],
# 82 5
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/main.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/main.cpp|82|5| test(&child);",
"test|"+cur_dir+"/main.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/main.cpp|84|5| test(&other);",
],
[""],
# 82 11
["child|"+cur_dir+"/main.cpp|74|12| CChild child;"],
["child|"+cur_dir+"/main.cpp|74|12| CChild child;"],
[
"child|"+cur_dir+"/main.cpp|78|5| child.response();",
"child|"+cur_dir+"/main.cpp|82|11| test(&child);",
],
[""],
# 83 5
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/main.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/main.cpp|82|5| test(&child);",
"test|"+cur_dir+"/main.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/main.cpp|84|5| test(&other);",
],
[""],
# 83 11
["gchild|"+cur_dir+"/main.cpp|75|13| CGChild gchild;"],
["gchild|"+cur_dir+"/main.cpp|75|13| CGChild gchild;"],
[
"gchild|"+cur_dir+"/main.cpp|79|5| gchild.response();",
"gchild|"+cur_dir+"/main.cpp|83|11| test(&gchild);",
],
[""],
# 84 5
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
["test|"+cur_dir+"/main.cpp|66|13|static void test(class CParent0 *a)"],
[
"test|"+cur_dir+"/main.cpp|81|5| test(&parent);",
"test|"+cur_dir+"/main.cpp|82|5| test(&child);",
"test|"+cur_dir+"/main.cpp|83|5| test(&gchild);",
"test|"+cur_dir+"/main.cpp|84|5| test(&other);",
],
[""],
# 84 11
["other|"+cur_dir+"/main.cpp|76|12| COther other;"],
["other|"+cur_dir+"/main.cpp|76|12| COther other;"],
[
"other|"+cur_dir+"/main.cpp|80|5| other.response();",
"other|"+cur_dir+"/main.cpp|84|11| test(&other);",
],
[""],
]
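# Run every query and compare its output against the expected answer at the same index;
# exit with the accumulated error count (0 on success).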
err = 0
i = 0
for q in q_list:
err += common.test_one(q, ans_list[i])
i+=1
if err == 0:
print "OK"
else:
print "ERR: %d"%(err)
exit(err)
| 38.198646 | 75 | 0.627704 |
5626002249a49dd81cef065dde2340ebe6b68de3 | 87,231 | py | Python | plugins/modules/oci_data_safe_security_assessment_comparison_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_data_safe_security_assessment_comparison_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_data_safe_security_assessment_comparison_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_safe_security_assessment_comparison_facts
short_description: Fetches details about a SecurityAssessmentComparison resource in Oracle Cloud Infrastructure
description:
- Fetches details about a SecurityAssessmentComparison resource in Oracle Cloud Infrastructure
- Gets the details of the comparison report on the security assessments submitted for comparison.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
security_assessment_id:
description:
- The OCID of the security assessment.
type: str
required: true
comparison_security_assessment_id:
description:
- The OCID of the baseline security assessment.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific security_assessment_comparison
oci_data_safe_security_assessment_comparison_facts:
# required
security_assessment_id: "ocid1.securityassessment.oc1..xxxxxxEXAMPLExxxxxx"
comparison_security_assessment_id: "ocid1.comparisonsecurityassessment.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
security_assessment_comparison:
description:
- SecurityAssessmentComparison resource
returned: on success
type: complex
contains:
id:
description:
- The OCID of the security assessment that is being compared with a baseline security assessment.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
baseline_id:
description:
- The OCID of the security assessment that is set as a baseline.
returned: on success
type: str
sample: "ocid1.baseline.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of the security assessment comparison.
returned: on success
type: str
sample: CREATING
time_created:
description:
- The date and time when the security assessment comparison was created. Conforms to the format defined by
L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
targets:
description:
- A target-based comparison between two security assessments.
returned: on success
type: complex
contains:
baseline_target_id:
description:
- The OCID of the target that is used as a baseline in this comparison.
returned: on success
type: str
sample: "ocid1.baselinetarget.oc1..xxxxxxEXAMPLExxxxxx"
current_target_id:
description:
- The OCID of the target to be compared against the baseline target.
returned: on success
type: str
sample: "ocid1.currenttarget.oc1..xxxxxxEXAMPLExxxxxx"
auditing:
description:
                        - A comparison between findings belonging to the Auditing category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
authorization_control:
description:
                        - A comparison between findings belonging to the Authorization Control category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
data_encryption:
description:
                        - A comparison between findings belonging to the Data Encryption category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
db_configuration:
description:
                        - A comparison between findings belonging to the Database Configuration category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
fine_grained_access_control:
description:
                        - A comparison between findings belonging to the Fine-Grained Access Control category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
privileges_and_roles:
description:
                        - A comparison between findings belonging to the Privileges and Roles category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
user_accounts:
description:
                        - A comparison between findings belonging to the User Accounts category.
returned: on success
type: complex
contains:
current:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
baseline:
description:
- ""
returned: on success
type: complex
contains:
key:
description:
- A unique identifier for the finding. This is common for the finding across targets.
returned: on success
type: str
sample: key_example
severity:
description:
- The severity of the finding.
returned: on success
type: str
sample: HIGH
title:
description:
- The short title for the finding.
returned: on success
type: str
sample: title_example
remarks:
description:
- The explanation of the issue in this finding. It explains the reason for the rule and, if a risk is reported, it may
also explain the recommended actions for remediation.
returned: on success
type: str
sample: remarks_example
details:
description:
- The details of the finding. Provides detailed information to explain the finding summary, typically results from the
assessed database, followed by any recommendations for changes.
returned: on success
type: dict
sample: {}
summary:
description:
- The brief summary of the finding. When the finding is informational, the summary typically reports only the number of
data elements that were examined.
returned: on success
type: str
sample: summary_example
references:
description:
- Provides information on whether the finding is related to a CIS Oracle Database Benchmark recommendation, STIG rule,
or related to a GDPR Article/Recital.
returned: on success
type: complex
contains:
stig:
description:
- Relevant section from STIG.
returned: on success
type: str
sample: stig_example
cis:
description:
- Relevant section from CIS.
returned: on success
type: str
sample: cis_example
gdpr:
description:
- Relevant section from GDPR.
returned: on success
type: str
sample: gdpr_example
removed_items:
description:
- This array identifies the items that are present in the baseline, but are missing from the current assessment.
returned: on success
type: list
sample: []
added_items:
description:
- This array identifies the items that are present in the current assessment, but are missing from the baseline.
returned: on success
type: list
sample: []
modified_items:
description:
- This array contains the items that are present in both the current assessment and the baseline, but are different in the two
assessments.
returned: on success
type: list
sample: []
severity:
description:
- The severity of this diff.
returned: on success
type: str
sample: HIGH
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"baseline_id": "ocid1.baseline.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"targets": [{
"baseline_target_id": "ocid1.baselinetarget.oc1..xxxxxxEXAMPLExxxxxx",
"current_target_id": "ocid1.currenttarget.oc1..xxxxxxEXAMPLExxxxxx",
"auditing": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"authorization_control": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"data_encryption": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"db_configuration": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"fine_grained_access_control": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"privileges_and_roles": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}],
"user_accounts": [{
"current": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"baseline": {
"key": "key_example",
"severity": "HIGH",
"title": "title_example",
"remarks": "remarks_example",
"details": {},
"summary": "summary_example",
"references": {
"stig": "stig_example",
"cis": "cis_example",
"gdpr": "gdpr_example"
}
},
"removed_items": [],
"added_items": [],
"modified_items": [],
"severity": "HIGH"
}]
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_safe import DataSafeClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataSafeSecurityAssessmentComparisonFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"security_assessment_id",
"comparison_security_assessment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_security_assessment_comparison,
security_assessment_id=self.module.params.get("security_assessment_id"),
comparison_security_assessment_id=self.module.params.get(
"comparison_security_assessment_id"
),
)
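# A brief note on the pattern below (not part of the generated code):
# get_custom_class() looks up an optional hand-written customization class for
# this resource by name. Because ResourceFactsHelper lists the Custom class
# before the generated Gen class, any overrides defined in the customization
# take precedence over the generated behaviour.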
DataSafeSecurityAssessmentComparisonFactsHelperCustom = get_custom_class(
"DataSafeSecurityAssessmentComparisonFactsHelperCustom"
)
class ResourceFactsHelper(
DataSafeSecurityAssessmentComparisonFactsHelperCustom,
DataSafeSecurityAssessmentComparisonFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
security_assessment_id=dict(type="str", required=True),
comparison_security_assessment_id=dict(
aliases=["id"], type="str", required=True
),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="security_assessment_comparison",
service_client_class=DataSafeClient,
namespace="data_safe",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(security_assessment_comparison=result)
if __name__ == "__main__":
main()
| 53.946197 | 159 | 0.350288 |
29d3a881498a895cec9d9309a8a83480eeafd6af | 47,096 | py | Python | django/test/testcases.py | denisenkom/django | db21cab162168d5b0ef271dd572f4332e0897c37 | [
"BSD-3-Clause"
] | null | null | null | django/test/testcases.py | denisenkom/django | db21cab162168d5b0ef271dd572f4332e0897c37 | [
"BSD-3-Clause"
] | null | null | null | django/test/testcases.py | denisenkom/django | db21cab162168d5b0ef271dd572f4332e0897c37 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from copy import copy
import difflib
import errno
from functools import wraps
import json
import os
import posixpath
import re
import sys
import threading
import unittest
from unittest import skipIf # Imported here for backward compatibility
from unittest.util import safe_repr
from django.conf import settings
from django.core import mail
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.core.handlers.base import get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.commands import flush
from django.core.servers.basehttp import (WSGIRequestHandler, WSGIServer,
WSGIServerException)
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import connection, connections, DEFAULT_DB_ALIAS, transaction
from django.db.models.loading import cache
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (CaptureQueriesContext, ContextList,
override_settings, compare_xml)
from django.utils.encoding import force_text
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit, urlparse, unquote
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_abort = transaction.abort
def nop(*args, **kwargs):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
transaction.abort = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
transaction.abort = real_abort
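# While a transaction-wrapped test is running, the helpers above swap Django's
# public transaction functions for no-ops so that test code cannot commit or
# roll back out of the surrounding test transaction; the real implementations
# are restored once the test finishes.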
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected" % (
executed, self.num
)
)
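# _AssertNumQueriesContext backs the assertNumQueries() helper exposed on the
# test case classes; a typical call looks like this (the model is illustrative):
#
#     with self.assertNumQueries(2):
#         Book.objects.create(title="x")
#         Book.objects.count()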
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
self._urlconf_setup()
mail.outbox = []
def _urlconf_setup(self):
set_urlconf(None)
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
self._urlconf_teardown()
def _urlconf_teardown(self):
set_urlconf(None)
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts
back to the original value when exiting the context.
"""
return override_settings(**kwargs)
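    # For example (the setting and value are illustrative only):
    #
    #     with self.settings(LANGUAGE_CODE='de'):
    #         ...  # code under test sees the overridden setting
    #
    # override_settings restores the previous value when the block exits.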
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
            # Get the redirection page, using the same client that was used
            # to obtain the original response.
            redirect_response = response.client.get(path, QueryDict(query))
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
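    # Typical assertRedirects() usage (URLs are illustrative only):
    #
    #     response = self.client.get('/accounts/profile/')
    #     self.assertRedirects(response,
    #                          '/accounts/login/?next=/accounts/profile/')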
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response._charset)
content = content.decode(response._charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
"Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None,
"Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix='', html=False):
"""
        Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter; the assertion is true
        if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of %s in response"
" (expected %d)" % (real_count, text_repr, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200,
msg_prefix='', html=False):
"""
        Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` does not occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0,
msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
        for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err,
repr(field_errors)))
elif field in context[formset].forms[form_index].fields:
self.fail(msg_prefix + "The field '%s' "
"on formset '%s', form %d in "
"context %d contains no errors" %
(field, formset, form_index, i))
else:
self.fail(msg_prefix + "The formset '%s', form %d in "
"context %d does not contain the field '%s'" %
(formset, form_index, i, field))
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in "
"context %d does not contain any non-field "
"errors." % (formset, form_index, i))
self.assertTrue(err in non_field_errors,
msg_prefix + "The formset '%s', form %d "
"in context %d does not contain the "
"non-field error '%s' "
"(actual errors: %s)" %
(formset, form_index, i, err,
repr(non_field_errors)))
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in "
"context %d does not contain any "
"non-form errors." % (formset, i))
self.assertTrue(err in non_form_errors,
msg_prefix + "The formset '%s' in context "
"%d does not contain the "
"non-form error '%s' (actual errors: %s)" %
(formset, i, err, repr(non_form_errors)))
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render "
"the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, ', '.join(template_names)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertRaisesMessage(self, expected_exception, expected_message,
callable_obj=None, *args, **kwargs):
"""
Asserts that the message in a raised exception matches the passed
value.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
return six.assertRaisesRegex(self, expected_exception,
re.escape(expected_message), callable_obj, *args, **kwargs)
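    # Example (used as a context manager when no callable_obj is passed):
    #
    #     with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #         int('abc')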
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
            field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs),
fieldclass))
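    # Illustrative usage sketch (not part of the original source); EmailField and
    # the exact error text are assumptions, shown only for the dict shapes:
    #
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={'a@a.com': 'a@a.com'},
    #         invalid={'aaa': ['Enter a valid email address.']},
    #     )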
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
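    # Illustrative usage sketch (not part of the original source): attribute
    # order differences are ignored by the comparison.
    #
    #     self.assertHTMLEqual('<p class="a" id="b">hi</p>',
    #                          '<p id="b" class="a">hi</p>')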
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None,
'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None,
'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, needle, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
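    # Illustrative usage sketch (not part of the original source): the expected
    # value may be a Python object or a JSON string.
    #
    #     self.assertJSONEqual('{"a": 1, "b": [2, 3]}', {'b': [2, 3], 'a': 1})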
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
cache to these applications, then firing post_migrate -- it must run
with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
cache.set_available_apps(self.available_apps)
for db_name in self._databases_names(include_mirrors=False):
flush.Command.emit_post_migrate(verbosity=0, interactive=False, database=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
cache.unset_available_apps()
raise
def _databases_names(self, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# including mirrors or not. Otherwise, just on the default DB.
if getattr(self, 'multi_db', False):
return [alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST_MIRROR']]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = \
conn.ops.sequence_reset_by_name_sql(no_style(),
conn.introspection.sequence_list())
if sql_list:
with transaction.commit_on_success_unless_managed(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
if hasattr(self, 'fixtures'):
                # We have to use this slightly awkward syntax because we're
                # using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name, 'skip_validation': True})
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does rollback, the effect of
            # these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't happen,
# get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
cache.unset_available_apps()
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
call_command('flush', verbosity=0, interactive=False,
database=db_name, skip_validation=True,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=self.available_apps is not None)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(set(items), set(values))
values = list(values)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values)
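    # Illustrative usage sketch (not part of the original source); Poll is a
    # hypothetical model and repr() is the default transform:
    #
    #     self.assertQuerysetEqual(Poll.objects.order_by('name'),
    #                              ['<Poll: a>', '<Poll: b>'])
    #     # pass ordered=False to compare as sets when order is undefined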
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
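    # Illustrative usage sketch (not part of the original source); Poll is a
    # hypothetical model:
    #
    #     with self.assertNumQueries(2):
    #         Poll.objects.create(name='a')
    #         Poll.objects.create(name='b')
    #     # or, equivalently: self.assertNumQueries(1, Poll.objects.count)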
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
    Does basically the same as TransactionTestCase, but surrounds every test
    with a transaction, monkey-patches the real transaction management routines
    to do nothing, and rolls back the test transaction at the end of the test.
    Use TransactionTestCase if you need transaction management inside a test.
"""
def _fixture_setup(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = {}
for db_name in self._databases_names():
self.atomics[db_name] = transaction.atomic(using=db_name)
self.atomics[db_name].__enter__()
# Remove this when the legacy transaction management goes away.
disable_transaction_methods()
for db_name in self._databases_names(include_mirrors=False):
if hasattr(self, 'fixtures'):
try:
call_command('loaddata', *self.fixtures,
**{
'verbosity': 0,
'commit': False,
'database': db_name,
'skip_validation': True,
})
except Exception:
self._fixture_teardown()
raise
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
# Remove this when the legacy transaction management goes away.
restore_transaction_methods()
for db_name in reversed(self._databases_names()):
# Hack to force a rollback
connections[db_name].needs_rollback = True
self.atomics[db_name].__exit__(None, None, None)
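# Illustrative usage sketch (not part of the original source); the model and
# fixture names are assumptions:
#
#     class PollTests(TestCase):
#         fixtures = ['polls.json']
#
#         def test_count(self):
#             self.assertEqual(Poll.objects.count(), 3)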
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, cond_func):
self.cond_func = cond_func
def __get__(self, obj, objtype):
return self.cond_func()
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
test_item.__unittest_skip__ = CheckCondition(condition)
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(feature):
"""
Skip a test if a database has the named feature
"""
return _deferredSkip(lambda: getattr(connection.features, feature),
"Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
"""
Skip a test unless a database has the named feature
"""
return _deferredSkip(lambda: not getattr(connection.features, feature),
"Database doesn't support feature %s" % feature)
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
    A regular WSGIRequestHandler, except that it doesn't log any of the
    requests it receives to standard output, so test output stays uncluttered.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
final_rel_path = posixpath.normpath(unquote(os_rel_path)).lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
    Thread for running a live HTTP server while the tests are running.
"""
def __init__(self, host, possible_ports, static_handler, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = WSGIServer(
(self.host, port), QuietWSGIRequestHandler)
except WSGIServerException as e:
if (index + 1 < len(self.possible_ports) and
hasattr(e.args[0], 'errno') and
e.args[0].errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
                        # is something other than "Address already in use". So
# we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def join(self, timeout=None):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
super(LiveServerThread, self).join(timeout)
class LiveServerTestCase(TransactionTestCase):
"""
    Does basically the same as TransactionTestCase but also launches a live
    HTTP server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy client.
    Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless using in-memory
    sqlite) and each thread needs to commit all its transactions so that the
    other thread can see the changes.
"""
static_handler = _StaticFilesHandler
@property
def live_server_url(self):
return 'http://%s:%s' % (
self.server_thread.host, self.server_thread.port)
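    # Illustrative usage sketch (not part of the original source); it assumes a
    # subclass that sets up a Selenium driver as `cls.selenium` in setUpClass():
    #
    #     class HomePageTests(LiveServerTestCase):
    #         def test_home(self):
    #             self.selenium.get('%s%s' % (self.live_server_url, '/'))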
@classmethod
def setUpClass(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if (conn.vendor == 'sqlite'
and conn.settings_dict['NAME'] == ':memory:'):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
msg = 'Invalid address ("%s") for live server.' % specified_address
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
cls.server_thread = LiveServerThread(host, possible_ports,
cls.static_handler,
connections_override=connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
super(LiveServerTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
        # There may not be a 'server_thread' attribute if setUpClass() has
        # raised an exception for some reason.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.join()
# Restore sqlite connections' non-sharability
for conn in connections.all():
if (conn.vendor == 'sqlite'
and conn.settings_dict['NAME'] == ':memory:'):
conn.allow_thread_sharing = False
super(LiveServerTestCase, cls).tearDownClass()
| 41.23993 | 97 | 0.601261 |
8fb0759471d04a6d2affc8ccbe3bec93de724a61 | 8,148 | py | Python | webapp/api/Object/users.py | SCcagg5/My_Youtube | 6234b655ceb18a85d9bdd2c5837ce625c495d07b | [
"MIT"
] | null | null | null | webapp/api/Object/users.py | SCcagg5/My_Youtube | 6234b655ceb18a85d9bdd2c5837ce625c495d07b | [
"MIT"
] | null | null | null | webapp/api/Object/users.py | SCcagg5/My_Youtube | 6234b655ceb18a85d9bdd2c5837ce625c495d07b | [
"MIT"
] | null | null | null | import json, datetime, time
import jwt
import hashlib
import time
import base64
import os
import re
import uuid
import phonenumbers
from .mail import Mailer
from .sql import sql
class user:
def __init__(self, id = -1):
self.id = str(id)
def gettoken(self, id = None):
id = self.__getid(id, self.id)
secret = self.__getsecret()
exp = datetime.datetime.utcnow() + datetime.timedelta(hours=48)
ret = jwt.encode({'exp': exp, 'id': id, 'password': hash(str(id) + str(secret))}, secret).decode('utf-8')
return [True, {'exp': str(exp), "usrtoken": str(ret)}, None, {"usrtoken": str(ret)}]
def verify(self, token, id = None):
secret = self.__getsecret()
try:
decoded = jwt.decode(token, secret, leeway=10, algorithms=['HS256'])
id = self.__getid(id, decoded["id"])
if decoded["password"] != hash(str(id) + str(secret)):
                raise ValueError("token does not match user id")
self.id = id
except jwt.ExpiredSignature:
return [False, "Signature expired", 403]
except:
return [False, "Invalid usrtoken", 400]
return [True, {}, None]
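    # Illustrative round trip (not part of the original source); the id is made
    # up. Note that the payload check relies on Python's built-in hash(), so a
    # token only validates inside the same process / PYTHONHASHSEED.
    #
    #     u = user("some-user-id")
    #     ok, data, _, cookie = u.gettoken()
    #     assert user().verify(data["usrtoken"])[0] is True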
def verify_key(self, key):
arr_key = key.split("|")
if len(arr_key) < 2:
return [False, "Invalid key", 400]
email = base64.b64decode(str.encode(arr_key[0])).decode("utf-8")
password = arr_key[1]
res = sql.get("SELECT `id`, `password`, `valid` \
FROM `user` \
WHERE `email` = %s",
(email))
if len(res) != 1:
return [False, "Invalid user or key: " + email, 400]
if self.__activationkey(email, res[0][1]) != key:
return [False, "Invalid user or key: "+ self.__activationkey(email, res[0][1]), 400]
if int(res[0][2]) != 0:
return [False, "Acccount already activated", 400]
succes = sql.input("UPDATE `user` SET valid = 1 WHERE `email` = %s", (email))
if not succes:
return [False, "data input error", 500]
self.id = res[0][0]
return [True, {}, None]
def check_activation(self, id = None):
id = self.__getid(id, self.id)
res = sql.get("SELECT `valid` FROM `user` WHERE `id` = %s", (id))
if len(res) < 1:
return [False, "Verification error", 500]
if int(res[0][0]) == 1:
return [True, {}, None]
return [False, "Invalid right, activate your account", 401]
def register(self, usr, pseudo, email, passw):
if self.__exist_email(email):
return [False, "Email already in use", 400]
if self.__exist_username(usr):
return [False, "Username already in use", 400]
if not self.__email(email):
return [False, "Invalid email", 400]
id = str(uuid.uuid4())
password = self.__hash(email, passw)
date = str(int(round(time.time() * 1000)))
succes = sql.input("INSERT INTO `user` (`id`, `username`, `pseudo`, `email`, `password`, `date`, `valid`) VALUES (%s, %s, %s, %s, %s, %s, %s)", \
(id, usr, pseudo, email, password, date, 0))
if not succes:
return [False, "data input error", 500]
key = self.__activationkey(email, password)
#try:
Mailer().new_user(email, key, int(date)/1000)
#except:
# sql.input("DELETE FROM `user` WHERE `email` = %s", (email,))
# return [False, "Registration error", 500]
return [True, {}, None]
def login(self, email, passw):
password = self.__hash(email, passw)
res = sql.get("SELECT `id` FROM `user` WHERE `email` = %s AND `password` = %s", (email, password))
if len(res) > 0:
self.id = str(res[0][0])
return [True, {"user_id": self.id}, None]
else:
res = sql.get("SELECT `email` FROM `user` WHERE `username` = %s", (email))
if len(res) > 0:
email = str(res[0][0])
password = self.__hash(email, passw)
res = sql.get("SELECT `id` FROM `user` WHERE `email` = %s AND `password` = %s", (email, password))
if len(res) > 0:
self.id = str(res[0][0])
return [True, {"user_id": self.id}, None]
return [False, "Invalid email or password", 403]
def delete(self, id):
if self.id != str(id):
return [False, "You cannot delete another user", 403]
sql.input("DELETE FROM `user` WHERE `id` = %s", (self.id))
return [True, {}, None]
def update_data(self, id, username, pseudo, email):
if self.id != str(id):
return [False, "You cannot edit another user", 403]
date = str(int(round(time.time() * 1000)))
succes = sql.input("INSERT `user` (`id`, `username`, `pseudo`, `email`, `password`, `date`) \
VALUES (%s, %s, %s, %s, %s, %s) \
ON DUPLICATE KEY \
UPDATE `username` = %s, `pseudo` = %s, `email` = %s",
(id, username, pseudo, email, 'data', date, username, pseudo, email))
if not succes:
return [False, "data input error", 500]
return [True, {}, None]
def infos(self, id = None):
if id is None:
id = self.id
res = sql.get("SELECT username, pseudo, email, date FROM user WHERE id = %s", (id))
if len(res) > 0:
ret = {'username': '@' + str(res[0][0]), 'pseudo': str(res[0][1]), 'since': str(res[0][3])}
if self.id == str(id):
ret["email"] = res[0][2]
return [True, {str(id): ret}, None]
return [False, "Invalid user id", 404]
def all_user(self, psd = "", page = 0, perPage = None):
total = sql.get("SELECT COUNT(id) FROM `user`", ())[0][0]
pseudo = "%" + str("" if psd is None else psd) + "%"
page = 0 if page is None or int(page) - 1 < 0 else int(page) - 1
perPage = 25 if perPage is None or perPage < 1 else int(perPage)
res = sql.get("SELECT id, username, pseudo, date FROM `user` WHERE username LIKE %s OR pseudo LIKE %s LIMIT %s OFFSET %s", (pseudo, pseudo, perPage, perPage * page))
i = 0
ret = {}
while i < len(res):
ret[res[i][0]] = {'username': '@' + str(res[i][1]), 'pseudo': str(res[i][2]), 'since': str(res[i][3])}
i += 1
return [True, {"users": ret, "pager": {"total": total, "current": {"from": int(perPage * page + 1), "to": int(perPage * (page + 1))}, "options": {"perPage": perPage, "page": page + 1, "search": psd}}}, None]
def __hash(self, email, password):
if password is None or email is None:
return None
s = len(email)
n = s % (len(password) - 1 if len(password) > 1 else 1)
secret = self.__getsecret()
salted = password[:n] + str(s) + password[n:] + secret
hashed = hashlib.sha512(salted.encode('utf-8')).hexdigest()
return hashed
def __activationkey(self, email, password):
return base64.b64encode(str.encode(email)).decode("utf-8") + '|' + hashlib.sha512((email + password).encode('utf-8')).hexdigest()
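    # Descriptive note (not part of the original source): the key has the form
    # "<base64(email)>|<sha512(email + hashed_password)>", which is exactly what
    # verify_key() above splits on '|' and re-derives for comparison.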
def __exist_email(self, email):
res = sql.get("SELECT `id` FROM `user` WHERE `email` = %s", (email))
if len(res) > 0:
return True
return False
def __exist_username(self, usr):
res = sql.get("SELECT `id` FROM `user` WHERE `username` = %s", (usr))
if len(res) > 0:
return True
return False
def __getsecret(self):
return str(os.getenv('API_SCRT', '!@ws4RT4ws212@#%'))
def __getid(self, id, idbis = None):
return id if id != "-1" and id is not None else idbis if idbis is not None else self.id
def __email(self, email):
return re.match("[^@]+@[^@]+\.[^@]+", email)
| 43.572193 | 216 | 0.518409 |